Merge tag 'for-5.9-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 3 Aug 2020 16:41:48 +0000 (09:41 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 3 Aug 2020 16:41:48 +0000 (09:41 -0700)
Pull btrfs updates from David Sterba:
 "We don't have any big feature updates this time, there are lots of
  small enhacements or fixes. A highlight perhaps is the parallel fsync
  performance improvements, numbers below.

  Regarding the dio/iomap work that was reverted last time: the
  required API changes are likely to land in the upcoming cycle, and
  the btrfs part will be updated afterwards.

  User visible changes:

   - new mount option rescue= to group all recovery-related mount
     options under one name rather than adding many specific options;
     for now it only introduces aliases for existing options, and
     future extensions are in development to allow read-only mounting
     of partially damaged structures (a usage sketch follows the
     aliases):
      - usebackuproot is an alias for rescue=usebackuproot
      - nologreplay is an alias for rescue=nologreplay
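
     A minimal mount(2) sketch (not from the pull request) showing how
     these options can be passed from userspace; the device and
     mountpoint paths are hypothetical:

         #include <stdio.h>
         #include <sys/mount.h>

         int main(void)
         {
                 /* hypothetical paths; the rescue= options are the
                  * aliases listed above, nologreplay needs read-only */
                 if (mount("/dev/sdb1", "/mnt/recover", "btrfs", MS_RDONLY,
                           "rescue=usebackuproot,rescue=nologreplay")) {
                         perror("mount");
                         return 1;
                 }
                 return 0;
         }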

   - start deprecation of mount option inode_cache, removal scheduled
     for v5.11

   - removed deprecated mount options alloc_start and subvolrootid

   - device stats corruption counter gets incremented when a checksum
     mismatch is found

   - qgroup information exported in /sys/fs/btrfs/<UUID>/qgroups/<id>
     using sysfs

   - add link /sys/fs/btrfs/<UUID>/bdi pointing to the associated
     backing dev info (a short sysfs read-out sketch follows)
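
     A short userspace sketch (not part of the changes) of reading
     these sysfs entries; the fsid below is a hypothetical placeholder
     for a real filesystem UUID:

         #include <dirent.h>
         #include <limits.h>
         #include <stdio.h>
         #include <unistd.h>

         int main(void)
         {
                 /* hypothetical fsid, substitute the real one */
                 const char *base =
                         "/sys/fs/btrfs/11111111-2222-3333-4444-555555555555";
                 char path[PATH_MAX], target[PATH_MAX];
                 struct dirent *de;
                 ssize_t n;
                 DIR *dir;

                 /* resolve the new bdi symlink */
                 snprintf(path, sizeof(path), "%s/bdi", base);
                 n = readlink(path, target, sizeof(target) - 1);
                 if (n > 0) {
                         target[n] = '\0';
                         printf("bdi -> %s\n", target);
                 }

                 /* list the per-qgroup directories */
                 snprintf(path, sizeof(path), "%s/qgroups", base);
                 dir = opendir(path);
                 if (dir) {
                         while ((de = readdir(dir)) != NULL)
                                 printf("qgroup: %s\n", de->d_name);
                         closedir(dir);
                 }
                 return 0;
         }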

   - FS_INFO ioctl enhancements (an ioctl usage sketch follows this
     list):
      - add flags to request/describe newly added items
      - new item: numeric checksum type and checksum size
      - new item: generation
      - new item: metadata_uuid
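
     A hedged C sketch (assuming uapi headers from this cycle) of
     requesting the new items; the BTRFS_FS_INFO_FLAG_* names and the
     extended struct btrfs_ioctl_fs_info_args fields should be checked
     against <linux/btrfs.h>, and /mnt is a hypothetical mount point:

         #include <fcntl.h>
         #include <stdio.h>
         #include <string.h>
         #include <sys/ioctl.h>
         #include <unistd.h>
         #include <linux/btrfs.h>

         int main(void)
         {
                 struct btrfs_ioctl_fs_info_args args;
                 int fd = open("/mnt", O_RDONLY);   /* any path on btrfs */

                 if (fd < 0)
                         return 1;
                 memset(&args, 0, sizeof(args));
                 /* request the new items; the kernel leaves the flag
                  * set for each item it actually filled in */
                 args.flags = BTRFS_FS_INFO_FLAG_CSUM_INFO |
                              BTRFS_FS_INFO_FLAG_GENERATION |
                              BTRFS_FS_INFO_FLAG_METADATA_UUID;
                 if (ioctl(fd, BTRFS_IOC_FS_INFO, &args) == 0)
                         printf("csum type %u size %u, generation %llu\n",
                                args.csum_type, args.csum_size,
                                (unsigned long long)args.generation);
                 close(fd);
                 return 0;
         }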

   - seed device: with one new read-write device added, print the new
     device information in /proc/mounts

   - balance: detect cancellation by Ctrl-C in existing cancellation
     points

  Performance improvements:

   - optimized versions of various helpers on little-endian
     architectures, where no LE/BE conversion from the on-disk format
     is needed (a rough sketch of the idea follows)
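
     A rough userspace analogy (the kernel has its own accessor
     helpers; this only illustrates the idea): on a little-endian CPU
     the on-disk little-endian encoding already matches native order,
     so the conversion reduces to a plain load:

         #include <endian.h>
         #include <stdint.h>
         #include <string.h>

         /* read a little-endian on-disk u64 field at p */
         static uint64_t get_disk_u64(const void *p)
         {
                 uint64_t v;

                 memcpy(&v, p, sizeof(v));      /* raw on-disk bytes */
         #if __BYTE_ORDER == __LITTLE_ENDIAN
                 return v;                      /* already native order */
         #else
                 return le64toh(v);             /* big-endian must swap */
         #endif
         }

         int main(void)
         {
                 unsigned char disk[8] = { 42, 0, 0, 0, 0, 0, 0, 0 };

                 return get_disk_u64(disk) == 42 ? 0 : 1;
         }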

   - tree-log/fsync optimizations leading to lower max latency reported
     by dbench, reduced by about 12%

   - all chunk tree leaves are prefetched at mount time, can improve
     mount time on large (terabyte-sized) filesystems

   - speed up parallel fsync of files with reflinked/deduped extents:
     with 16 to 1024 jobs, throughput improves by roughly 50% on
     average and runtime decreases by roughly 30% on average; a notable
     outlier is 128 jobs with +121.2% throughput and -54.6% runtime

   - another speedup of parallel fsync, reducing the number of
     checksum tree lookups and contention; the improvements show up
     starting at 2 tasks (+20% throughput, -16% runtime) and scale up
     to 64 tasks (+200% throughput, -66% runtime)

  Core:

   - umount-time qgroup leak checker

   - qgroups
      - add a way to unreserve partial range after failure, avoiding
        some EDQUOT errors
      - improved flushing logic when EDQUOT is hit

   - possible EINTR interruption caused by failed reservations after
     transaction start is better handled and documented

   - transaction abort errors are unified to EROFS when it's not the
     original reason for the abort or there is no other way to
     determine the reason

  Fixes:

   - make truncate succeed on a NOCOW file even if data space is
     exhausted

   - fix cancelling balance on filesystem with exhausted metadata space

   - anon block device:
      - preallocate anon bdev when subvolume is created to report
        failure early
      - shorten time the anon bdev id is allocated
      - don't allocate anon bdev for internal roots

   - minor memory leak in ref-verify

   - refuse invalid combinations of compression and NOCOW file flags

   - lockdep fixes, updating the device locks

   - remove obsolete fallback logic for block group profile adjustments
     when switching from one device to more, which caused allocation of
     unwanted block groups

  Other cleanups, refactoring, simplifications:

   - conversions from struct inode to struct btrfs_inode in internal
     functions

   - removal of unused struct members"

* tag 'for-5.9-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (151 commits)
  btrfs: do not set the full sync flag on the inode during page release
  btrfs: release old extent maps during page release
  btrfs: fix race between page release and a fast fsync
  btrfs: open-code remount flag setting in btrfs_remount
  btrfs: if we're restriping, use the target restripe profile
  btrfs: don't adjust bg flags and use default allocation profiles
  btrfs: fix lockdep splat from btrfs_dump_space_info
  btrfs: move the chunk_mutex in btrfs_read_chunk_tree
  btrfs: open device without device_list_mutex
  btrfs: sysfs: use NOFS for device creation
  btrfs: return EROFS for BTRFS_FS_STATE_ERROR cases
  btrfs: document special case error codes for fs errors
  btrfs: don't WARN if we abort a transaction with EROFS
  btrfs: reduce contention on log trees when logging checksums
  btrfs: remove done label in writepage_delalloc
  btrfs: add comments for btrfs_reserve_flush_enum
  btrfs: relocation: review the call sites which can be interrupted by signal
  btrfs: avoid possible signal interruption of btrfs_drop_snapshot() on relocation tree
  btrfs: relocation: allow signal to cancel balance
  btrfs: raid56: remove out label in __raid56_parity_recover
  ...

191 files changed:
Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
Documentation/networking/bareudp.rst
Documentation/networking/devlink/devlink-trap.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/imx6qdl-icore.dtsi
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/keystone-k2g-evm.dts
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/include/asm/percpu.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/vdso.c
arch/arm/mm/mmu.c
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/checksum.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/smp.h
arch/arm64/kvm/mmu.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/perf/core-book3s.c
arch/sh/include/asm/pgalloc.h
arch/sh/kernel/entry-common.S
arch/x86/kernel/i8259.c
arch/x86/kvm/lapic.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/nested.h
drivers/atm/atmtcp.c
drivers/char/random.c
drivers/char/tpm/eventlog/acpi.c
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm.h
drivers/char/tpm/tpm2-space.c
drivers/char/tpm/tpmrm-dev.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/bridge/nwl-dsi.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_mipi_dbi.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/mcde/mcde_display.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
drivers/gpu/drm/panel/panel-simple.c
drivers/i2c/i2c-core-slave.c
drivers/infiniband/core/cq.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rdmavt/rc.c
drivers/net/bareudp.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/ni/nixge.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/usb/hso.c
drivers/net/usb/lan78xx.c
drivers/net/vxlan.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/pci/quirks.c
drivers/pinctrl/qcom/Kconfig
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-msm.h
drivers/pinctrl/qcom/pinctrl-sc7180.c
drivers/vhost/scsi.c
drivers/virtio/Kconfig
drivers/virtio/virtio_balloon.c
fs/io_uring.c
include/acpi/actbl3.h
include/asm-generic/io.h
include/drm/drm_mode_config.h
include/linux/i2c.h
include/linux/list.h
include/linux/mlx5/mlx5_ifc.h
include/linux/random.h
include/linux/rhashtable.h
include/linux/tpm.h
include/linux/tpm_eventlog.h
include/net/addrconf.h
include/net/devlink.h
include/net/xfrm.h
include/rdma/rdmavt_qp.h
kernel/audit.c
kernel/audit.h
kernel/auditsc.c
kernel/bpf/btf.c
kernel/bpf/hashtab.c
kernel/sched/wait.c
kernel/signal.c
kernel/time/timer.c
lib/random32.c
lib/rhashtable.c
mm/filemap.c
net/9p/trans_fd.c
net/bluetooth/hci_event.c
net/bpfilter/bpfilter_kern.c
net/compat.c
net/core/devlink.c
net/ipv4/fib_trie.c
net/ipv6/anycast.c
net/ipv6/esp6.c
net/ipv6/ipv6_sockglue.c
net/ipv6/route.c
net/key/af_key.c
net/mac80211/cfg.c
net/mac80211/mesh.c
net/mac80211/mesh_pathtbl.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/util.c
net/mptcp/protocol.c
net/rds/recv.c
net/rxrpc/call_object.c
net/rxrpc/conn_object.c
net/rxrpc/recvmsg.c
net/rxrpc/sendmsg.c
net/sched/act_ct.c
net/wireless/nl80211.c
net/xfrm/espintcp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/Makefile.modpost
scripts/kconfig/.gitignore
scripts/kconfig/Makefile
scripts/kconfig/qconf.cc
scripts/kconfig/qconf.h
scripts/mod/modpost.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/pcm.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/plugins/Makefile
tools/perf/arch/arm/util/auxtrace.c
tools/perf/tests/shell/record+zstd_comp_decomp.sh
tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/verifier/event_output.c
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
tools/testing/selftests/net/forwarding/ethtool.sh
tools/testing/selftests/net/psock_fanout.c
tools/testing/selftests/net/rxtimestamp.c
tools/testing/selftests/net/so_txtime.c
tools/testing/selftests/net/tcp_mmap.c

diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-video-engine.yaml
index 526593c..4cc1a67 100644
@@ -47,6 +47,9 @@ properties:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     description: Phandle to the device SRAM
 
+  iommus:
+    maxItems: 1
+
   memory-region:
     description:
       CMA pool to use for buffers allocation instead of the default
diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst
index ff40656..b9d04ee 100644
@@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to
 leverage the UDP based load balancing capability of different networks.
 MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.
 
-The Bareudp tunnel module provides a generic L3 encapsulation tunnelling
-support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside
-a UDP tunnel.
+The Bareudp tunnel module provides a generic L3 encapsulation support for
+tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.
 
 Special Handling
 ----------------
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index 1e3f3ff..2014307 100644
@@ -486,6 +486,10 @@ narrow. The description of these groups must be added to the following table:
      - Contains packet traps for packets that should be locally delivered after
        routing, but do not match more specific packet traps (e.g.,
        ``ipv4_bgp``)
+   * - ``external_delivery``
+     - Contains packet traps for packets that should be routed through an
+       external interface (e.g., management interface) that does not belong to
+       the same device (e.g., switch ASIC) as the ingress interface
    * - ``ipv6``
      - Contains packet traps for various IPv6 control packets (e.g., Router
        Advertisements)
diff --git a/MAINTAINERS b/MAINTAINERS
index f0569cf..4e2698c 100644
@@ -782,7 +782,7 @@ F:  include/dt-bindings/reset/altr,rst-mgr-a10sr.h
 F:     include/linux/mfd/altera-a10sr.h
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Thor Thayer <thor.thayer@linux.intel.com>
+M:     Joyce Ooi <joyce.ooi@intel.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/altera/
@@ -1425,7 +1425,7 @@ F:        arch/arm*/include/asm/perf_event.h
 F:     arch/arm*/kernel/hw_breakpoint.c
 F:     arch/arm*/kernel/perf_*
 F:     arch/arm/oprofile/common.c
-F:     drivers/perf/*
+F:     drivers/perf/
 F:     include/linux/perf/arm_pmu.h
 
 ARM PORT
@@ -14188,7 +14188,8 @@ F:      Documentation/devicetree/bindings/net/qcom,ethqos.txt
 F:     drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
 
 QUALCOMM GENERIC INTERFACE I2C DRIVER
-M:     Alok Chauhan <alokc@codeaurora.org>
+M:     Akash Asthana <akashast@codeaurora.org>
+M:     Mukesh Savaliya <msavaliy@codeaurora.org>
 L:     linux-i2c@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Supported
diff --git a/Makefile b/Makefile
index 229e67f..24a4c1b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 3481165..9b1a24c 100644
 
                        comphy: phy@18300 {
                                compatible = "marvell,armada-380-comphy";
-                               reg = <0x18300 0x100>;
+                               reg-names = "comphy", "conf";
+                               reg = <0x18300 0x100>, <0x18460 4>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 
diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
index 756f3a9..12997da 100644
 
        pinctrl_usbotg: usbotggrp {
                fsl,pins = <
-                       MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
+                       MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
                >;
        };
 
                        MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
                        MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
                        MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
+                       MX6QDL_PAD_GPIO_1__GPIO1_IO01  0x1b0b0
                >;
        };
 
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index 8259244..14fd1de 100644
@@ -99,7 +99,7 @@
 &fec2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet2>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&ethphy0>;
        fsl,magic-packet;
        status = "okay";
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index 3e5fb72..c99aa27 100644
 &fec2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet2>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-handle = <&ethphy2>;
        status = "okay";
 };
diff --git a/arch/arm/boot/dts/keystone-k2g-evm.dts b/arch/arm/boot/dts/keystone-k2g-evm.dts
index db640ba..8b3d64c 100644
 
 &gbe0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii-id";
+       phy-mode = "rgmii-rxid";
        status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index bf531ef..0f95a6e 100644
                default-pool {
                        compatible = "shared-dma-pool";
                        size = <0x6000000>;
-                       alloc-ranges = <0x4a000000 0x6000000>;
+                       alloc-ranges = <0x40000000 0x10000000>;
                        reusable;
                        linux,cma-default;
                };
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index e6b0367..c2b4fbf 100644
                default-pool {
                        compatible = "shared-dma-pool";
                        size = <0x6000000>;
-                       alloc-ranges = <0x4a000000 0x6000000>;
+                       alloc-ranges = <0x40000000 0x10000000>;
                        reusable;
                        linux,cma-default;
                };
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index ffe1d10..6d6a379 100644
                default-pool {
                        compatible = "shared-dma-pool";
                        size = <0x6000000>;
-                       alloc-ranges = <0x4a000000 0x6000000>;
+                       alloc-ranges = <0x40000000 0x10000000>;
                        reusable;
                        linux,cma-default;
                };
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index f44f448..1a3eedb 100644
@@ -5,6 +5,8 @@
 #ifndef _ASM_ARM_PERCPU_H_
 #define _ASM_ARM_PERCPU_H_
 
+#include <asm/thread_info.h>
+
 /*
  * Same as asm-generic/percpu.h, except that we store the per cpu offset
  * in the TPIDRPRW. TPIDRPRW only exists on V6K and V7
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 02ca7ad..7fff88e 100644
@@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
        arch_install_hw_breakpoint(bp);
 }
 
+static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
+                                      struct arch_hw_breakpoint *info)
+{
+       return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
+}
+
 static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                               struct pt_regs *regs)
 {
@@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                }
 
                pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+               /*
+                * If we triggered a user watchpoint from a uaccess routine,
+                * then handle the stepping ourselves since userspace really
+                * can't help us with this.
+                */
+               if (watchpoint_fault_on_uaccess(regs, info))
+                       goto step;
+
                perf_bp_event(wp, regs);
 
                /*
-                * If no overflow handler is present, insert a temporary
-                * mismatch breakpoint so we can single-step over the
-                * watchpoint trigger.
+                * Defer stepping to the overflow handler if one is installed.
+                * Otherwise, insert a temporary mismatch breakpoint so that
+                * we can single-step over the watchpoint trigger.
                 */
-               if (is_default_overflow_handler(wp))
-                       enable_single_step(wp, instruction_pointer(regs));
+               if (!is_default_overflow_handler(wp))
+                       goto unlock;
 
+step:
+               enable_single_step(wp, instruction_pointer(regs));
 unlock:
                rcu_read_unlock();
        }
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 6bfdca4..fddd08a 100644
@@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
        if (!cntvct_ok) {
                vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
                vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+               vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
        }
 }
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 628028b..bcd8261 100644
@@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
        pud_t *pud;
 
        p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
-       if (!WARN_ON(!p4d))
+       if (WARN_ON(!p4d))
                return;
        pud = pud_alloc(mm, p4d, md->virtual);
        if (WARN_ON(!pud))
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 78b1361..9ce78a7 100644
                        resets = <&ccu RST_BUS_VE>;
                        interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                        allwinner,sram = <&ve_sram 1>;
+                       iommus = <&iommu 3>;
                };
 
                gpu: gpu@1800000 {
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 12f0eb5..619db9b 100644
@@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        "663:\n\t"                                                      \
        newinstr "\n"                                                   \
        "664:\n\t"                                                      \
-       ".previous\n\t"                                                 \
        ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
-       ".org   . - (662b-661b) + (664b-663b)\n"                        \
+       ".org   . - (662b-661b) + (664b-663b)\n\t"                      \
+       ".previous\n"                                                   \
        ".endif\n"
 
 #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb)       \
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index b6f7bc6..93a161b 100644
@@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
        __uint128_t tmp;
        u64 sum;
+       int n = ihl; /* we want it signed */
 
        tmp = *(const __uint128_t *)iph;
        iph += 16;
-       ihl -= 4;
+       n -= 4;
        tmp += ((tmp >> 64) | (tmp << 64));
        sum = tmp >> 64;
        do {
                sum += *(const u32 *)iph;
                iph += 4;
-       } while (--ihl);
+       } while (--n > 0);
 
        sum += ((sum >> 32) | (sum << 32));
        return csum_fold((__force u32)(sum >> 32));
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c3e6fcc..e21d4a0 100644
@@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
 #define vcpu_has_sve(vcpu) (system_supports_sve() && \
                            ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
 
-#define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \
-                                 system_supports_generic_auth()) && \
-                                ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define vcpu_has_ptrauth(vcpu)                                         \
+       ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||                \
+         cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&               \
+        (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+#else
+#define vcpu_has_ptrauth(vcpu)         false
+#endif
 
 #define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
 
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index ea268d8..a0c8a0b 100644
@@ -30,7 +30,6 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
-#include <asm/pointer_auth.h>
 
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
 
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8c0035c..31058e6 100644
@@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
        return true;
 }
 
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
 {
        pud_t *pudp;
        pmd_t *pmdp;
@@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
                return false;
 
        if (pudp)
-               return kvm_s2pud_exec(pudp);
+               return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
        else if (pmdp)
-               return kvm_s2pmd_exec(pmdp);
+               return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
        else
-               return kvm_s2pte_exec(ptep);
+               return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
 }
 
 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * execute permissions, and we preserve whatever we have.
         */
        needs_exec = exec_fault ||
-               (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+               (fault_status == FSC_PERM &&
+                stage2_is_exec(kvm, fault_ipa, vma_pagesize));
 
        if (vma_pagesize == PUD_SIZE) {
                pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0fc8bad..446e54c 100644
@@ -3072,10 +3072,18 @@ do_hash_page:
        ori     r0,r0,DSISR_BAD_FAULT_64S@l
        and.    r0,r5,r0                /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
+
+       /*
+        * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+        * don't call hash_page, just fail the fault. This is required to
+        * prevent re-entrancy problems in the hash code, namely perf
+        * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+        * hash fault. See the comment in hash_preload().
+        */
        ld      r11, PACA_THREAD_INFO(r13)
-       lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
-       andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
-       bne     77f                     /* then don't call hash_page now */
+       lwz     r0,TI_PREEMPT(r11)
+       andis.  r0,r0,NMI_MASK@h
+       bne     77f
 
        /*
         * r3 contains the trap number
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e..9b9f92a 100644
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
        pgd_t *pgdir;
        int rc, ssize, update_flags = 0;
        unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+       unsigned long flags;
 
        BUG_ON(get_region_id(ea) != USER_REGION_ID);
 
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                return;
 #endif /* CONFIG_PPC_64K_PAGES */
 
+       /*
+        * __hash_page_* must run with interrupts off, as it sets the
+        * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+        * time and may take a hash fault reading the user stack, see
+        * read_user_stack_slow() in the powerpc/perf code.
+        *
+        * If that takes a hash fault on the same page as we lock here, it
+        * will bail out when seeing H_PAGE_BUSY set, and retry the access
+        * leading to an infinite loop.
+        *
+        * Disabling interrupts here does not prevent perf interrupts, but it
+        * will prevent them taking hash faults (see the NMI test in
+        * do_hash_page), then read_user_stack's copy_from_user_nofault will
+        * fail and perf will fall back to read_user_stack_slow(), which
+        * walks the Linux page tables.
+        *
+        * Interrupts must also be off for the duration of the
+        * mm_is_thread_local test and update, to prevent preempt running the
+        * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+        */
+       local_irq_save(flags);
+
        /* Is that local to this CPU ? */
        if (mm_is_thread_local(mm))
                update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                                   mm_ctx_user_psize(&mm->context),
                                   mm_ctx_user_psize(&mm->context),
                                   pte_val(*ptep));
+
+       local_irq_restore(flags);
 }
 
 /*
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index cd6a742..01d7028 100644
@@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
 
        perf_read_regs(regs);
 
+       /*
+        * If perf interrupts hit in a local_irq_disable (soft-masked) region,
+        * we consider them as NMIs. This is required to prevent hash faults on
+        * user addresses when reading callchains. See the NMI test in
+        * do_hash_page.
+        */
        nmi = perf_intr_is_nmi(regs);
        if (nmi)
                nmi_enter();
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 22d968b..d770da3 100644
@@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
 extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
 extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
+#define __pmd_free_tlb(tlb, pmdp, addr)                pmd_free((tlb)->mm, (pmdp))
 #endif
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -33,13 +34,4 @@ do {                                                 \
        tlb_remove_page((tlb), (pte));                  \
 } while (0)
 
-#if CONFIG_PGTABLE_LEVELS > 2
-#define __pmd_free_tlb(tlb, pmdp, addr)                        \
-do {                                                   \
-       struct page *page = virt_to_page(pmdp);         \
-       pgtable_pmd_page_dtor(page);                    \
-       tlb_remove_page((tlb), page);                   \
-} while (0);
-#endif
-
 #endif /* __ASM_SH_PGALLOC_H */
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 956a7a0..9bac5bb 100644
@@ -199,7 +199,7 @@ syscall_trace_entry:
        mov.l   @(OFF_R7,r15), r7   ! arg3
        mov.l   @(OFF_R3,r15), r3   ! syscall_nr
        !
-       mov.l   2f, r10                 ! Number of syscalls
+       mov.l   6f, r10                 ! Number of syscalls
        cmp/hs  r10, r3
        bf      syscall_call
        mov     #-ENOSYS, r0
@@ -353,7 +353,7 @@ ENTRY(system_call)
        tst     r9, r8
        bf      syscall_trace_entry
        !
-       mov.l   2f, r8                  ! Number of syscalls
+       mov.l   6f, r8                  ! Number of syscalls
        cmp/hs  r8, r3
        bt      syscall_badsys
        !
@@ -392,7 +392,7 @@ syscall_exit:
 #if !defined(CONFIG_CPU_SH2)
 1:     .long   TRA
 #endif
-2:     .long   NR_syscalls
+6:     .long   NR_syscalls
 3:     .long   sys_call_table
 7:     .long   do_syscall_trace_enter
 8:     .long   do_syscall_trace_leave
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index f3c7625..282b4ee 100644
@@ -207,7 +207,7 @@ spurious_8259A_irq:
                 * lets ACK and report it. [once per IRQ]
                 */
                if (!(spurious_irq_mask & irqmask)) {
-                       printk(KERN_DEBUG
+                       printk_deferred(KERN_DEBUG
                               "spurious 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5bf72fc..4ce2ddd 100644
@@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
+       if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
                        apic_lvtt_period(apic))
                return;
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c0da4dd..5bbf761 100644
@@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
        svm->nested.vmcb = 0;
        svm->vcpu.arch.hflags = 0;
 
-       if (pause_filter_count) {
+       if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
                control->pause_filter_count = pause_filter_count;
                if (pause_filter_thresh)
                        control->pause_filter_thresh = pause_filter_thresh;
@@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool in_kernel = (svm_get_cpl(vcpu) == 0);
 
-       if (pause_filter_thresh)
+       if (!kvm_pause_in_guest(vcpu->kvm))
                grow_ple_window(vcpu);
 
        kvm_vcpu_on_spin(vcpu, in_kernel);
@@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
-       if (pause_filter_thresh)
+       if (!kvm_pause_in_guest(vcpu->kvm))
                shrink_ple_window(vcpu);
 }
 
@@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
 
 static int svm_vm_init(struct kvm *kvm)
 {
+       if (!pause_filter_count || !pause_filter_thresh)
+               kvm->arch.pause_in_guest = true;
+
        if (avic) {
                int ret = avic_vm_init(kvm);
                if (ret)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d4a4cec..11e4df5 100644
@@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
                return -EINVAL;
 
+       if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
+               return -EINVAL;
+
        /*
         * SMM temporarily disables VMX, so we cannot be in guest mode,
         * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
@@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
        if (ret)
                return ret;
 
-       /* Empty 'VMXON' state is permitted */
-       if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
-               return 0;
+       /* Empty 'VMXON' state is permitted if no VMCS loaded */
+       if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
+               /* See vmx_has_valid_vmcs12.  */
+               if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
+                   (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
+                   (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
+                       return -EINVAL;
+               else
+                       return 0;
+       }
 
        if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
                if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 758bccc..197148d 100644
@@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
        return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
 }
 
+/*
+ * Note: the same condition is checked against the state provided by userspace
+ * in vmx_set_nested_state; if it is satisfied, the nested state must include
+ * the VMCS12.
+ */
 static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d9fd702..7f814da 100644
@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
                return -EMEDIUMTYPE;
        }
        dev_data = PRIV(dev);
-       if (!dev_data->persist) return 0;
+       if (!dev_data->persist) {
+               atm_dev_put(dev);
+               return 0;
+       }
        dev_data->persist = 0;
-       if (PRIV(dev)->vcc) return 0;
+       if (PRIV(dev)->vcc) {
+               atm_dev_put(dev);
+               return 0;
+       }
        kfree(dev_data);
        atm_dev_put(dev);
        atm_dev_deregister(dev);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2a41b21..d20ba1b 100644
@@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 
        fast_mix(fast_pool);
        add_interrupt_bench(cycles);
+       this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
 
        if (unlikely(crng_init == 0)) {
                if ((fast_pool->count >= 64) &&
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 63ada5e..3633ed7 100644
@@ -49,9 +49,9 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
        void __iomem *virt;
        u64 len, start;
        struct tpm_bios_log *log;
-
-       if (chip->flags & TPM_CHIP_FLAG_TPM2)
-               return -ENODEV;
+       struct acpi_table_tpm2 *tbl;
+       struct acpi_tpm2_phy *tpm2_phy;
+       int format;
 
        log = &chip->log;
 
@@ -61,23 +61,44 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
        if (!chip->acpi_dev_handle)
                return -ENODEV;
 
-       /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
-       status = acpi_get_table(ACPI_SIG_TCPA, 1,
-                               (struct acpi_table_header **)&buff);
-
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       switch(buff->platform_class) {
-       case BIOS_SERVER:
-               len = buff->server.log_max_len;
-               start = buff->server.log_start_addr;
-               break;
-       case BIOS_CLIENT:
-       default:
-               len = buff->client.log_max_len;
-               start = buff->client.log_start_addr;
-               break;
+       if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+               status = acpi_get_table("TPM2", 1,
+                                       (struct acpi_table_header **)&tbl);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+
+               if (tbl->header.length <
+                               sizeof(*tbl) + sizeof(struct acpi_tpm2_phy))
+                       return -ENODEV;
+
+               tpm2_phy = (void *)tbl + sizeof(*tbl);
+               len = tpm2_phy->log_area_minimum_length;
+
+               start = tpm2_phy->log_area_start_address;
+               if (!start || !len)
+                       return -ENODEV;
+
+               format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+       } else {
+               /* Find TCPA entry in RSDT (ACPI_LOGICAL_ADDRESSING) */
+               status = acpi_get_table(ACPI_SIG_TCPA, 1,
+                                       (struct acpi_table_header **)&buff);
+               if (ACPI_FAILURE(status))
+                       return -ENODEV;
+
+               switch (buff->platform_class) {
+               case BIOS_SERVER:
+                       len = buff->server.log_max_len;
+                       start = buff->server.log_start_addr;
+                       break;
+               case BIOS_CLIENT:
+               default:
+                       len = buff->client.log_max_len;
+                       start = buff->client.log_start_addr;
+                       break;
+               }
+
+               format = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
        }
        if (!len) {
                dev_warn(&chip->dev, "%s: TCPA log area empty\n", __func__);
@@ -98,7 +119,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
        memcpy_fromio(log->bios_event_log, virt, len);
 
        acpi_os_unmap_iomem(virt, len);
-       return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+       return format;
 
 err:
        kfree(log->bios_event_log);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 8c77e88..ddaeceb 100644
@@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
        chip->cdev.owner = THIS_MODULE;
        chip->cdevs.owner = THIS_MODULE;
 
-       chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!chip->work_space.context_buf) {
-               rc = -ENOMEM;
-               goto out;
-       }
-       chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!chip->work_space.session_buf) {
+       rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
+       if (rc) {
                rc = -ENOMEM;
                goto out;
        }
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 0fbcede..947d1db 100644
@@ -59,6 +59,9 @@ enum tpm_addr {
 
 #define TPM_TAG_RQU_COMMAND 193
 
+/* TPM2 specific constants. */
+#define TPM2_SPACE_BUFFER_SIZE         16384 /* 16 kB */
+
 struct stclear_flags_t {
        __be16  tag;
        u8      deactivated;
@@ -228,7 +231,7 @@ unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
 int tpm2_probe(struct tpm_chip *chip);
 int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
 int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
-int tpm2_init_space(struct tpm_space *space);
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
 void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
 void tpm2_flush_space(struct tpm_chip *chip);
 int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 982d341..784b8b3 100644
@@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space)
        }
 }
 
-int tpm2_init_space(struct tpm_space *space)
+int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
 {
-       space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       space->context_buf = kzalloc(buf_size, GFP_KERNEL);
        if (!space->context_buf)
                return -ENOMEM;
 
-       space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       space->session_buf = kzalloc(buf_size, GFP_KERNEL);
        if (space->session_buf == NULL) {
                kfree(space->context_buf);
+               /* Prevent caller getting a dangling pointer. */
+               space->context_buf = NULL;
                return -ENOMEM;
        }
 
+       space->buf_size = buf_size;
        return 0;
 }
 
@@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
               sizeof(space->context_tbl));
        memcpy(&chip->work_space.session_tbl, &space->session_tbl,
               sizeof(space->session_tbl));
-       memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
-       memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
+       memcpy(chip->work_space.context_buf, space->context_buf,
+              space->buf_size);
+       memcpy(chip->work_space.session_buf, space->session_buf,
+              space->buf_size);
 
        rc = tpm2_load_space(chip);
        if (rc) {
@@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_chip *chip)
                        continue;
 
                rc = tpm2_save_context(chip, space->context_tbl[i],
-                                      space->context_buf, PAGE_SIZE,
+                                      space->context_buf, space->buf_size,
                                       &offset);
                if (rc == -ENOENT) {
                        space->context_tbl[i] = 0;
@@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_chip *chip)
                        continue;
 
                rc = tpm2_save_context(chip, space->session_tbl[i],
-                                      space->session_buf, PAGE_SIZE,
+                                      space->session_buf, space->buf_size,
                                       &offset);
-
                if (rc == -ENOENT) {
                        /* handle error saving session, just forget it */
                        space->session_tbl[i] = 0;
@@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
               sizeof(space->context_tbl));
        memcpy(&space->session_tbl, &chip->work_space.session_tbl,
               sizeof(space->session_tbl));
-       memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
-       memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
+       memcpy(space->context_buf, chip->work_space.context_buf,
+              space->buf_size);
+       memcpy(space->session_buf, chip->work_space.session_buf,
+              space->buf_size);
 
        return 0;
 out:
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index 7a0a705..eef0fb0 100644
@@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
        if (priv == NULL)
                return -ENOMEM;
 
-       rc = tpm2_init_space(&priv->space);
+       rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
        if (rc) {
                kfree(priv);
                return -ENOMEM;
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 039e0f9..6945c3c 100644
@@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
        /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
        err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
                                   fw_cfg_sel_ko, "%d", entry->select);
-       if (err)
-               goto err_register;
+       if (err) {
+               kobject_put(&entry->kobj);
+               return err;
+       }
 
        /* add raw binary content access */
        err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
 
 err_add_raw:
        kobject_del(&entry->kobj);
-err_register:
        kfree(entry);
        return err;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d7e17e3..2129209 100644
@@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                return n ? -EFAULT : 0;
        }
        case AMDGPU_INFO_DEV_INFO: {
-               struct drm_amdgpu_info_device dev_info = {};
+               struct drm_amdgpu_info_device dev_info;
                uint64_t vm_size;
 
+               memset(&dev_info, 0, sizeof(dev_info));
                dev_info.device_id = dev->pdev->device;
                dev_info.chip_rev = adev->rev_id;
                dev_info.external_rev = adev->external_rev_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ebb8a28..02e6f8c 100644
@@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                tmp_str++;
        while (isspace(*++tmp_str));
 
-       while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+       while (tmp_str[0]) {
+               sub_str = strsep(&tmp_str, delimiter);
                ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                if (ret)
                        return -EINVAL;
@@ -1038,7 +1039,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
        memcpy(buf_cpy, buf, bytes);
        buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
-       while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
+       while (tmp[0]) {
+               sub_str = strsep(&tmp, delimiter);
                if (strlen(sub_str)) {
                        ret = kstrtol(sub_str, 0, &level);
                        if (ret)
@@ -1635,7 +1637,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                        i++;
                memcpy(buf_cpy, buf, count-i);
                tmp_str = buf_cpy;
-               while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
+               while (tmp_str[0]) {
+                       sub_str = strsep(&tmp_str, delimiter);
                        ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                        if (ret)
                                return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 86ffa0c..710edc7 100644
@@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                 * the same resource. If we have a new DC context as part of
                 * the DM atomic state from validation we need to free it and
                 * retain the existing one instead.
+                *
+                * Furthermore, since the DM atomic state only contains the DC
+                * context and can safely be annulled, we can free the state
+                * and clear the associated private object now to free
+                * some memory and avoid a possible use-after-free later.
                 */
-               struct dm_atomic_state *new_dm_state, *old_dm_state;
 
-               new_dm_state = dm_atomic_get_new_state(state);
-               old_dm_state = dm_atomic_get_old_state(state);
+               for (i = 0; i < state->num_private_objs; i++) {
+                       struct drm_private_obj *obj = state->private_objs[i].ptr;
 
-               if (new_dm_state && old_dm_state) {
-                       if (new_dm_state->context)
-                               dc_release_state(new_dm_state->context);
+                       if (obj->funcs == adev->dm.atomic_obj.funcs) {
+                               int j = state->num_private_objs-1;
 
-                       new_dm_state->context = old_dm_state->context;
+                               dm_atomic_destroy_state(obj,
+                                               state->private_objs[i].state);
+
+                               /* If i is not at the end of the array then the
+                                * last element needs to be moved to where i was
+                                * before the array can safely be truncated.
+                                */
+                               if (i != j)
+                                       state->private_objs[i] =
+                                               state->private_objs[j];
 
-                       if (old_dm_state->context)
-                               dc_retain_state(old_dm_state->context);
+                               state->private_objs[j].ptr = NULL;
+                               state->private_objs[j].state = NULL;
+                               state->private_objs[j].old_state = NULL;
+                               state->private_objs[j].new_state = NULL;
+
+                               state->num_private_objs = j;
+                               break;
+                       }
                }
        }
 
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 05d8373..079f46f 100644
@@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs)
        bochs->dev->mode_config.preferred_depth = 24;
        bochs->dev->mode_config.prefer_shadow = 0;
        bochs->dev->mode_config.prefer_shadow_fbdev = 1;
+       bochs->dev->mode_config.fbdev_use_iomem = true;
        bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 
        bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 87b58c1..648eb23 100644
@@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
 
        adv7511->bridge.funcs = &adv7511_bridge_funcs;
        adv7511->bridge.of_node = dev->of_node;
+       adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
 
        drm_bridge_add(&adv7511->bridge);
 
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index b14d725..c7bc194 100644
@@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
        struct drm_panel *panel;
        int ret;
 
-       if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
-               DRM_ERROR("Fix bridge driver to make connector optional!");
-               return -EINVAL;
-       }
-
        ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
                                          &panel_bridge);
        if (ret)
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5609e16..89cfd68 100644
@@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
        unsigned int y;
 
        for (y = clip->y1; y < clip->y2; y++) {
-               memcpy(dst, src, len);
+               if (!fb_helper->dev->mode_config.fbdev_use_iomem)
+                       memcpy(dst, src, len);
+               else
+                       memcpy_toio((void __iomem *)dst, src, len);
+
                src += fb->pitches[0];
                dst += fb->pitches[0];
        }
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 7bf628e..ee2058a 100644
@@ -871,9 +871,6 @@ err:
  * @file_priv: drm file-private structure
  *
  * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
  */
 int
 drm_gem_open_ioctl(struct drm_device *dev, void *data,
@@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 
        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
-       drm_gem_object_put_unlocked(obj);
        if (ret)
-               return ret;
+               goto err;
 
        args->handle = handle;
        args->size = obj->size;
 
-       return 0;
+err:
+       drm_gem_object_put_unlocked(obj);
+       return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index bb27c82..bf7888a 100644
@@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
                        }
                }
 
-               tr.len = chunk;
+               tr.len = chunk * 2;
                len -= chunk;
 
                ret = spi_sync(spi, &m);
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index b50b44e..8fc3f67 100644
@@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
                 * configurations by passing the endpoints explicitly to
                 * drm_of_lvds_get_dual_link_pixel_order().
                 */
-               if (!current_pt || pixels_type != current_pt) {
-                       of_node_put(remote_port);
+               if (!current_pt || pixels_type != current_pt)
                        return -EINVAL;
-               }
        }
 
        return pixels_type;
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 08802e5..4d2290f 100644
@@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
         */
        if (fb) {
                mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
-               if (!mcde->video_mode)
-                       /* Send a single frame using software sync */
-                       mcde_display_send_one_frame(mcde);
+               if (!mcde->video_mode) {
+                       /*
+                        * Send a single frame using software sync if the flow
+                        * is not active yet.
+                        */
+                       if (mcde->flow_active == 0)
+                               mcde_display_send_one_frame(mcde);
+               }
                dev_info_once(mcde->dev, "sent first display update\n");
        } else {
                /*
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 519f998..800b775 100644
@@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
         */
        if (core->assign_windows) {
                core->func->wndw.owner(core);
-               core->func->update(core, interlock, false);
+               nv50_disp_atomic_commit_core(state, interlock);
                core->assign_windows = false;
                interlock[NV50_DISP_INTERLOCK_CORE] = 0;
        }
@@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev)
        if (disp->disp->object.oclass >= TU102_DISP)
                nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
        else
-       if (disp->disp->object.oclass >= GF110_DISP)
+       if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
        else
                nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 496c462..07373bb 100644
@@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
                   uint32_t *tile_mode,
                   uint8_t *kind)
 {
+       struct nouveau_display *disp = nouveau_display(drm->dev);
        BUG_ON(!tile_mode || !kind);
 
        if (modifier == DRM_FORMAT_MOD_LINEAR) {
@@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
                 * Extract the block height and kind from the corresponding
                 * modifier fields.  See drm_fourcc.h for details.
                 */
+
+               if ((modifier & (0xffull << 12)) == 0ull) {
+                       /* Legacy modifier.  Translate to this dev's 'kind.' */
+                       modifier |= disp->format_modifiers[0] & (0xffull << 12);
+               }
+
                *tile_mode = (uint32_t)(modifier & 0xF);
                *kind = (uint8_t)((modifier >> 12) & 0xFF);
 
@@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
        }
 }
 
+static const u64 legacy_modifiers[] = {
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+       DRM_FORMAT_MOD_INVALID
+};
+
 static int
 nouveau_validate_decode_mod(struct nouveau_drm *drm,
                            uint64_t modifier,
@@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
             (disp->format_modifiers[mod] != modifier);
             mod++);
 
-       if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
-               return -EINVAL;
+       if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
+               for (mod = 0;
+                    (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+                    (legacy_modifiers[mod] != modifier);
+                    mod++);
+               if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+                       return -EINVAL;
+       }
 
        nouveau_decode_mod(drm, modifier, tile_mode, kind);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3d11b84..d5c23d1 100644
@@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
        struct drm_framebuffer *fb;
        struct nouveau_channel *chan;
        struct nouveau_bo *nvbo;
-       struct drm_mode_fb_cmd2 mode_cmd;
+       struct drm_mode_fb_cmd2 mode_cmd = {};
        int ret;
 
        mode_cmd.width = sizes->surface_width;
@@ -590,6 +590,7 @@ fini:
        drm_fb_helper_fini(&fbcon->helper);
 free:
        kfree(fbcon);
+       drm->fbcon = NULL;
        return ret;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index dcf0824..dffcac2 100644
@@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
 {
        struct nvkm_ior *ior;
 
-       /* First preference is to reuse the OR that is currently armed
-        * on HW, if any, in order to prevent unnecessary switching.
-        */
-       list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->identity && !!ior->func->hda.hpd == hda &&
-                   !ior->asy.outp && ior->arm.outp == outp)
-                       return nvkm_outp_acquire_ior(outp, user, ior);
-       }
-
        /* Failing that, a completely unused OR is the next best thing. */
        list_for_each_entry(ior, &outp->disp->ior, head) {
                if (!ior->identity && !!ior->func->hda.hpd == hda &&
@@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
                return nvkm_outp_acquire_ior(outp, user, ior);
        }
 
+       /* First preference is to reuse the OR that is currently armed
+        * on HW, if any, in order to prevent unnecessary switching.
+        */
+       list_for_each_entry(ior, &outp->disp->ior, head) {
+               if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
+                       /*XXX: For various complicated reasons, we can't outright switch
+                        *     the boot-time OR on the first modeset without some fairly
+                        *     invasive changes.
+                        *
+                        *     The systems that were fixed by modifying the OR selection
+                        *     code to account for HDA support shouldn't regress here as
+                        *     the HDA-enabled ORs match the relevant output's pad macro
+                        *     index, and the firmware seems to select an OR this way.
+                        *
+                        *     This warning is to make it obvious if that proves wrong.
+                        */
+                       WARN_ON(hda && !ior->func->hda.hpd);
+                       return nvkm_outp_acquire_ior(outp, user, ior);
+               }
+       }
+
        /* If we don't need HDA, first try to acquire an OR that doesn't
         * support it to leave free the ones that do.
         */
index 46fe180..2649469 100644
@@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
 static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
        .clock = 157000,
        .hdisplay = 1200,
-       .hsync_start = 1200 + 80,
-       .hsync_end = 1200 + 80 + 24,
-       .htotal = 1200 + 80 + 24 + 36,
+       .hsync_start = 1200 + 60,
+       .hsync_end = 1200 + 60 + 24,
+       .htotal = 1200 + 60 + 24 + 56,
        .vdisplay = 1920,
        .vsync_start = 1920 + 16,
        .vsync_end = 1920 + 16 + 4,
index 5178f87..4aeb960 100644
@@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
                .height = 165,
        },
        .delay = {
-               .hpd_absent_delay = 200,
+               /*
+                * When power is first given to the panel there's a short
+                * spike on the HPD line.  It was explained that this spike
+                * lasts until the TCON data download is complete.  On
+                * one system this was measured at 8 ms.  We'll put 15 ms
+                * in the prepare delay just to be safe and take it away
+                * from the hpd_absent_delay (which would otherwise be 200 ms)
+                * to handle this.  That means:
+                * - If HPD isn't hooked up you still have 200 ms delay.
+                * - If HPD is hooked up we won't try to look at it for the
+                *   first 15 ms.
+                */
+               .prepare = 15,
+               .hpd_absent_delay = 185,
+
                .unprepare = 500,
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
index 5427f04..1589179 100644
@@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
 {
        int ret;
 
-       if (!client || !slave_cb) {
-               WARN(1, "insufficient data\n");
+       if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
                return -EINVAL;
-       }
 
        if (!(client->flags & I2C_CLIENT_SLAVE))
                dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
 {
        int ret;
 
+       if (IS_ERR_OR_NULL(client))
+               return -EINVAL;
+
        if (!client->adapter->algo->unreg_slave) {
                dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
                return -EOPNOTSUPP;
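For context, a hedged sketch of what the strengthened checks guard: registering an I2C backend with the standard slave API (the handler body is illustrative only):

    static int my_slave_cb(struct i2c_client *client,
                           enum i2c_slave_event event, u8 *val)
    {
            switch (event) {
            case I2C_SLAVE_WRITE_RECEIVED:
                    /* consume the byte in *val */
                    break;
            case I2C_SLAVE_READ_REQUESTED:
                    *val = 0; /* first byte to send back */
                    break;
            default:
                    break;
            }
            return 0;
    }

    /* An ERR_PTR/NULL client or a NULL callback now fails cleanly with
     * -EINVAL instead of being dereferenced later.
     */
    ret = i2c_slave_register(client, my_slave_cb);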
index 655795b..513825e 100644
@@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
        INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
 }
 
+static void rdma_dim_destroy(struct ib_cq *cq)
+{
+       if (!cq->dim)
+               return;
+
+       cancel_work_sync(&cq->dim->work);
+       kfree(cq->dim);
+}
+
 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 {
        int rc;
@@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
        return cq;
 
 out_destroy_cq:
+       rdma_dim_destroy(cq);
        rdma_restrack_del(&cq->res);
        cq->device->ops.destroy_cq(cq, udata);
 out_free_wc:
@@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
                WARN_ON_ONCE(1);
        }
 
+       rdma_dim_destroy(cq);
        trace_cq_free(cq);
        rdma_restrack_del(&cq->res);
        cq->device->ops.destroy_cq(cq, udata);
-       if (cq->dim)
-               cancel_work_sync(&cq->dim->work);
-       kfree(cq->dim);
        kfree(cq->wc);
        kfree(cq);
 }
index 5b87eee..d03daca 100644
@@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
        size_t in_size;
        int ret;
 
+       if (in_len < offsetofend(typeof(cmd), reserved))
+               return -EINVAL;
        in_size = min_t(size_t, in_len, sizeof(cmd));
        if (copy_from_user(&cmd, inbuf, in_size))
                return -EFAULT;
@@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
        size_t in_size;
        int ret;
 
+       if (in_len < offsetofend(typeof(cmd), reserved))
+               return -EINVAL;
        in_size = min_t(size_t, in_len, sizeof(cmd));
        if (copy_from_user(&cmd, inbuf, in_size))
                return -EFAULT;
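Both hunks apply the same pattern: offsetofend() (offsetof of a member plus its size) gives the smallest in_len that still contains every field the handler reads, so truncated commands are rejected before the length-capped copy_from_user(). A sketch, with the UAPI struct abridged and assumed:

    struct rdma_ucm_connect {
            struct rdma_ucm_conn_param conn_param;
            __u32 id;
            __u32 reserved; /* last mandatory field; newer optional fields
                             * may follow and are covered by the truncating
                             * copy below */
    };

    if (in_len < offsetofend(typeof(cmd), reserved))
            return -EINVAL; /* userspace supplied too few bytes */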
index 1ab676b..77dca1e 100644
@@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
                work->frags[i].mr =
                        get_prefetchable_mr(pd, advice, sg_list[i].lkey);
                if (!work->frags[i].mr) {
-                       work->num_sge = i - 1;
-                       if (i)
-                               destroy_prefetch_work(work);
+                       work->num_sge = i;
                        return false;
                }
 
@@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
        srcu_key = srcu_read_lock(&dev->odp_srcu);
        if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
                srcu_read_unlock(&dev->odp_srcu, srcu_key);
+               destroy_prefetch_work(work);
                return -EINVAL;
        }
        queue_work(system_unbound_wq, &work->work);
index e050ead..1225b8d 100644
@@ -1766,15 +1766,14 @@ err:
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+                                        struct mlx5_ib_qp *qp,
                                         struct ib_qp_init_attr *init_attr,
-                                        struct mlx5_ib_create_qp *ucmd,
                                         void *qpc)
 {
        int scqe_sz;
        bool allow_scat_cqe = false;
 
-       if (ucmd)
-               allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+       allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
 
        if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
                return;
@@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        u32 *in;
        int err;
 
-       mutex_init(&qp->mutex);
-
        if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
@@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        u32 *in;
        int err;
 
-       mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
 
@@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        }
        if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
            (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
-               configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+               configure_requester_scat_cqe(dev, qp, init_attr, qpc);
 
        if (qp->rq.wqe_cnt) {
                MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        u32 *in;
        int err;
 
-       mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
 
@@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
                return;
        }
 
-       if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+       switch (flag) {
+       case MLX5_QP_FLAG_SCATTER_CQE:
+       case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
                /*
-                * We don't return error if this flag was provided,
-                * and mlx5 doesn't have right capability.
-                */
-               *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+                * We don't return an error if these flags were provided
+                * and mlx5 doesn't have the right capability.
+                */
+               *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+                           MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
                return;
+       default:
+               break;
        }
        mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
 }
@@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
        process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
                            MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+       process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+                           MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
 
        if (qp->type == IB_QPT_RAW_PACKET) {
                cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
                goto free_ucmd;
        }
 
+       mutex_init(&qp->mutex);
        qp->type = type;
        if (udata) {
                err = process_vendor_flags(dev, qp, params.ucmd, attr);
index 7db35dd..332a8ba 100644
@@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
        qp->s_tail_ack_queue = 0;
        qp->s_acked_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
-       if (qp->r_rq.kwq)
-               qp->r_rq.kwq->count = qp->r_rq.size;
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
 }
@@ -2366,31 +2364,6 @@ bad_lkey:
        return 0;
 }
 
-/**
- * get_count - count numbers of request work queue entries
- * in circular buffer
- * @rq: data structure for request queue entry
- * @tail: tail indices of the circular buffer
- * @head: head indices of the circular buffer
- *
- * Return - total number of entries in the circular buffer
- */
-static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
-{
-       u32 count;
-
-       count = head;
-
-       if (count >= rq->size)
-               count = 0;
-       if (count < tail)
-               count += rq->size - tail;
-       else
-               count -= tail;
-
-       return count;
-}
-
 /**
  * get_rvt_head - get head indices of the circular buffer
  * @rq: data structure for request queue entry
@@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 
        if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
                head = get_rvt_head(rq, ip);
-               kwq->count = get_count(rq, tail, head);
+               kwq->count = rvt_get_rq_count(rq, head, tail);
        }
        if (unlikely(kwq->count == 0)) {
                ret = 0;
@@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
                 * the number of remaining WQEs.
                 */
                if (kwq->count < srq->limit) {
-                       kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
+                       kwq->count =
+                               rvt_get_rq_count(rq,
+                                                get_rvt_head(rq, ip), tail);
                        if (kwq->count < srq->limit) {
                                struct ib_event ev;
 
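rvt_get_rq_count() is presumably the removed get_count() hoisted into a shared header -- note the argument order becomes (rq, head, tail) -- so the credit computation in rvt_compute_aeth() below can reuse it. Reconstructed from the deleted code:

    static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
    {
            u32 count = head;

            if (count >= rq->size)
                    count = 0;
            if (count < tail)
                    count += rq->size - tail;
            else
                    count -= tail;

            return count; /* occupied entries, accounting for wrap-around */
    }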
index 977906c..c58735f 100644
@@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
                         * not atomic, which is OK, since the fuzziness is
                         * resolved as further ACKs go out.
                         */
-                       credits = head - tail;
-                       if ((int)credits < 0)
-                               credits += qp->r_rq.size;
+                       credits = rvt_get_rq_count(&qp->r_rq, head, tail);
                }
                /*
                 * Binary search the credit table to find the code to
index 3dd46cd..88e7900 100644
@@ -407,19 +407,34 @@ free_dst:
        return err;
 }
 
+static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
+{
+       if (bareudp->ethertype == proto)
+               return true;
+
+       if (!bareudp->multi_proto_mode)
+               return false;
+
+       if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
+           proto == htons(ETH_P_MPLS_MC))
+               return true;
+
+       if (bareudp->ethertype == htons(ETH_P_IP) &&
+           proto == htons(ETH_P_IPV6))
+               return true;
+
+       return false;
+}
+
 static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct bareudp_dev *bareudp = netdev_priv(dev);
        struct ip_tunnel_info *info = NULL;
        int err;
 
-       if (skb->protocol != bareudp->ethertype) {
-               if (!bareudp->multi_proto_mode ||
-                   (skb->protocol !=  htons(ETH_P_MPLS_MC) &&
-                    skb->protocol !=  htons(ETH_P_IPV6))) {
-                       err = -EINVAL;
-                       goto tx_error;
-               }
+       if (!bareudp_proto_valid(bareudp, skb->protocol)) {
+               err = -EINVAL;
+               goto tx_error;
        }
 
        info = skb_tunnel_info(skb);
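The old inline test let any multi-proto tunnel transmit MPLS-multicast and IPv6 frames; the helper ties each fallback to the tunnel's configured ethertype. Illustrative calls for a tunnel created with ETH_P_MPLS_UC (values assumed):

    bareudp_proto_valid(bareudp, htons(ETH_P_MPLS_UC)); /* true: exact match */
    bareudp_proto_valid(bareudp, htons(ETH_P_MPLS_MC)); /* true only with multi_proto_mode */
    bareudp_proto_valid(bareudp, htons(ETH_P_IPV6));    /* false: the IPv6 fallback applies
                                                         * only to ETH_P_IP tunnels */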
index 8d13ea3..66e67b2 100644
@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
        port->reset = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(port->reset)) {
                dev_err(dev, "no reset\n");
+               clk_disable_unprepare(port->pclk);
                return PTR_ERR(port->reset);
        }
        reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
                                        IRQF_SHARED,
                                        port_names[port->id],
                                        port);
-       if (ret)
+       if (ret) {
+               clk_disable_unprepare(port->pclk);
                return ret;
+       }
 
        ret = register_netdev(netdev);
        if (!ret) {
index 33c481d..71ed4c5 100644
@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
        int k, sizeoflast;
        dma_addr_t dma;
 
-       if (type == DESC_TYPE_SKB) {
-               struct sk_buff *skb = (struct sk_buff *)priv;
-               int ret;
-
-               ret = hns3_fill_skb_desc(ring, skb, desc);
-               if (unlikely(ret < 0))
-                       return ret;
-
-               dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-       } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+       if (type == DESC_TYPE_FRAGLIST_SKB ||
+           type == DESC_TYPE_SKB) {
                struct sk_buff *skb = (struct sk_buff *)priv;
 
                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        next_to_use_head = ring->next_to_use;
 
+       ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+       if (unlikely(ret < 0))
+               goto fill_err;
+
        ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
        if (unlikely(ret < 0))
                goto fill_err;
@@ -4140,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
                return;
 
        if (linkup) {
-               netif_carrier_on(netdev);
                netif_tx_wake_all_queues(netdev);
+               netif_carrier_on(netdev);
                if (netif_msg_link(handle))
                        netdev_info(netdev, "link up\n");
        } else {
index bb4a632..36575e7 100644
@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
        /* to avoid rule conflict, when user configure rule by ethtool,
         * we need to clear all arfs rules
         */
+       spin_lock_bh(&hdev->fd_rule_lock);
        hclge_clear_arfs_rules(handle);
 
-       spin_lock_bh(&hdev->fd_rule_lock);
        ret = hclge_fd_config_rule(hdev, rule);
 
        spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
        return ret;
 }
 
+/* must be called with fd_rule_lock held */
 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
                                     bool clear_list)
 {
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
        if (!hnae3_dev_fd_supported(hdev))
                return;
 
-       spin_lock_bh(&hdev->fd_rule_lock);
        for_each_set_bit(location, hdev->fd_bmap,
                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
                hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
                bitmap_zero(hdev->fd_bmap,
                            hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
        }
-
-       spin_unlock_bh(&hdev->fd_rule_lock);
 }
 
 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
                                      u16 flow_id, struct flow_keys *fkeys)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_fd_rule_tuples new_tuples;
+       struct hclge_fd_rule_tuples new_tuples = {};
        struct hclge_dev *hdev = vport->back;
        struct hclge_fd_rule *rule;
        u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
        if (!hnae3_dev_fd_supported(hdev))
                return -EOPNOTSUPP;
 
-       memset(&new_tuples, 0, sizeof(new_tuples));
-       hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
-       spin_lock_bh(&hdev->fd_rule_lock);
-
        /* when there is already fd rule existed add by user,
         * arfs should not work
         */
+       spin_lock_bh(&hdev->fd_rule_lock);
        if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
                spin_unlock_bh(&hdev->fd_rule_lock);
                return -EOPNOTSUPP;
        }
 
+       hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
        /* check is there flow director filter existed for this flow,
         * if not, create a new filter for it;
         * if filter exist with different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
 #endif
 }
 
+/* must be called with fd_rule_lock held */
 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
 {
 #ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 
        hdev->fd_en = enable;
        clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
-       if (!enable)
+
+       if (!enable) {
+               spin_lock_bh(&hdev->fd_rule_lock);
                hclge_del_all_fd_entries(handle, clear);
-       else
+               spin_unlock_bh(&hdev->fd_rule_lock);
+       } else {
                hclge_restore_fd_entries(handle);
+       }
 }
 
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        int i;
 
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+       spin_lock_bh(&hdev->fd_rule_lock);
        hclge_clear_arfs_rules(handle);
+       spin_unlock_bh(&hdev->fd_rule_lock);
 
        /* If it is not PF reset, the firmware will disable the MAC,
         * so it only need to stop phy here.
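The locking convention after this series of hunks: hclge_clear_arfs_rules() and hclge_del_all_fd_entries() no longer take fd_rule_lock themselves, so clearing the aRFS rules and any follow-up table update happen atomically inside one critical section held by the caller, e.g.:

    spin_lock_bh(&hdev->fd_rule_lock);
    hclge_clear_arfs_rules(handle);    /* caller holds fd_rule_lock */
    ret = hclge_fd_config_rule(hdev, rule);
    spin_unlock_bh(&hdev->fd_rule_lock);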
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
        bool writen_to_tbl = false;
        int ret = 0;
 
-       /* When device is resetting, firmware is unable to handle
-        * mailbox. Just record the vlan id, and remove it after
+       /* When device is resetting or reset failed, firmware is unable to
+        * handle mailbox. Just record the vlan id, and remove it after
         * reset finished.
         */
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+       if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+            test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
                set_bit(vlan_id, vport->vlan_del_fail_bmap);
                return -EBUSY;
        }
index a10b022..9162856 100644
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;
 
-       /* When device is resetting, firmware is unable to handle
-        * mailbox. Just record the vlan id, and remove it after
+       /* When device is resetting or reset failed, firmware is unable to
+        * handle mailbox. Just record the vlan id, and remove it after
         * reset finished.
         */
-       if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+       if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+            test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
                set_bit(vlan_id, hdev->vlan_del_fail_bmap);
                return -EBUSY;
        }
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
 {
        struct hnae3_handle *nic = &hdev->nic;
        struct hclge_vf_to_pf_msg send_msg;
+       int ret;
 
        rtnl_lock();
-       hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
-       rtnl_unlock();
+
+       if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+           test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+               dev_warn(&hdev->pdev->dev,
+                        "is resetting when updating port based vlan info\n");
+               rtnl_unlock();
+               return;
+       }
+
+       ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+       if (ret) {
+               rtnl_unlock();
+               return;
+       }
 
        /* send msg to PF and wait update port based vlan info */
        hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
                               HCLGE_MBX_PORT_BASE_VLAN_CFG);
        memcpy(send_msg.data, port_base_vlan_info, data_size);
-       hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
-       if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
-               nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
-       else
-               nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+       ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+       if (!ret) {
+               if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+                       nic->port_base_vlan_state = state;
+               else
+                       nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+       }
 
-       rtnl_lock();
        hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
        rtnl_unlock();
 }
index 0fd7eae..5afb3c9 100644
@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
 req_tx_irq_failed:
        for (j = 0; j < i; j++) {
                free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-               irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+               irq_dispose_mapping(adapter->tx_scrq[j]->irq);
        }
        release_sub_crqs(adapter, 1);
        return rc;
index f999cca..489bb5b 100644
@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
         */
        hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
        ret_val = e1000_disable_ulp_lpt_lp(hw, true);
-       if (ret_val) {
+       if (ret_val)
                e_warn("Failed to disable ULP\n");
-               goto out;
-       }
 
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
index 8bb3db2..6e5861b 100644
@@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work)
        struct igb_adapter *adapter;
        adapter = container_of(work, struct igb_adapter, reset_task);
 
+       rtnl_lock();
+       /* If we're already down or resetting, just bail */
+       if (test_bit(__IGB_DOWN, &adapter->state) ||
+           test_bit(__IGB_RESETTING, &adapter->state)) {
+               rtnl_unlock();
+               return;
+       }
+
        igb_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        igb_reinit_locked(adapter);
+       rtnl_unlock();
 }
 
 /**
index 6478656..75a8c40 100644
@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
        if (!netif_running(pf->netdev))
                return;
 
+       rtnl_lock();
        otx2_stop(pf->netdev);
        pf->reset_count++;
        otx2_open(pf->netdev);
        netif_trans_update(pf->netdev);
+       rtnl_unlock();
 }
 
 static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
 
        pf = netdev_priv(netdev);
 
+       cancel_work_sync(&pf->reset_task);
        /* Disable link notifications */
        otx2_cgx_config_linkevents(pf, false);
 
index f422751..92a3db6 100644
@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
 
        vf = netdev_priv(netdev);
 
+       cancel_work_sync(&vf->reset_task);
+       unregister_netdev(netdev);
        otx2vf_disable_mbox_intr(vf);
 
        otx2_detach_resources(&vf->mbox);
index f6a1f86..a1c45b3 100644
@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
        return 0;
 }
 
-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+                                  phy_interface_t interface, int speed)
 {
        u32 val;
        int ret;
 
+       if (interface == PHY_INTERFACE_MODE_TRGMII) {
+               mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+               val = 500000000;
+               ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+               if (ret)
+                       dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+               return;
+       }
+
        val = (speed == SPEED_1000) ?
                INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
        mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
                                                              state->interface))
                                        goto err_phy;
                        } else {
-                               if (state->interface !=
-                                   PHY_INTERFACE_MODE_TRGMII)
-                                       mtk_gmac0_rgmii_adjust(mac->hw,
-                                                              state->speed);
+                               mtk_gmac0_rgmii_adjust(mac->hw,
+                                                      state->interface,
+                                                      state->speed);
 
                                /* mt7623_pad_clk_setup */
                                for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        eth->netdev[id]->irq = eth->irq[0];
        eth->netdev[id]->dev.of_node = np;
 
+       eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
        return 0;
 
 free_netdev:
index 3d9aa7d..2d3e457 100644
@@ -4356,12 +4356,14 @@ end:
 static void mlx4_shutdown(struct pci_dev *pdev)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev *dev = persist->dev;
 
        mlx4_info(persist->dev, "mlx4_shutdown was called\n");
        mutex_lock(&persist->interface_state_mutex);
        if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_unload_one(pdev);
        mutex_unlock(&persist->interface_state_mutex);
+       mlx4_pci_disable_device(dev);
 }
 
 static const struct pci_error_handlers mlx4_err_handler = {
index bdb7133..3e44e4d 100644
@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
 
 static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       struct mlx5e_rep_priv *rpriv;
+       struct mlx5e_priv *priv;
 
        /* A given netdev is not a representor or not a slave of LAG configuration */
        if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
                return false;
 
+       priv = netdev_priv(netdev);
+       rpriv = priv->ppriv;
+
        /* Egress acl forward to vport is supported only non-uplink representor */
        return rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
index eefeb1c..245a99f 100644
@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
                }
        }
 
-       tun_dst = tun_rx_dst(enc_opts.key.len);
+       if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+               tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+                                          key.enc_ip.tos, key.enc_ip.ttl,
+                                          key.enc_tp.dst, TUNNEL_KEY,
+                                          key32_to_tunnel_id(key.enc_key_id.keyid),
+                                          enc_opts.key.len);
+       } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+               tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+                                            key.enc_ip.tos, key.enc_ip.ttl,
+                                            key.enc_tp.dst, 0, TUNNEL_KEY,
+                                            key32_to_tunnel_id(key.enc_key_id.keyid),
+                                            enc_opts.key.len);
+       } else {
+               netdev_dbg(priv->netdev,
+                          "Couldn't restore tunnel, unsupported addr_type: %d\n",
+                          key.enc_control.addr_type);
+               return false;
+       }
+
        if (!tun_dst) {
-               WARN_ON_ONCE(true);
+               netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
                return false;
        }
 
-       ip_tunnel_key_init(&tun_dst->u.tun_info.key,
-                          key.enc_ipv4.src, key.enc_ipv4.dst,
-                          key.enc_ip.tos, key.enc_ip.ttl,
-                          0, /* label */
-                          key.enc_tp.src, key.enc_tp.dst,
-                          key32_to_tunnel_id(key.enc_key_id.keyid),
-                          TUNNEL_KEY);
+       tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
 
        if (enc_opts.key.len)
                ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
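For reference, the dst_metadata.h helpers used above take the tunnel destination port but no source port, which is presumably why tp_src is patched into the key afterwards; assumed signature of the IPv4 variant:

    struct metadata_dst *__ip_tun_set_dst(__be32 saddr, __be32 daddr,
                                          __u8 tos, __u8 ttl,
                                          __be16 tp_dst, __be16 flags,
                                          __be64 tunnel_id, int md_size);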
index 951ea26..e472ed0 100644
@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
                MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
        }
 
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
        return 0;
 }
 
index 58b1319..2805416 100644
@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
                         gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
        }
 
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
        return 0;
 }
 
index 37b1768..038a0f1 100644
@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
        MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                 be32_to_cpu(enc_keyid.key->keyid));
 
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
        return 0;
 }
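The same one-line fix lands in the geneve, gretap and vxlan parsers: writing tunnel fields into the misc group of the match value is not enough by itself; the FTE's match_criteria_enable bitmask must also declare that group valid, roughly:

    misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
    MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, vni);       /* write the key... */
    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;  /* ...and enable the group */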
 
index 081f150..3b892ec 100644
@@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
                                        &rq->wq_ctrl);
                if (err)
-                       return err;
+                       goto err_rq_wq_destroy;
 
                rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
 
@@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
                                         &rq->wq_ctrl);
                if (err)
-                       return err;
+                       goto err_rq_wq_destroy;
 
                rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
 
@@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
        priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
 }
 
+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
+                                    enum mlx5_port_status state)
+{
+       struct mlx5_eswitch *esw = mdev->priv.eswitch;
+       int vport_admin_state;
+
+       mlx5_set_port_admin_status(mdev, state);
+
+       if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+               return;
+
+       if (state == MLX5_PORT_UP)
+               vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+       else
+               vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
+       mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
+}
+
 int mlx5e_open_locked(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3101,7 +3120,7 @@ int mlx5e_open(struct net_device *netdev)
        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(netdev);
        if (!err)
-               mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+               mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -3135,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev)
                return -ENODEV;
 
        mutex_lock(&priv->state_lock);
-       mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
+       mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
        err = mlx5e_close_locked(netdev);
        mutex_unlock(&priv->state_lock);
 
@@ -5182,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        /* Marking the link as currently not needed by the Driver */
        if (!netif_running(netdev))
-               mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+               mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
 
        mlx5e_set_netdev_mtu_boundaries(priv);
        mlx5e_set_dev_port_mtu(priv);
@@ -5390,6 +5409,8 @@ err_cleanup_tx:
        profile->cleanup_tx(priv);
 
 out:
+       set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+       cancel_work_sync(&priv->update_stats_work);
        return err;
 }
 
index 006807e..9519a61 100644
@@ -936,6 +936,7 @@ err_close_drop_rq:
 
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
+       mlx5e_ethtool_cleanup_steering(priv);
        rep_vport_rx_rule_destroy(priv);
        mlx5e_destroy_rep_root_ft(priv);
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 
        mlx5e_rep_tc_enable(priv);
 
+       mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+                                     0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
index cc84121..fcedb5b 100644
@@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                 match.key->vlan_priority);
 
                        *match_level = MLX5_MATCH_L2;
+                       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
                }
        }
 
index 1116ab9..43005ca 100644
@@ -1608,7 +1608,7 @@ abort:
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
        }
-
+       esw_destroy_tsar(esw);
        return err;
 }
 
@@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
        else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
                esw_offloads_disable(esw);
 
-       esw_destroy_tsar(esw);
-
        old_mode = esw->mode;
        esw->mode = MLX5_ESWITCH_NONE;
 
@@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
                mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
        }
+       esw_destroy_tsar(esw);
+
        if (clear_vf)
                mlx5_eswitch_clear_vf_vports_info(esw);
 }
@@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 u16 vport, int link_state)
 {
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+       int other_vport = 1;
        int err = 0;
 
        if (!ESW_ALLOWED(esw))
@@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
        if (IS_ERR(evport))
                return PTR_ERR(evport);
 
+       if (vport == MLX5_VPORT_UPLINK) {
+               opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+               other_vport = 0;
+               vport = 0;
+       }
        mutex_lock(&esw->state_lock);
 
-       err = mlx5_modify_vport_admin_state(esw->dev,
-                                           MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
-                                           vport, 1, link_state);
+       err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
        if (err) {
-               mlx5_core_warn(esw->dev,
-                              "Failed to set vport %d link state, err = %d",
-                              vport, err);
+               mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
+                              vport, opmod, err);
                goto unlock;
        }
 
@@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
-               return -EPERM;
        if (IS_ERR(evport))
                return PTR_ERR(evport);
        if (vlan > 4095 || qos > 7)
@@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        u8 set_flags = 0;
        int err;
 
+       if (!ESW_ALLOWED(esw))
+               return -EPERM;
+
        if (vlan || qos)
                set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
 
index a5175e9..5785596 100644
@@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
 static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 {
        return ERR_PTR(-EOPNOTSUPP);
index 060354b..ed75353 100644
@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
        return &esw->offloads.vport_reps[idx];
 }
 
+static void
+mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
+                                 struct mlx5_flow_spec *spec,
+                                 struct mlx5_esw_flow_attr *attr)
+{
+       if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+           attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
+               spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
 
 static void
 mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                         mlx5_eswitch_get_vport_metadata_mask());
 
                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
-               misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-               if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
-                       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        } else {
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 
                spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        }
-
-       if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
-           attr->in_rep->vport == MLX5_VPORT_UPLINK)
-               spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
 }
 
 struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                goto err_esw_get;
        }
 
+       mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+
        if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
                rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
                                                     &flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
        i++;
 
        mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+       mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
 
        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
index 13e2fb7..2569bb6 100644
@@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
        return ft;
 }
 
-/* If reverse if false then return the first flow table in next priority of
+/* If reverse is false then return the first flow table in next priority of
  * prio in the tree, else return the last flow table in the previous priority
  * of prio in the tree.
  */
@@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
        return find_closest_ft(prio, true);
 }
 
-static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
-                                       struct mlx5_flow_namespace *ns)
-{
-       struct mlx5_flow_namespace *root_ns = &root->ns;
-       struct fs_prio *iter_prio;
-       struct fs_prio *prio;
-
-       fs_get_obj(prio, ns->node.parent);
-       list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
-               if (iter_prio == prio &&
-                   !list_is_last(&prio->node.children, &iter_prio->node.list))
-                       return list_next_entry(iter_prio, node.list);
-       }
-       return NULL;
-}
-
 static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
                                                struct mlx5_flow_act *flow_act)
 {
-       struct mlx5_flow_root_namespace *root = find_root(&ft->node);
        struct fs_prio *prio;
+       bool next_ns;
 
-       if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
-               prio = find_fwd_ns_prio(root, ft->ns);
-       else
-               fs_get_obj(prio, ft->node.parent);
+       next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+       fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
 
-       return (prio) ? find_next_chained_ft(prio) : NULL;
+       return find_next_chained_ft(prio);
 }
 
 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
index ef0706d..2d55b7c 100644
@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
        if (rq->extts.index >= clock->ptp_info.n_pins)
                return -EINVAL;
 
+       pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+       if (pin < 0)
+               return -EBUSY;
+
        if (on) {
-               pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
-               if (pin < 0)
-                       return -EBUSY;
                pin_mode = MLX5_PIN_MODE_IN;
                pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE;
        } else {
-               pin = rq->extts.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }
 
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
        if (rq->perout.index >= clock->ptp_info.n_pins)
                return -EINVAL;
 
-       if (on) {
-               pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
-                                  rq->perout.index);
-               if (pin < 0)
-                       return -EBUSY;
+       pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+                          rq->perout.index);
+       if (pin < 0)
+               return -EBUSY;
 
+       if (on) {
                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                               MLX5_MTPPS_FS_ENABLE |
                               MLX5_MTPPS_FS_TIME_STAMP;
        } else {
-               pin = rq->perout.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }
 
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
        return 0;
 }
 
+enum {
+       MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+       MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                           enum ptp_pin_function func, unsigned int chan)
 {
-       return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+       struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+                                               ptp_info);
+
+       switch (func) {
+       case PTP_PF_NONE:
+               return 0;
+       case PTP_PF_EXTTS:
+               return !(clock->pps_info.pin_caps[pin] &
+                        MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+       case PTP_PF_PEROUT:
+               return !(clock->pps_info.pin_caps[pin] &
+                        MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return -EOPNOTSUPP;
 }
 
 static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
        .verify         = NULL,
 };
 
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+                                    u32 *mtpps, u32 mtpps_size)
+{
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+       MLX5_SET(mtpps_reg, in, pin, pin);
+
+       return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+                                   mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+       struct mlx5_core_dev *mdev = clock->mdev;
+       u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+       u8 mode;
+       int err;
+
+       err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+       if (err || !MLX5_GET(mtpps_reg, out, enable))
+               return PTP_PF_NONE;
+
+       mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+       if (mode == MLX5_PIN_MODE_IN)
+               return PTP_PF_EXTTS;
+       else if (mode == MLX5_PIN_MODE_OUT)
+               return PTP_PF_PEROUT;
+
+       return PTP_PF_NONE;
+}
+
 static int mlx5_init_pin_config(struct mlx5_clock *clock)
 {
        int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
                         sizeof(clock->ptp_info.pin_config[i].name),
                         "mlx5_pps%d", i);
                clock->ptp_info.pin_config[i].index = i;
-               clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
-               clock->ptp_info.pin_config[i].chan = i;
+               clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+               clock->ptp_info.pin_config[i].chan = 0;
        }
 
        return 0;
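What drives the new .verify() path, for context: userspace assigns a pin function through the PTP character device (sketch using the standard uapi; fd setup and error handling elided):

    struct ptp_pin_desc desc = {
            .index = 0,
            .func  = PTP_PF_PEROUT, /* rejected unless the pin's MTPPS
                                     * caps advertise PPS-out support */
            .chan  = 0,
    };

    if (ioctl(fd, PTP_PIN_SETFUNC, &desc))
            perror("PTP_PIN_SETFUNC");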
index d6d6fe6..71b6185 100644
@@ -1814,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
        err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
                                    bulk_list, cb, cb_priv, tid);
        if (err) {
-               kfree(trans);
+               kfree_rcu(trans, rcu);
                return err;
        }
        return 0;
@@ -2051,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                        break;
                }
        }
-       rcu_read_unlock();
-       if (!found)
+       if (!found) {
+               rcu_read_unlock();
                goto drop;
+       }
 
        rxl->func(skb, local_port, rxl_item->priv);
+       rcu_read_unlock();
        return;
 
 drop:
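Both hunks are RCU-lifetime fixes: the transaction may still be visible to concurrent RCU readers when the error path runs, so its free must be deferred, and the rx listener must stay under rcu_read_lock() until its handler has actually run. A sketch of the kfree_rcu() side (struct layout assumed; all it requires is an embedded rcu_head named 'rcu'):

    struct mlxsw_reg_trans {
            struct rcu_head rcu;   /* consumed by kfree_rcu() */
            /* remaining members elided */
    };

    kfree_rcu(trans, rcu);         /* free after a grace period, not immediately */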
index fcb88d4..8ac987c 100644
@@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group {
        MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+       MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
        MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
index 019ed50..0521e9d 100644
@@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
 
 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
 {
-       /* Packets with link-local destination IP arriving to the router
-        * are trapped to the CPU, so no need to program specific routes
-        * for them. Only allow prefix routes (usually one fe80::/64) so
-        * that packets are trapped for the right reason.
-        */
-       if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
-           (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
-               return true;
-
        /* Multicast routes aren't supported, so ignore them. Neighbour
         * Discovery packets are specifically trapped.
         */
@@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp->router = router;
        router->mlxsw_sp = mlxsw_sp;
 
-       router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
-       err = register_inetaddr_notifier(&router->inetaddr_nb);
-       if (err)
-               goto err_register_inetaddr_notifier;
-
-       router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
-       err = register_inet6addr_notifier(&router->inet6addr_nb);
-       if (err)
-               goto err_register_inet6addr_notifier;
-
        INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
        err = __mlxsw_sp_router_init(mlxsw_sp);
        if (err)
@@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_neigh_init;
 
-       mlxsw_sp->router->netevent_nb.notifier_call =
-               mlxsw_sp_router_netevent_event;
-       err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
-       if (err)
-               goto err_register_netevent_notifier;
-
        err = mlxsw_sp_mp_hash_init(mlxsw_sp);
        if (err)
                goto err_mp_hash_init;
@@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
        if (err)
                goto err_dscp_init;
 
+       router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+       err = register_inetaddr_notifier(&router->inetaddr_nb);
+       if (err)
+               goto err_register_inetaddr_notifier;
+
+       router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+       err = register_inet6addr_notifier(&router->inet6addr_nb);
+       if (err)
+               goto err_register_inet6addr_notifier;
+
+       mlxsw_sp->router->netevent_nb.notifier_call =
+               mlxsw_sp_router_netevent_event;
+       err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+       if (err)
+               goto err_register_netevent_notifier;
+
        mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
        err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
                                    &mlxsw_sp->router->fib_nb,
@@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
        return 0;
 
 err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
 err_register_netevent_notifier:
+       unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+       unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+       mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
        mlxsw_sp_neigh_fini(mlxsw_sp);
 err_neigh_init:
        mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8174,10 +8170,6 @@ err_ipips_init:
 err_rifs_init:
        __mlxsw_sp_router_fini(mlxsw_sp);
 err_router_init:
-       unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
-       unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
        mutex_destroy(&mlxsw_sp->router->lock);
        kfree(mlxsw_sp->router);
        return err;
@@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
        unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
                                &mlxsw_sp->router->fib_nb);
        unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+       unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+       unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+       mlxsw_core_flush_owq();
        mlxsw_sp_neigh_fini(mlxsw_sp);
        mlxsw_sp_vrs_fini(mlxsw_sp);
        mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
        mlxsw_sp_ipips_fini(mlxsw_sp);
        mlxsw_sp_rifs_fini(mlxsw_sp);
        __mlxsw_sp_router_fini(mlxsw_sp);
-       unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
-       unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
        mutex_destroy(&mlxsw_sp->router->lock);
        kfree(mlxsw_sp->router);
 }
index 157a42c..1e38dfe 100644
@@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
        {
                .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
        },
+       {
+               .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+       },
 };
 
 static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -421,6 +424,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
                .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
                .priority = 2,
        },
+       {
+               .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+               .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+               .priority = 1,
+       },
        {
                .group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
                .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
@@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
                },
        },
        {
-               .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+               .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
                                              TRAP),
                .listeners_arr = {
-                       MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
-                                         false),
+                       MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+                                         TRAP_TO_CPU, false),
                },
        },
        {
index 9cfe1fd..f17da67 100644
@@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 
                spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
 
-               /* Next ts */
-               ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+               /* Get the h/w timestamp */
+               ocelot_get_hwtimestamp(ocelot, &ts);
 
                if (unlikely(!skb_match))
                        continue;
 
-               /* Get the h/w timestamp */
-               ocelot_get_hwtimestamp(ocelot, &ts);
-
                /* Set the timestamp into the skb */
                memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
                skb_tstamp_tx(skb_match, &shhwtstamps);
 
                dev_kfree_skb_any(skb_match);
+
+               /* Next ts */
+               ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
        }
 }
 EXPORT_SYMBOL(ocelot_get_txtstamp);
index d2708a5..4075f5e 100644
@@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
        netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
        err = nixge_of_get_resources(pdev);
        if (err)
-               return err;
+               goto free_netdev;
        __nixge_hw_set_mac_address(ndev);
 
        priv->tx_irq = platform_get_irq_byname(pdev, "tx");
        if (priv->tx_irq < 0) {
                netdev_err(ndev, "could not find 'tx' irq");
-               return priv->tx_irq;
+               err = priv->tx_irq;
+               goto free_netdev;
        }
 
        priv->rx_irq = platform_get_irq_byname(pdev, "rx");
        if (priv->rx_irq < 0) {
                netdev_err(ndev, "could not find 'rx' irq");
-               return priv->rx_irq;
+               err = priv->rx_irq;
+               goto free_netdev;
        }
 
        priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
index 5fd31ba..e55d415 100644
@@ -2001,7 +2001,7 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
                netif_device_detach(lif->netdev);
                err = ionic_stop(lif->netdev);
                if (err)
-                       return err;
+                       goto reset_out;
        }
 
        if (cb)
@@ -2011,6 +2011,8 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
                err = ionic_open(lif->netdev);
                netif_device_attach(lif->netdev);
        }
+
+reset_out:
        mutex_unlock(&lif->queue_lock);
 
        return err;
index 5f123a8..d2fdb54 100644
@@ -2261,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
 
        minor = get_free_serial_index();
        if (minor < 0)
-               goto exit;
+               goto exit2;
 
        /* register our minor number */
        serial->parent->dev = tty_port_register_device_attr(&serial->port,
                        tty_drv, minor, &serial->parent->interface->dev,
                        serial->parent, hso_serial_dev_groups);
+       if (IS_ERR(serial->parent->dev))
+               goto exit2;
 
        /* fill in specific data for later use */
        serial->minor = minor;
@@ -2311,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
        return 0;
 exit:
        hso_serial_tty_unregister(serial);
+exit2:
        hso_serial_common_free(serial);
        return -1;
 }
index eccbf4c..442507f 100644
@@ -377,10 +377,6 @@ struct lan78xx_net {
        struct tasklet_struct   bh;
        struct delayed_work     wq;
 
-       struct usb_host_endpoint *ep_blkin;
-       struct usb_host_endpoint *ep_blkout;
-       struct usb_host_endpoint *ep_intr;
-
        int                     msg_enable;
 
        struct urb              *urb_intr;
@@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
        return NETDEV_TX_OK;
 }
 
-static int
-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
-{
-       int tmp;
-       struct usb_host_interface *alt = NULL;
-       struct usb_host_endpoint *in = NULL, *out = NULL;
-       struct usb_host_endpoint *status = NULL;
-
-       for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
-               unsigned ep;
-
-               in = NULL;
-               out = NULL;
-               status = NULL;
-               alt = intf->altsetting + tmp;
-
-               for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
-                       struct usb_host_endpoint *e;
-                       int intr = 0;
-
-                       e = alt->endpoint + ep;
-                       switch (e->desc.bmAttributes) {
-                       case USB_ENDPOINT_XFER_INT:
-                               if (!usb_endpoint_dir_in(&e->desc))
-                                       continue;
-                               intr = 1;
-                               /* FALLTHROUGH */
-                       case USB_ENDPOINT_XFER_BULK:
-                               break;
-                       default:
-                               continue;
-                       }
-                       if (usb_endpoint_dir_in(&e->desc)) {
-                               if (!intr && !in)
-                                       in = e;
-                               else if (intr && !status)
-                                       status = e;
-                       } else {
-                               if (!out)
-                                       out = e;
-                       }
-               }
-               if (in && out)
-                       break;
-       }
-       if (!alt || !in || !out)
-               return -EINVAL;
-
-       dev->pipe_in = usb_rcvbulkpipe(dev->udev,
-                                      in->desc.bEndpointAddress &
-                                      USB_ENDPOINT_NUMBER_MASK);
-       dev->pipe_out = usb_sndbulkpipe(dev->udev,
-                                       out->desc.bEndpointAddress &
-                                       USB_ENDPOINT_NUMBER_MASK);
-       dev->ep_intr = status;
-
-       return 0;
-}
-
 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
 {
        struct lan78xx_priv *pdata = NULL;
        int ret;
        int i;
 
-       ret = lan78xx_get_endpoints(dev, intf);
-       if (ret) {
-               netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
-                           ret);
-               return ret;
-       }
-
        dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
 
        pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
 static int lan78xx_probe(struct usb_interface *intf,
                         const struct usb_device_id *id)
 {
+       struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
        struct lan78xx_net *dev;
        struct net_device *netdev;
        struct usb_device *udev;
@@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf,
 
        mutex_init(&dev->stats.access_lock);
 
+       if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+               ret = -ENODEV;
+               goto out2;
+       }
+
+       dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+       ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
+       if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
+               ret = -ENODEV;
+               goto out2;
+       }
+
+       dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+       ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
+       if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
+               ret = -ENODEV;
+               goto out2;
+       }
+
+       ep_intr = &intf->cur_altsetting->endpoint[2];
+       if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
+               ret = -ENODEV;
+               goto out2;
+       }
+
+       dev->pipe_intr = usb_rcvintpipe(dev->udev,
+                                       usb_endpoint_num(&ep_intr->desc));
+
        ret = lan78xx_bind(dev, intf);
        if (ret < 0)
                goto out2;
@@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf,
        netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
        netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
 
-       dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
-       dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
-       dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
-
-       dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
-       dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
-
-       dev->pipe_intr = usb_rcvintpipe(dev->udev,
-                                       dev->ep_intr->desc.bEndpointAddress &
-                                       USB_ENDPOINT_NUMBER_MASK);
-       period = dev->ep_intr->desc.bInterval;
-
+       period = ep_intr->desc.bInterval;
        maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
        buf = kmalloc(maxp, GFP_KERNEL);
        if (buf) {
@@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf,
                        usb_fill_int_urb(dev->urb_intr, dev->udev,
                                         dev->pipe_intr, buf, maxp,
                                         intr_complete, dev, period);
+                       dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
                }
        }
 
index 89d85dc..a7c3939 100644
@@ -1376,6 +1376,7 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct vxlan_fdb *f;
 
+               rcu_read_lock();
                hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
                        struct vxlan_rdst *rd;
 
@@ -1387,8 +1388,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGH,
                                                     NLM_F_MULTI, NULL);
-                               if (err < 0)
+                               if (err < 0) {
+                                       rcu_read_unlock();
                                        goto out;
+                               }
 skip_nh:
                                *idx += 1;
                                continue;
@@ -1403,12 +1406,15 @@ skip_nh:
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGH,
                                                     NLM_F_MULTI, rd);
-                               if (err < 0)
+                               if (err < 0) {
+                                       rcu_read_unlock();
                                        goto out;
+                               }
 skip:
                                *idx += 1;
                        }
                }
+               rcu_read_unlock();
        }
 out:
        return err;
@@ -3070,8 +3076,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
                        if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
                                continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
-                       if (!is_zero_ether_addr(f->eth_addr))
-                               vxlan_fdb_destroy(vxlan, f, true, true);
+                       if (is_zero_ether_addr(f->eth_addr) &&
+                           f->vni == vxlan->cfg.vni)
+                               continue;
+                       vxlan_fdb_destroy(vxlan, f, true, true);
                }
                spin_unlock_bh(&vxlan->hash_lock[h]);
        }
index add0401..4ee2330 100644
@@ -1102,6 +1102,9 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
        int pos;
        int len;
 
+       if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
+               return 0;
+
        c.identify.opcode = nvme_admin_identify;
        c.identify.nsid = cpu_to_le32(nsid);
        c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;
@@ -1115,18 +1118,6 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
        if (status) {
                dev_warn(ctrl->device,
                        "Identify Descriptors failed (%d)\n", status);
-                /*
-                 * Don't treat non-retryable errors as fatal, as we potentially
-                 * already have a NGUID or EUI-64.  If we failed with DNR set,
-                 * we want to silently ignore the error as we can still
-                 * identify the device, but if the status has DNR set, we want
-                 * to propagate the error back specifically for the disk
-                 * revalidation flow to make sure we don't abandon the
-                 * device just because of a temporal retry-able error (such
-                 * as path of transport errors).
-                 */
-               if (status > 0 && (status & NVME_SC_DNR))
-                       status = 0;
                goto free_data;
        }
 
index 1de3f9b..09ffc32 100644
@@ -129,6 +129,13 @@ enum nvme_quirks {
         * Don't change the value of the temperature threshold feature
         */
        NVME_QUIRK_NO_TEMP_THRESH_CHANGE        = (1 << 14),
+
+       /*
+        * The controller doesn't handle the Identify Namespace
+        * Identification Descriptor list subcommand despite claiming
+        * NVMe 1.3 compliance.
+        */
+       NVME_QUIRK_NO_NS_DESC_LIST              = (1 << 15),
 };
 
 /*
index b1d18f0..d4b1ff7 100644
@@ -3099,6 +3099,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS |
                                NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x126f, 0x2263),   /* Silicon Motion unidentified */
+               .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
        { PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
@@ -3122,6 +3124,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1c5c, 0x1504),   /* SK Hynix PC400 */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
                .driver_data = NVME_QUIRK_SINGLE_VECTOR },
index 79ef2b8..f3a9181 100644
@@ -1382,6 +1382,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        if (nctrl->opts->tos >= 0)
                ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
 
+       /* Set a 10 second timeout for icresp recvmsg */
+       queue->sock->sk->sk_rcvtimeo = 10 * HZ;
+
        queue->sock->sk->sk_allocation = GFP_ATOMIC;
        nvme_tcp_set_queue_io_cpu(queue);
        queue->request = NULL;
index 812bfc3..2ea61ab 100644
@@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
 
+static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
+{
+       pci_info(dev, "Disabling ASPM L0s/L1\n");
+       pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+}
+
+/*
+ * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
+ * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
+ * disable both L0s and L1 for now to be safe.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+
 /*
  * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
  * Link bit cleared after starting the link retrain process to allow this
index ff1ee15..f8ff30c 100644
@@ -7,6 +7,8 @@ config PINCTRL_MSM
        select PINCONF
        select GENERIC_PINCONF
        select GPIOLIB_IRQCHIP
+       select IRQ_DOMAIN_HIERARCHY
+       select IRQ_FASTEOI_HIERARCHY_HANDLERS
 
 config PINCTRL_APQ8064
        tristate "Qualcomm APQ8064 pin controller driver"
index 83b7d64..c322f30 100644
@@ -832,6 +832,52 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
        msm_gpio_irq_clear_unmask(d, false);
 }
 
+/**
+ * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
+ * @d: The irq data.
+ *
+ * This is much like msm_gpio_update_dual_edge_pos() but for IRQs that are
+ * normally handled by the parent irqchip.  The logic here is slightly
+ * different due to what's easy to do with our parent, but in principle it's
+ * the same.
+ */
+static void msm_gpio_update_dual_edge_parent(struct irq_data *d)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+       const struct msm_pingroup *g = &pctrl->soc->groups[d->hwirq];
+       int loop_limit = 100;
+       unsigned int val;
+       unsigned int type;
+
+       /* Read the value and make a guess about what edge we need to catch */
+       val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+       type = val ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+
+       do {
+               /* Set the parent to catch the next edge */
+               irq_chip_set_type_parent(d, type);
+
+               /*
+                * Possibly the line changed between when we last read "val"
+                * (and decided what edge we needed) and when we set the edge.
+                * If the value didn't change (or changed and then changed
+                * back) then we're done.
+                */
+               val = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+               if (type == IRQ_TYPE_EDGE_RISING) {
+                       if (!val)
+                               return;
+                       type = IRQ_TYPE_EDGE_FALLING;
+               } else if (type == IRQ_TYPE_EDGE_FALLING) {
+                       if (val)
+                               return;
+                       type = IRQ_TYPE_EDGE_RISING;
+               }
+       } while (loop_limit-- > 0);
+       dev_warn_once(pctrl->dev, "dual-edge irq failed to stabilize\n");
+}
+
 static void msm_gpio_irq_ack(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -840,8 +886,11 @@ static void msm_gpio_irq_ack(struct irq_data *d)
        unsigned long flags;
        u32 val;
 
-       if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+       if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+               if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
+                       msm_gpio_update_dual_edge_parent(d);
                return;
+       }
 
        g = &pctrl->soc->groups[d->hwirq];
 
@@ -860,6 +909,17 @@ static void msm_gpio_irq_ack(struct irq_data *d)
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
+static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
+                                                      unsigned int type)
+{
+       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+       struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+
+       return type == IRQ_TYPE_EDGE_BOTH &&
+              pctrl->soc->wakeirq_dual_edge_errata && d->parent_data &&
+              test_bit(d->hwirq, pctrl->skip_wake_irqs);
+}
+
 static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -868,11 +928,21 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        unsigned long flags;
        u32 val;
 
+       if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
+               set_bit(d->hwirq, pctrl->dual_edge_irqs);
+               irq_set_handler_locked(d, handle_fasteoi_ack_irq);
+               msm_gpio_update_dual_edge_parent(d);
+               return 0;
+       }
+
        if (d->parent_data)
                irq_chip_set_type_parent(d, type);
 
-       if (test_bit(d->hwirq, pctrl->skip_wake_irqs))
+       if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
+               clear_bit(d->hwirq, pctrl->dual_edge_irqs);
+               irq_set_handler_locked(d, handle_fasteoi_irq);
                return 0;
+       }
 
        g = &pctrl->soc->groups[d->hwirq];
 
index 9452da1..7486fe0 100644
@@ -113,6 +113,9 @@ struct msm_gpio_wakeirq_map {
  * @pull_no_keeper: The SoC does not support keeper bias.
  * @wakeirq_map:    The map of wakeup capable GPIOs and the pin at PDC/MPM
  * @nwakeirq_map:   The number of entries in @wakeirq_map
+ * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
+ *                            to be aware that their parent can't handle dual
+ *                            edge interrupts.
  */
 struct msm_pinctrl_soc_data {
        const struct pinctrl_pin_desc *pins;
@@ -128,6 +131,7 @@ struct msm_pinctrl_soc_data {
        const int *reserved_gpios;
        const struct msm_gpio_wakeirq_map *wakeirq_map;
        unsigned int nwakeirq_map;
+       bool wakeirq_dual_edge_errata;
 };
 
 extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
index 1b6465a..1d9acad 100644
@@ -1147,6 +1147,7 @@ static const struct msm_pinctrl_soc_data sc7180_pinctrl = {
        .ntiles = ARRAY_SIZE(sc7180_tiles),
        .wakeirq_map = sc7180_pdc_map,
        .nwakeirq_map = ARRAY_SIZE(sc7180_pdc_map),
+       .wakeirq_dual_edge_errata = true,
 };
 
 static int sc7180_pinctrl_probe(struct platform_device *pdev)
index 6fb4d7e..b22adf0 100644
@@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        continue;
                }
 
-               switch (v_req.type) {
+               switch (vhost32_to_cpu(vq, v_req.type)) {
                case VIRTIO_SCSI_T_TMF:
                        vc.req = &v_req.tmf;
                        vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
index 5809e5f..5c92e4a 100644
@@ -85,7 +85,7 @@ config VIRTIO_MEM
        depends on VIRTIO
        depends on MEMORY_HOTPLUG_SPARSE
        depends on MEMORY_HOTREMOVE
-       select CONTIG_ALLOC
+       depends on CONTIG_ALLOC
        help
         This driver provides access to virtio-mem paravirtualized memory
         devices, allowing to hotplug and hotunplug memory.
index 1f157d2..8be02f3 100644
@@ -578,10 +578,14 @@ static int init_vqs(struct virtio_balloon *vb)
 static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
 {
        if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
-                              &vb->config_read_bitmap))
+                              &vb->config_read_bitmap)) {
                virtio_cread(vb->vdev, struct virtio_balloon_config,
                             free_page_hint_cmd_id,
                             &vb->cmd_id_received_cache);
+               /* Legacy balloon config space is LE, unlike all other devices. */
+               if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+                       vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
+       }
 
        return vb->cmd_id_received_cache;
 }
@@ -974,6 +978,11 @@ static int virtballoon_probe(struct virtio_device *vdev)
                /*
                 * Let the hypervisor know that we are expecting a
                 * specific value to be written back in balloon pages.
+                *
+                * If the PAGE_POISON value were larger than a byte, we would
+                * need to byte-swap poison_val here to guarantee it is
+                * little-endian. However, for now it is a single byte, so we
+                * can pass it as-is.
                 */
                if (!want_init_on_free())
                        memset(&poison_val, PAGE_POISON, sizeof(poison_val));
index 32b0064..493e504 100644
@@ -4199,10 +4199,9 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 
        hash_del(&req->hash_node);
        io_poll_complete(req, req->result, 0);
-       req->flags |= REQ_F_COMP_LOCKED;
-       io_put_req_find_next(req, nxt);
        spin_unlock_irq(&ctx->completion_lock);
 
+       io_put_req_find_next(req, nxt);
        io_cqring_ev_posted(ctx);
 }
 
@@ -4658,6 +4657,10 @@ static int io_poll_add(struct io_kiocb *req)
        struct io_poll_table ipt;
        __poll_t mask;
 
+       /* ->work is in union with hash_node and others */
+       io_req_work_drop_env(req);
+       req->flags &= ~REQ_F_WORK_INITIALIZED;
+
        INIT_HLIST_NODE(&req->hash_node);
        INIT_LIST_HEAD(&req->list);
        ipt.pt._qproc = io_poll_queue_proc;
index b0b163b..bdcac69 100644
@@ -415,6 +415,13 @@ struct acpi_table_tpm2 {
        /* Platform-specific data follows */
 };
 
+/* Optional trailer for revision 4 holding platform-specific data */
+struct acpi_tpm2_phy {
+       u8  start_method_specific[12];
+       u32 log_area_minimum_length;
+       u64 log_area_start_address;
+};
+
 /* Values for start_method above */
 
 #define ACPI_TPM2_NOT_ALLOWED                       0
index 8b1e020..30a3aab 100644
@@ -456,7 +456,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
 
 #if !defined(inb) && !defined(_inb)
 #define _inb _inb
-static inline u16 _inb(unsigned long addr)
+static inline u8 _inb(unsigned long addr)
 {
        u8 val;
 
@@ -482,7 +482,7 @@ static inline u16 _inw(unsigned long addr)
 
 #if !defined(inl) && !defined(_inl)
 #define _inl _inl
-static inline u16 _inl(unsigned long addr)
+static inline u32 _inl(unsigned long addr)
 {
        u32 val;
 
index 6c3ef49..e73dea5 100644
@@ -865,6 +865,18 @@ struct drm_mode_config {
         */
        bool prefer_shadow_fbdev;
 
+       /**
+        * @fbdev_use_iomem:
+        *
+        * Set to true if framebuffer reside in iomem.
+        * When set to true memcpy_toio() is used when copying the framebuffer in
+        * drm_fb_helper.drm_fb_helper_dirty_blit_real().
+        *
+        * FIXME: This should be replaced with a per-mapping is_iomem
+        * flag (like ttm does), and then used everywhere in fbdev code.
+        */
+       bool fbdev_use_iomem;
+
        /**
         * @quirk_addfb_prefer_xbgr_30bpp:
         *
index ee328cf..4e7714c 100644
@@ -1001,7 +1001,7 @@ static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
 static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
                                        int index, struct i2c_board_info *info)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
 {
index aff44d3..0d0d17a 100644
@@ -282,6 +282,24 @@ static inline int list_empty(const struct list_head *head)
        return READ_ONCE(head->next) == head;
 }
 
+/**
+ * list_del_init_careful - deletes entry from list and reinitializes it.
+ * @entry: the element to delete from the list.
+ *
+ * This is the same as list_del_init(), except designed to be used
+ * together with list_empty_careful() in a way to guarantee ordering
+ * of other memory operations.
+ *
+ * Any memory operations done before a list_del_init_careful() are
+ * guaranteed to be visible after a list_empty_careful() test.
+ */
+static inline void list_del_init_careful(struct list_head *entry)
+{
+       __list_del_entry(entry);
+       entry->prev = entry;
+       smp_store_release(&entry->next, entry);
+}
+
 /**
  * list_empty_careful - tests whether a list is empty and not being modified
  * @head: the list to test
@@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head)
  */
 static inline int list_empty_careful(const struct list_head *head)
 {
-       struct list_head *next = head->next;
+       struct list_head *next = smp_load_acquire(&head->next);
        return (next == head) && (next == head->prev);
 }
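The ordering contract documented above is worth seeing end to end: the release store that list_del_init_careful() does on entry->next pairs with the acquire load in list_empty_careful(), so any write made before the delete is visible to a waiter that observes the entry as removed. Below is a rough userspace analogue, a minimal sketch only: C11 atomics stand in for smp_store_release()/smp_load_acquire(), and the two-node list, the lnode type and the payload variable are invented for illustration, not kernel code.

/* gcc -std=c11 -pthread careful_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct lnode {
	struct lnode *prev;
	_Atomic(struct lnode *) next;
};

static struct lnode head, entry;
static int payload;			/* plain data published by the waker */

static void *waker(void *arg)
{
	(void)arg;
	payload = 42;			/* written before the release store */
	/* __list_del_entry(): unlink entry from head <-> entry <-> head */
	head.prev = &head;
	atomic_store_explicit(&head.next, &head, memory_order_relaxed);
	/* reinit; the release pairs with the waiter's acquire load */
	entry.prev = &entry;
	atomic_store_explicit(&entry.next, &entry, memory_order_release);
	return NULL;
}

/* list_empty_careful(): acquire load of ->next, then check ->prev too.
 * The && short-circuit means ->prev is only read once ->next says
 * "empty", i.e. after the release store has been observed. */
static int empty_careful(struct lnode *n)
{
	struct lnode *next =
		atomic_load_explicit(&n->next, memory_order_acquire);

	return next == n && next == n->prev;
}

int main(void)
{
	pthread_t t;

	head.prev = &entry;	atomic_init(&head.next, &entry);
	entry.prev = &head;	atomic_init(&entry.next, &head);

	pthread_create(&t, NULL, waker, NULL);
	while (!empty_careful(&entry))	/* spin until removed and reinited */
		;
	printf("payload=%d\n", payload);	/* guaranteed to print 42 */
	pthread_join(&t, NULL);
	return 0;
}

In the kernel the same pairing lets finish_wait() skip taking the waitqueue lock when the waker has already removed the entry.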
 
index 073b79e..1340e02 100644
@@ -4381,6 +4381,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
 enum {
        MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT  = 0x0,
        MLX5_VPORT_STATE_OP_MOD_ESW_VPORT   = 0x1,
+       MLX5_VPORT_STATE_OP_MOD_UPLINK      = 0x2,
 };
 
 struct mlx5_ifc_arm_monitor_counter_in_bits {
index 45e1f8f..9ab7443 100644
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/once.h>
+#include <asm/percpu.h>
 
 #include <uapi/linux/random.h>
 
@@ -119,6 +120,8 @@ struct rnd_state {
        __u32 s1, s2, s3, s4;
 };
 
+DECLARE_PER_CPU(struct rnd_state, net_rand_state);
+
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
index d3432ee..68dab3e 100644
@@ -84,7 +84,7 @@ struct bucket_table {
 
        struct lockdep_map      dep_map;
 
-       struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
+       struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /*
@@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-                                          unsigned int hash);
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-                                            unsigned int hash);
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                 struct bucket_table *tbl,
-                                                 unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested(
+       const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+       const struct bucket_table *tbl, unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+       struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash);
 
 #define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 #define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
-static inline struct rhash_lock_head *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
        const struct bucket_table *tbl, unsigned int hash)
 {
        return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
                                     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
        struct bucket_table *tbl, unsigned int hash)
 {
        return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
                                     &tbl->buckets[hash];
 }
 
-static inline struct rhash_lock_head **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
        struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
        return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert(
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
-                           struct rhash_lock_head **bkt)
+                           struct rhash_lock_head __rcu **bkt)
 {
        local_bh_disable();
        bit_spin_lock(0, (unsigned long *)bkt);
@@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl,
 }
 
 static inline void rht_lock_nested(struct bucket_table *tbl,
-                                  struct rhash_lock_head **bucket,
+                                  struct rhash_lock_head __rcu **bucket,
                                   unsigned int subclass)
 {
        local_bh_disable();
@@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl,
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-                             struct rhash_lock_head **bkt)
+                             struct rhash_lock_head __rcu **bkt)
 {
        lock_map_release(&tbl->dep_map);
        bit_spin_unlock(0, (unsigned long *)bkt);
        local_bh_enable();
 }
 
-static inline struct rhash_head __rcu *__rht_ptr(
-       struct rhash_lock_head *const *bkt)
+static inline struct rhash_head *__rht_ptr(
+       struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
 {
-       return (struct rhash_head __rcu *)
-               ((unsigned long)*bkt & ~BIT(0) ?:
+       return (struct rhash_head *)
+               ((unsigned long)p & ~BIT(0) ?:
                 (unsigned long)RHT_NULLS_MARKER(bkt));
 }
 
@@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr(
  *            access is guaranteed, such as when destroying the table.
  */
 static inline struct rhash_head *rht_ptr_rcu(
-       struct rhash_lock_head *const *bkt)
+       struct rhash_lock_head __rcu *const *bkt)
 {
-       struct rhash_head __rcu *p = __rht_ptr(bkt);
-
-       return rcu_dereference(p);
+       return __rht_ptr(rcu_dereference(*bkt), bkt);
 }
 
 static inline struct rhash_head *rht_ptr(
-       struct rhash_lock_head *const *bkt,
+       struct rhash_lock_head __rcu *const *bkt,
        struct bucket_table *tbl,
        unsigned int hash)
 {
-       return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
+       return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
 }
 
 static inline struct rhash_head *rht_ptr_exclusive(
-       struct rhash_lock_head *const *bkt)
+       struct rhash_lock_head __rcu *const *bkt)
 {
-       return rcu_dereference_protected(__rht_ptr(bkt), 1);
+       return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
 }
 
-static inline void rht_assign_locked(struct rhash_lock_head **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
                                     struct rhash_head *obj)
 {
-       struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
        if (rht_is_a_nulls(obj))
                obj = NULL;
-       rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+       rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
 }
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
-                                    struct rhash_lock_head **bkt,
+                                    struct rhash_lock_head __rcu **bkt,
                                     struct rhash_head *obj)
 {
-       struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
-
        if (rht_is_a_nulls(obj))
                obj = NULL;
        lock_map_release(&tbl->dep_map);
-       rcu_assign_pointer(*p, obj);
+       rcu_assign_pointer(*bkt, (void *)obj);
        preempt_enable();
        __release(bitlock);
        local_bh_enable();
@@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                .ht = ht,
                .key = key,
        };
-       struct rhash_lock_head *const *bkt;
+       struct rhash_lock_head __rcu *const *bkt;
        struct bucket_table *tbl;
        struct rhash_head *he;
        unsigned int hash;
@@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast(
                .ht = ht,
                .key = key,
        };
-       struct rhash_lock_head **bkt;
+       struct rhash_lock_head __rcu **bkt;
        struct rhash_head __rcu **pprev;
        struct bucket_table *tbl;
        struct rhash_head *head;
@@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one(
        struct rhash_head *obj, const struct rhashtable_params params,
        bool rhlist)
 {
-       struct rhash_lock_head **bkt;
+       struct rhash_lock_head __rcu **bkt;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        unsigned int hash;
@@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast(
        struct rhash_head *obj_old, struct rhash_head *obj_new,
        const struct rhashtable_params params)
 {
-       struct rhash_lock_head **bkt;
+       struct rhash_lock_head __rcu **bkt;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        unsigned int hash;
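Most of this hunk is sparse annotation (__rcu on the bucket pointer types), but the __rht_ptr() rewrite also makes the bucket encoding easier to see: bit 0 of the bucket word doubles as the bit_spin_lock bit, and a bucket whose remaining bits are all zero is reported as a NULLS marker derived from the bucket address. A standalone sketch of that decode step follows; bucket_ptr() and this NULLS_MARKER() are simplified stand-ins for __rht_ptr() and RHT_NULLS_MARKER(), not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define NULLS_MARKER(ptr) ((void *)((uintptr_t)(ptr) | 1))

static void *bucket_ptr(void **bkt)
{
	uintptr_t v = (uintptr_t)*bkt & ~(uintptr_t)1;	/* strip lock bit */

	return v ? (void *)v : NULLS_MARKER(bkt);	/* empty -> marker */
}

int main(void)
{
	int obj;
	void *bucket = NULL;

	printf("empty bucket  -> %p\n", bucket_ptr(&bucket));
	bucket = (void *)((uintptr_t)&obj | 1);		/* locked, occupied */
	printf("locked bucket -> %p (obj at %p)\n",
	       bucket_ptr(&bucket), (void *)&obj);
	return 0;
}

With the decode centralized in __rht_ptr(), the rcu_dereference()/rht_dereference_bucket() call moves to the callers, which is what lets the bucket arrays carry a plain __rcu annotation instead of casting around it.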
index 03e9b18..8f4ff39 100644
@@ -96,6 +96,7 @@ struct tpm_space {
        u8 *context_buf;
        u32 session_tbl[3];
        u8 *session_buf;
+       u32 buf_size;
 };
 
 struct tpm_bios_log {
index 64356b1..739ba9a 100644
@@ -211,9 +211,16 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
 
        efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
 
-       /* Check if event is malformed. */
+       /*
+        * Perform validation of the event in order to identify malformed
+        * events. This function may be asked to parse arbitrary byte sequences
+        * immediately following a valid event log. The caller expects this
+        * function to recognize that the byte sequence is not a valid event
+        * and to return an event size of 0.
+        */
        if (memcmp(efispecid->signature, TCG_SPECID_SIG,
-                  sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
+                  sizeof(TCG_SPECID_SIG)) ||
+           !efispecid->num_algs || count != efispecid->num_algs) {
                size = 0;
                goto out;
        }
index fdb0710..8418b7d 100644
@@ -274,6 +274,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
 int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
+void __ipv6_sock_ac_close(struct sock *sk);
 void ipv6_sock_ac_close(struct sock *sk);
 
 int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr);
index 1df6dfe..95b0322 100644
@@ -718,6 +718,7 @@ enum devlink_trap_group_generic_id {
        DEVLINK_TRAP_GROUP_GENERIC_ID_PIM,
        DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB,
        DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY,
+       DEVLINK_TRAP_GROUP_GENERIC_ID_EXTERNAL_DELIVERY,
        DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6,
        DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT,
        DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
@@ -915,6 +916,8 @@ enum devlink_trap_group_generic_id {
        "uc_loopback"
 #define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \
        "local_delivery"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_EXTERNAL_DELIVERY \
+       "external_delivery"
 #define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \
        "ipv6"
 #define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \
index c7d213c..51f65d2 100644
@@ -941,7 +941,7 @@ struct xfrm_dst {
 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
 {
 #ifdef CONFIG_XFRM
-       if (dst->xfrm) {
+       if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
                const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
 
                return xdst->path;
@@ -953,7 +953,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
 {
 #ifdef CONFIG_XFRM
-       if (dst->xfrm) {
+       if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
                struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
                return xdst->child;
        }
@@ -1630,13 +1630,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
                     void *);
 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
-                                         u8 type, int dir,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
+                                         const struct xfrm_mark *mark,
+                                         u32 if_id, u8 type, int dir,
                                          struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete,
                                          int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
-                                    int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net,
+                                    const struct xfrm_mark *mark, u32 if_id,
+                                    u8 type, int dir, u32 id, int delete,
+                                    int *err);
 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
 void xfrm_policy_hash_rebuild(struct net *net);
 u32 xfrm_get_acqseq(void);
index c4369a6..2f1fc23 100644
@@ -305,6 +305,25 @@ struct rvt_rq {
        spinlock_t lock ____cacheline_aligned_in_smp;
 };
 
+/**
+ * rvt_get_rq_count - count the number of request work queue entries
+ * in the circular buffer
+ * @rq: data structure for the request queue
+ * @head: head index of the circular buffer
+ * @tail: tail index of the circular buffer
+ *
+ * Return: total number of entries in the Receive Queue
+ */
+
+static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
+{
+       u32 count = head - tail;
+
+       if ((s32)count < 0)
+               count += rq->size;
+       return count;
+}
+
 /*
  * This structure holds the information that the send tasklet needs
  * to send a RDMA read response or atomic operation.
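rvt_get_rq_count() is the usual occupancy computation for a circular buffer whose head and tail are indices in [0, size): the unsigned subtraction only looks negative, via the signed cast, when head has wrapped around past tail, and adding the ring size corrects it. A self-contained sketch with the same arithmetic; ring_count() and its explicit size parameter are stand-ins for the rvt_rq version.

#include <assert.h>
#include <stdint.h>

/* Occupancy of a ring with head/tail indices in [0, size). */
static uint32_t ring_count(uint32_t size, uint32_t head, uint32_t tail)
{
	uint32_t count = head - tail;

	if ((int32_t)count < 0)		/* head wrapped past tail */
		count += size;
	return count;
}

int main(void)
{
	assert(ring_count(256, 10, 4) == 6);	/* no wrap */
	assert(ring_count(256, 2, 250) == 8);	/* wrapped: 2 - 250 + 256 */
	return 0;
}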
index 8c201f4..b2301bd 100644
@@ -1851,7 +1851,6 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
        }
 
        audit_get_stamp(ab->ctx, &t, &serial);
-       audit_clear_dummy(ab->ctx);
        audit_log_format(ab, "audit(%llu.%03lu:%u): ",
                         (unsigned long long)t.tv_sec, t.tv_nsec/1000000, serial);
 
index f0233dc..ddc2287 100644
@@ -290,13 +290,6 @@ extern int audit_signal_info_syscall(struct task_struct *t);
 extern void audit_filter_inodes(struct task_struct *tsk,
                                struct audit_context *ctx);
 extern struct list_head *audit_killed_trees(void);
-
-static inline void audit_clear_dummy(struct audit_context *ctx)
-{
-       if (ctx)
-               ctx->dummy = 0;
-}
-
 #else /* CONFIG_AUDITSYSCALL */
 #define auditsc_get_stamp(c, t, s) 0
 #define audit_put_watch(w) {}
@@ -330,7 +323,6 @@ static inline int audit_signal_info_syscall(struct task_struct *t)
 }
 
 #define audit_filter_inodes(t, c) AUDIT_DISABLED
-#define audit_clear_dummy(c) {}
 #endif /* CONFIG_AUDITSYSCALL */
 
 extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
index 468a233..fd840c4 100644
@@ -1417,6 +1417,9 @@ static void audit_log_proctitle(void)
        struct audit_context *context = audit_context();
        struct audit_buffer *ab;
 
+       if (!context || context->dummy)
+               return;
+
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
        if (!ab)
                return; /* audit_panic or being filtered */
index 9a1a98d..0443600 100644
@@ -4058,6 +4058,11 @@ static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
        const char *tname, *sym;
        u32 btf_id, i;
 
+       if (!btf_vmlinux) {
+               bpf_log(log, "btf_vmlinux doesn't exist\n");
+               return -EINVAL;
+       }
+
        if (IS_ERR(btf_vmlinux)) {
                bpf_log(log, "btf_vmlinux is malformed\n");
                return -EINVAL;
index b4b288a..b32cc8c 100644
@@ -779,15 +779,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
        htab_elem_free(htab, l);
 }
 
-static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
 {
        struct bpf_map *map = &htab->map;
+       void *ptr;
 
        if (map->ops->map_fd_put_ptr) {
-               void *ptr = fd_htab_map_get_ptr(map, l);
-
+               ptr = fd_htab_map_get_ptr(map, l);
                map->ops->map_fd_put_ptr(ptr);
        }
+}
+
+static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
+{
+       htab_put_fd_value(htab, l);
 
        if (htab_is_prealloc(htab)) {
                __pcpu_freelist_push(&htab->freelist, &l->fnode);
@@ -839,6 +844,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                         */
                        pl_new = this_cpu_ptr(htab->extra_elems);
                        l_new = *pl_new;
+                       htab_put_fd_value(htab, old_elem);
                        *pl_new = old_elem;
                } else {
                        struct pcpu_freelist_node *l;
index ba059fb..01f5d30 100644
@@ -389,7 +389,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
        int ret = default_wake_function(wq_entry, mode, sync, key);
 
        if (ret)
-               list_del_init(&wq_entry->entry);
+               list_del_init_careful(&wq_entry->entry);
 
        return ret;
 }
index ee22ec7..6f16f7c 100644
@@ -719,7 +719,7 @@ static int dequeue_synchronous_signal(kernel_siginfo_t *info)
         * Return the first synchronous signal in the queue.
         */
        list_for_each_entry(q, &pending->list, list) {
-               /* Synchronous signals have a postive si_code */
+               /* Synchronous signals have a positive si_code */
                if ((q->info.si_code > SI_USER) &&
                    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
                        sync = q;
index df1ff80..026ac01 100644
@@ -43,6 +43,7 @@
 #include <linux/sched/debug.h>
 #include <linux/slab.h>
 #include <linux/compat.h>
+#include <linux/random.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
@@ -1742,6 +1743,13 @@ void update_process_times(int user_tick)
        scheduler_tick();
        if (IS_ENABLED(CONFIG_POSIX_TIMERS))
                run_posix_cpu_timers();
+
+       /* The current CPU might make use of net randoms without receiving IRQs
+        * to renew them often enough. Let's update the net_rand_state from a
+        * non-constant value that's not affine to the number of calls to make
+        * sure it's updated when there's some activity (we don't care in idle).
+        */
+       this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
 }
 
 /**
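The fix is deliberately cheap: instead of reseeding, every timer tick folds a value that keeps changing (the rotated jiffies counter plus the tick type) into the per-CPU PRNG state, so the state keeps moving even on CPUs that rarely take the interrupts that would normally renew it. A userspace sketch of the perturbation; rol32() is reimplemented here, and s1/jiffies are local stand-ins for the kernel objects.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << s) | (w >> (32 - s));
}

int main(void)
{
	uint32_t s1 = 0x12345678;	/* stand-in for net_rand_state.s1 */
	uint32_t jiffies = 100000;	/* stand-in for the tick counter */

	for (int tick = 0; tick < 4; tick++, jiffies++) {
		int user_tick = tick & 1;	/* 1 if the tick hit user mode */

		s1 += rol32(jiffies, 24) + user_tick;
		printf("tick %d: s1=%08" PRIx32 "\n", tick, s1);
	}
	return 0;
}

The rol32(jiffies, 24) moves the fast-changing low bits of the counter into the high bits of the state, so consecutive ticks do not just increment s1 by one.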
index 763b920..3d749ab 100644
@@ -48,7 +48,7 @@ static inline void prandom_state_selftest(void)
 }
 #endif
 
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
+DEFINE_PER_CPU(struct rnd_state, net_rand_state);
 
 /**
  *     prandom_u32_state - seeded pseudo-random number generator.
index 9f6890a..c949c1e 100644
@@ -31,7 +31,7 @@
 
 union nested_table {
        union nested_table __rcu *table;
-       struct rhash_lock_head *bucket;
+       struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -222,7 +222,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-                                struct rhash_lock_head **bkt,
+                                struct rhash_lock_head __rcu **bkt,
                                 unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -275,7 +275,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-       struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
+       struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
        int err;
 
        if (!bkt)
@@ -485,7 +485,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-                                  struct rhash_lock_head **bkt,
+                                  struct rhash_lock_head __rcu **bkt,
                                   struct bucket_table *tbl, unsigned int hash,
                                   const void *key, struct rhash_head *obj)
 {
@@ -535,12 +535,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
        return ERR_PTR(-ENOENT);
 }
 
-static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-                                                 struct rhash_lock_head **bkt,
-                                                 struct bucket_table *tbl,
-                                                 unsigned int hash,
-                                                 struct rhash_head *obj,
-                                                 void *data)
+static struct bucket_table *rhashtable_insert_one(
+       struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
+       struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
+       void *data)
 {
        struct bucket_table *new_tbl;
        struct rhash_head *head;
@@ -591,7 +589,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
-       struct rhash_lock_head **bkt;
+       struct rhash_lock_head __rcu **bkt;
        unsigned int hash;
        void *data;
 
@@ -1173,8 +1171,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
-                                            unsigned int hash)
+struct rhash_lock_head __rcu **__rht_bucket_nested(
+       const struct bucket_table *tbl, unsigned int hash)
 {
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1202,10 +1200,10 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
-                                          unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested(
+       const struct bucket_table *tbl, unsigned int hash)
 {
-       static struct rhash_lock_head *rhnull;
+       static struct rhash_lock_head __rcu *rhnull;
 
        if (!rhnull)
                INIT_RHT_NULLS_HEAD(rhnull);
@@ -1213,9 +1211,8 @@ struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
-                                                 struct bucket_table *tbl,
-                                                 unsigned int hash)
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(
+       struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
 {
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
index 385759c..991503b 100644
@@ -1002,6 +1002,7 @@ struct wait_page_queue {
 
 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
 {
+       int ret;
        struct wait_page_key *key = arg;
        struct wait_page_queue *wait_page
                = container_of(wait, struct wait_page_queue, wait);
@@ -1014,17 +1015,35 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
                return 0;
 
        /*
-        * Stop walking if it's locked.
-        * Is this safe if put_and_wait_on_page_locked() is in use?
-        * Yes: the waker must hold a reference to this page, and if PG_locked
-        * has now already been set by another task, that task must also hold
-        * a reference to the *same usage* of this page; so there is no need
-        * to walk on to wake even the put_and_wait_on_page_locked() callers.
+        * If it's an exclusive wait, we get the bit for it, and
+        * stop walking if we can't.
+        *
+        * If it's a non-exclusive wait, then the fact that this
+        * wake function was called means that the bit already
+        * was cleared, and we don't care if somebody then
+        * re-took it.
         */
-       if (test_bit(key->bit_nr, &key->page->flags))
-               return -1;
+       ret = 0;
+       if (wait->flags & WQ_FLAG_EXCLUSIVE) {
+               if (test_and_set_bit(key->bit_nr, &key->page->flags))
+                       return -1;
+               ret = 1;
+       }
+       wait->flags |= WQ_FLAG_WOKEN;
 
-       return autoremove_wake_function(wait, mode, sync, key);
+       wake_up_state(wait->private, mode);
+
+       /*
+        * Ok, we have successfully done what we're waiting for,
+        * and we can unconditionally remove the wait entry.
+        *
+        * Note that this has to be the absolute last thing we do,
+        * since after list_del_init(&wait->entry) the wait entry
+        * might be de-allocated and the process might even have
+        * exited.
+        */
+       list_del_init_careful(&wait->entry);
+       return ret;
 }
 
 static void wake_up_page_bit(struct page *page, int bit_nr)
@@ -1103,16 +1122,31 @@ enum behavior {
                         */
 };
 
+/*
+ * Attempt to check (or get) the page bit, and mark the
+ * waiter woken if successful.
+ */
+static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
+                                       struct wait_queue_entry *wait)
+{
+       if (wait->flags & WQ_FLAG_EXCLUSIVE) {
+               if (test_and_set_bit(bit_nr, &page->flags))
+                       return false;
+       } else if (test_bit(bit_nr, &page->flags))
+               return false;
+
+       wait->flags |= WQ_FLAG_WOKEN;
+       return true;
+}
+
 static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        struct page *page, int bit_nr, int state, enum behavior behavior)
 {
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
-       bool bit_is_set;
        bool thrashing = false;
        bool delayacct = false;
        unsigned long pflags;
-       int ret = 0;
 
        if (bit_nr == PG_locked &&
            !PageUptodate(page) && PageWorkingset(page)) {
@@ -1130,48 +1164,47 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
        wait_page.page = page;
        wait_page.bit_nr = bit_nr;
 
-       for (;;) {
-               spin_lock_irq(&q->lock);
+       /*
+        * Do one last check whether we can get the
+        * page bit synchronously.
+        *
+        * Do the SetPageWaiters() marking before that
+        * to let any waker we _just_ missed know they
+        * need to wake us up (otherwise they'll never
+        * even go to the slow case that looks at the
+        * page queue), and add ourselves to the wait
+        * queue if we need to sleep.
+        *
+        * This part needs to be done under the queue
+        * lock to avoid races.
+        */
+       spin_lock_irq(&q->lock);
+       SetPageWaiters(page);
+       if (!trylock_page_bit_common(page, bit_nr, wait))
+               __add_wait_queue_entry_tail(q, wait);
+       spin_unlock_irq(&q->lock);
 
-               if (likely(list_empty(&wait->entry))) {
-                       __add_wait_queue_entry_tail(q, wait);
-                       SetPageWaiters(page);
-               }
+       /*
+        * From now on, all the logic will be based on
+        * the WQ_FLAG_WOKEN flag, and the page
+        * bit testing (and setting) will be - or has
+        * already been - done by the wake function.
+        *
+        * We can drop our reference to the page.
+        */
+       if (behavior == DROP)
+               put_page(page);
 
+       for (;;) {
                set_current_state(state);
 
-               spin_unlock_irq(&q->lock);
-
-               bit_is_set = test_bit(bit_nr, &page->flags);
-               if (behavior == DROP)
-                       put_page(page);
-
-               if (likely(bit_is_set))
-                       io_schedule();
-
-               if (behavior == EXCLUSIVE) {
-                       if (!test_and_set_bit_lock(bit_nr, &page->flags))
-                               break;
-               } else if (behavior == SHARED) {
-                       if (!test_bit(bit_nr, &page->flags))
-                               break;
-               }
-
-               if (signal_pending_state(state, current)) {
-                       ret = -EINTR;
+               if (signal_pending_state(state, current))
                        break;
-               }
 
-               if (behavior == DROP) {
-                       /*
-                        * We can no longer safely access page->flags:
-                        * even if CONFIG_MEMORY_HOTREMOVE is not enabled,
-                        * there is a risk of waiting forever on a page reused
-                        * for something that keeps it locked indefinitely.
-                        * But best check for -EINTR above before breaking.
-                        */
+               if (wait->flags & WQ_FLAG_WOKEN)
                        break;
-               }
+
+               io_schedule();
        }
 
        finish_wait(q, wait);
@@ -1190,7 +1223,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
         * bother with signals either.
         */
 
-       return ret;
+       return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 }
 
 void wait_on_page_bit(struct page *page, int bit_nr)
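The filemap.c rewrite above moves the bit handoff into the wake function: the waker (not the waiter) tests and, for exclusive waiters, takes the page bit, and the waiter afterwards trusts only WQ_FLAG_WOKEN. A minimal userspace sketch of that handshake, assuming pthreads and C11 atomics; all names here are illustrative, not kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool woken;			/* analogue of WQ_FLAG_WOKEN */
};

static atomic_int page_bit = 1;	/* analogue of PG_locked, initially held */
static struct waiter w = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
};

/* Unlocker/waker: drop the bit, then re-take it on behalf of one
 * exclusive waiter and mark that waiter woken before signalling. */
static void *unlock_and_wake(void *arg)
{
	(void)arg;
	atomic_store(&page_bit, 0);		/* "unlock_page()" */
	if (!atomic_exchange(&page_bit, 1)) {	/* bit handed to waiter */
		pthread_mutex_lock(&w.lock);
		w.woken = true;
		pthread_cond_signal(&w.cond);
		pthread_mutex_unlock(&w.lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, unlock_and_wake, NULL);

	/* Waiter: like the new for (;;) loop, only the woken flag is
	 * consulted; the page bit itself was tested and taken by the
	 * waker, so there is no re-test and no re-take here. */
	pthread_mutex_lock(&w.lock);
	while (!w.woken)
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);

	printf("woken with the bit held: %d\n", atomic_load(&page_bit));
	pthread_join(t, NULL);
	return 0;
}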
index 13cd683..12ecacf 100644
@@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work)
                if (m->rreq->status == REQ_STATUS_SENT) {
                        list_del(&m->rreq->req_list);
                        p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
+               } else if (m->rreq->status == REQ_STATUS_FLSHD) {
+                       /* Ignore replies associated with a cancelled request. */
+                       p9_debug(P9_DEBUG_TRANS,
+                                "Ignore replies associated with a cancelled request\n");
                } else {
                        spin_unlock(&m->client->lock);
                        p9_debug(P9_DEBUG_ERROR,
@@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
 {
        p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
 
+       spin_lock(&client->lock);
+       /* Ignore the cancelled request if its reply was received
+        * before we took the lock.
+        */
+       if (req->status == REQ_STATUS_RCVD) {
+               spin_unlock(&client->lock);
+               return 0;
+       }
+
        /* we haven't received a response for oldreq,
         * remove it from the list.
         */
-       spin_lock(&client->lock);
        list_del(&req->req_list);
+       req->status = REQ_STATUS_FLSHD;
        spin_unlock(&client->lock);
        p9_req_put(req);
 
@@ -803,20 +816,28 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
                return -ENOMEM;
 
        ts->rd = fget(rfd);
+       if (!ts->rd)
+               goto out_free_ts;
+       if (!(ts->rd->f_mode & FMODE_READ))
+               goto out_put_rd;
        ts->wr = fget(wfd);
-       if (!ts->rd || !ts->wr) {
-               if (ts->rd)
-                       fput(ts->rd);
-               if (ts->wr)
-                       fput(ts->wr);
-               kfree(ts);
-               return -EIO;
-       }
+       if (!ts->wr)
+               goto out_put_rd;
+       if (!(ts->wr->f_mode & FMODE_WRITE))
+               goto out_put_wr;
 
        client->trans = ts;
        client->status = Connected;
 
        return 0;
+
+out_put_wr:
+       fput(ts->wr);
+out_put_rd:
+       fput(ts->rd);
+out_free_ts:
+       kfree(ts);
+       return -EIO;
 }
 
 static int p9_socket_open(struct p9_client *client, struct socket *csocket)
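The trans_fd changes above close a cancel-vs-reply race by making both paths agree on req->status under client->lock, and by parking cancelled requests in REQ_STATUS_FLSHD so a late reply is ignored rather than completed twice. A hedged userspace model of that protocol (names hypothetical):

#include <pthread.h>
#include <stdio.h>

enum req_status { REQ_SENT, REQ_RCVD, REQ_FLSHD };

struct req {
	pthread_mutex_t lock;
	enum req_status status;
};

/* Receive path: only complete a request that is still in flight;
 * a request flushed by cancellation is ignored, not completed. */
static int receive_reply(struct req *r)
{
	int ret = 0;

	pthread_mutex_lock(&r->lock);
	if (r->status == REQ_SENT)
		r->status = REQ_RCVD;
	else
		ret = -1;		/* late reply, ignore */
	pthread_mutex_unlock(&r->lock);
	return ret;
}

/* Cancel path: give up quietly if the reply beat us to the lock. */
static void cancel_req(struct req *r)
{
	pthread_mutex_lock(&r->lock);
	if (r->status != REQ_RCVD)
		r->status = REQ_FLSHD;
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct req r = { PTHREAD_MUTEX_INITIALIZER, REQ_SENT };

	cancel_req(&r);
	printf("late reply ignored: %s\n",
	       receive_reply(&r) < 0 ? "yes" : "no");
	return 0;
}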
index cfeaee3..af9d7f2 100644
@@ -1338,6 +1338,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
 {
        struct discovery_state *d = &hdev->discovery;
 
+       if (len > HCI_MAX_AD_LENGTH)
+               return;
+
        bacpy(&d->last_adv_addr, bdaddr);
        d->last_adv_addr_type = bdaddr_type;
        d->last_adv_rssi = rssi;
@@ -5355,7 +5358,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
 
 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                               u8 bdaddr_type, bdaddr_t *direct_addr,
-                              u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
+                              u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
+                              bool ext_adv)
 {
        struct discovery_state *d = &hdev->discovery;
        struct smp_irk *irk;
@@ -5377,6 +5381,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                return;
        }
 
+       if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
+               bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
+               return;
+       }
+
        /* Find the end of the data in case the report contains padded zero
         * bytes at the end causing an invalid length value.
         *
@@ -5437,7 +5446,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
         */
        conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
                                                                direct_addr);
-       if (conn && type == LE_ADV_IND) {
+       if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
                /* Store report for later inclusion by
                 * mgmt_device_connected
                 */
@@ -5491,7 +5500,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
         * event or send an immediate device found event if the data
         * should not be stored for later.
         */
-       if (!has_pending_adv_report(hdev)) {
+       if (!ext_adv && !has_pending_adv_report(hdev)) {
                /* If the report will trigger a SCAN_REQ store it for
                 * later merging.
                 */
@@ -5526,7 +5535,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                /* If the new report will trigger a SCAN_REQ store it for
                 * later merging.
                 */
-               if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
+               if (!ext_adv && (type == LE_ADV_IND ||
+                                type == LE_ADV_SCAN_IND)) {
                        store_pending_adv_report(hdev, bdaddr, bdaddr_type,
                                                 rssi, flags, data, len);
                        return;
@@ -5566,7 +5576,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        rssi = ev->data[ev->length];
                        process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
                                           ev->bdaddr_type, NULL, 0, rssi,
-                                          ev->data, ev->length);
+                                          ev->data, ev->length, false);
                } else {
                        bt_dev_err(hdev, "Dropping invalid advertising data");
                }
@@ -5638,7 +5648,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
                if (legacy_evt_type != LE_ADV_INVALID) {
                        process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
                                           ev->bdaddr_type, NULL, 0, ev->rssi,
-                                          ev->data, ev->length);
+                                          ev->data, ev->length,
+                                          !(evt_type & LE_EXT_ADV_LEGACY_PDU));
                }
 
                ptr += sizeof(*ev) + ev->length;
@@ -5836,7 +5847,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
 
                process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
                                   ev->bdaddr_type, &ev->direct_addr,
-                                  ev->direct_addr_type, ev->rssi, NULL, 0);
+                                  ev->direct_addr_type, ev->rssi, NULL, 0,
+                                  false);
 
                ptr += sizeof(*ev);
        }
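The effect of all the ext_adv plumbing above is one length rule: legacy advertising PDUs carry at most 31 bytes of data, and only extended advertising may exceed that. A tiny sketch of the check, assuming only that HCI_MAX_AD_LENGTH is 31 as in the kernel headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HCI_MAX_AD_LENGTH 31	/* max data in a legacy advertising PDU */

static bool adv_report_len_ok(uint8_t len, bool ext_adv)
{
	return ext_adv || len <= HCI_MAX_AD_LENGTH;
}

int main(void)
{
	printf("legacy/31: %d  legacy/40: %d  extended/40: %d\n",
	       adv_report_len_ok(31, false),	/* fits        */
	       adv_report_len_ok(40, false),	/* dropped     */
	       adv_report_len_ok(40, true));	/* ext_adv, ok */
	return 0;
}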
index 1905e01..4494ea6 100644
@@ -39,7 +39,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
 {
        struct mbox_request req;
        struct mbox_reply reply;
-       loff_t pos;
+       loff_t pos = 0;
        ssize_t n;
        int ret = -EFAULT;
 
index 5e3041a..434838b 100644
@@ -202,7 +202,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
 
                /* Advance. */
                kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp);
-               ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
+               ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len);
        }
 
        /*
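The compat cmsg fix is an instance of a general rule for walking variable-length records: advance by the same length field you validated (here cmsg.cmsg_len), not by a separately recomputed size, or the reader and the validator can disagree about where the next record starts. An illustrative TLV walker, not the kernel's cmsg API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rec {			/* hypothetical stand-in for a cmsghdr */
	uint32_t len;		/* total record length, header included */
	uint8_t data[];
};

/* Return the record at offset off, or NULL if it doesn't fit. */
static const struct rec *rec_at(const uint8_t *buf, size_t buflen, size_t off)
{
	const struct rec *r = (const struct rec *)(buf + off);

	if (off + sizeof(*r) > buflen || r->len < sizeof(*r) ||
	    off + r->len > buflen)
		return NULL;
	return r;
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	const struct rec *r;
	size_t off;

	((struct rec *)buf)->len = 12;		/* first record  */
	((struct rec *)(buf + 12))->len = 8;	/* second record */

	/* Advance by r->len: the same field rec_at() just validated. */
	for (off = 0; (r = rec_at(buf, sizeof(buf), off)); off += r->len)
		printf("record of %u bytes at offset %zu\n", r->len, off);
	return 0;
}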
index 2cafbc8..47f14a2 100644
@@ -1065,7 +1065,9 @@ static int devlink_nl_cmd_sb_pool_get_dumpit(struct sk_buff *msg,
                                                   devlink_sb,
                                                   NETLINK_CB(cb->skb).portid,
                                                   cb->nlh->nlmsg_seq);
-                       if (err && err != -EOPNOTSUPP) {
+                       if (err == -EOPNOTSUPP) {
+                               err = 0;
+                       } else if (err) {
                                mutex_unlock(&devlink->lock);
                                goto out;
                        }
@@ -1266,7 +1268,9 @@ static int devlink_nl_cmd_sb_port_pool_get_dumpit(struct sk_buff *msg,
                                                        devlink, devlink_sb,
                                                        NETLINK_CB(cb->skb).portid,
                                                        cb->nlh->nlmsg_seq);
-                       if (err && err != -EOPNOTSUPP) {
+                       if (err == -EOPNOTSUPP) {
+                               err = 0;
+                       } else if (err) {
                                mutex_unlock(&devlink->lock);
                                goto out;
                        }
@@ -1498,7 +1502,9 @@ devlink_nl_cmd_sb_tc_pool_bind_get_dumpit(struct sk_buff *msg,
                                                           devlink_sb,
                                                           NETLINK_CB(cb->skb).portid,
                                                           cb->nlh->nlmsg_seq);
-                       if (err && err != -EOPNOTSUPP) {
+                       if (err == -EOPNOTSUPP) {
+                               err = 0;
+                       } else if (err) {
                                mutex_unlock(&devlink->lock);
                                goto out;
                        }
@@ -3299,7 +3305,9 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI);
-                       if (err && err != -EOPNOTSUPP) {
+                       if (err == -EOPNOTSUPP) {
+                               err = 0;
+                       } else if (err) {
                                mutex_unlock(&devlink->lock);
                                goto out;
                        }
@@ -3569,7 +3577,9 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
                                                NETLINK_CB(cb->skb).portid,
                                                cb->nlh->nlmsg_seq,
                                                NLM_F_MULTI);
-                               if (err && err != -EOPNOTSUPP) {
+                               if (err == -EOPNOTSUPP) {
+                                       err = 0;
+                               } else if (err) {
                                        mutex_unlock(&devlink->lock);
                                        goto out;
                                }
@@ -4518,7 +4528,9 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           cb->extack);
                mutex_unlock(&devlink->lock);
-               if (err && err != -EOPNOTSUPP)
+               if (err == -EOPNOTSUPP)
+                       err = 0;
+               else if (err)
                        break;
                idx++;
        }
@@ -8567,6 +8579,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = {
        DEVLINK_TRAP_GROUP(PIM),
        DEVLINK_TRAP_GROUP(UC_LB),
        DEVLINK_TRAP_GROUP(LOCAL_DELIVERY),
+       DEVLINK_TRAP_GROUP(EXTERNAL_DELIVERY),
        DEVLINK_TRAP_GROUP(IPV6),
        DEVLINK_TRAP_GROUP(PTP_EVENT),
        DEVLINK_TRAP_GROUP(PTP_GENERAL),
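All six devlink hunks above apply the same pattern: a driver returning -EOPNOTSUPP for one object should not abort the whole dump, and a trailing -EOPNOTSUPP must not become the dump's return value, so the error is cleared to 0 before the loop continues. The shape of that change, with hypothetical getters standing in for the devlink ops:

#include <errno.h>
#include <stdio.h>

static int get_one(int idx)
{
	return idx == 1 ? -EOPNOTSUPP : 0;	/* object 1 unsupported */
}

static int dump_all(int n)
{
	int err = 0;
	int idx;

	for (idx = 0; idx < n; idx++) {
		err = get_one(idx);
		if (err == -EOPNOTSUPP) {
			err = 0;	/* skip it, don't fail the dump */
			continue;
		} else if (err) {
			break;
		}
		printf("dumped object %d\n", idx);
	}
	return err;
}

int main(void)
{
	printf("dump_all returned %d\n", dump_all(3));
	return 0;
}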
index 248f1c1..3c65f71 100644
@@ -1864,7 +1864,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
        while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
                struct key_vector *local_l = NULL, *local_tp;
 
-               hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+               hlist_for_each_entry(fa, &l->leaf, fa_list) {
                        struct fib_alias *new_fa;
 
                        if (local_tb->tb_id != fa->tb_id)
index 8932612..dacdea7 100644
@@ -183,7 +183,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
        return 0;
 }
 
-void ipv6_sock_ac_close(struct sock *sk)
+void __ipv6_sock_ac_close(struct sock *sk)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net_device *dev = NULL;
@@ -191,10 +191,7 @@ void ipv6_sock_ac_close(struct sock *sk)
        struct net *net = sock_net(sk);
        int     prev_index;
 
-       if (!np->ipv6_ac_list)
-               return;
-
-       rtnl_lock();
+       ASSERT_RTNL();
        pac = np->ipv6_ac_list;
        np->ipv6_ac_list = NULL;
 
@@ -211,6 +208,16 @@ void ipv6_sock_ac_close(struct sock *sk)
                sock_kfree_s(sk, pac, sizeof(*pac));
                pac = next;
        }
+}
+
+void ipv6_sock_ac_close(struct sock *sk)
+{
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       if (!np->ipv6_ac_list)
+               return;
+       rtnl_lock();
+       __ipv6_sock_ac_close(sk);
        rtnl_unlock();
 }
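The anycast change is the usual split into a __locked helper plus a locking wrapper, so setsockopt paths that already hold RTNL can call the helper directly. Generic shape of the pattern, with a pthread mutex standing in for RTNL:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static int resource = 42;

/* Caller must hold big_lock; this is the ASSERT_RTNL() analogue
 * (pthreads offers no cheap ownership assertion for plain mutexes). */
static void __close_resource(void)
{
	resource = 0;
}

/* Wrapper for callers that don't already hold the lock. */
static void close_resource(void)
{
	pthread_mutex_lock(&big_lock);
	__close_resource();
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	/* A caller already under the lock uses the __ variant directly,
	 * as do_ipv6_setsockopt() now does under RTNL. */
	pthread_mutex_lock(&big_lock);
	__close_resource();
	pthread_mutex_unlock(&big_lock);

	close_resource();	/* unlocked callers use the wrapper */
	printf("resource = %d\n", resource);
	return 0;
}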
 
index c435927..52c2f06 100644
@@ -805,10 +805,17 @@ int esp6_input_done2(struct sk_buff *skb, int err)
 
        if (x->encap) {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+               int offset = skb_network_offset(skb) + sizeof(*ip6h);
                struct xfrm_encap_tmpl *encap = x->encap;
-               struct udphdr *uh = (void *)(skb_network_header(skb) + hdr_len);
-               struct tcphdr *th = (void *)(skb_network_header(skb) + hdr_len);
-               __be16 source;
+               u8 nexthdr = ip6h->nexthdr;
+               __be16 frag_off, source;
+               struct udphdr *uh;
+               struct tcphdr *th;
+
+               offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+               uh = (void *)(skb->data + offset);
+               th = (void *)(skb->data + offset);
+               hdr_len += offset;
 
                switch (x->encap->encap_type) {
                case TCP_ENCAP_ESPINTCP:
index 20576e8..76f9e41 100644
@@ -240,6 +240,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 
                        fl6_free_socklist(sk);
                        __ipv6_sock_mc_close(sk);
+                       __ipv6_sock_ac_close(sk);
 
                        /*
                         * Sock is moving from IPv6 to IPv4 (sk_prot), so
index f327981..4c36bd0 100644
@@ -3685,14 +3685,14 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
        rt->fib6_src.plen = cfg->fc_src_len;
 #endif
        if (nh) {
-               if (!nexthop_get(nh)) {
-                       NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
-                       goto out;
-               }
                if (rt->fib6_src.plen) {
                        NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
                        goto out;
                }
+               if (!nexthop_get(nh)) {
+                       NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
+                       goto out;
+               }
                rt->nh = nh;
                fib6_nh = nexthop_fib6_nh(rt->nh);
        } else {
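Reordering the checks in ip6_route_info_create() means validation happens before nexthop_get(), so error paths never bail out with a reference still held. The general shape, with an illustrative refcount:

#include <stdbool.h>
#include <stdio.h>

struct obj { int refs; };

static bool obj_get(struct obj *o) { o->refs++; return true; }

static int attach(struct obj *o, bool config_valid)
{
	/* Validate first: failing here holds no reference to leak. */
	if (!config_valid)
		return -1;

	/* Only now take the reference kept on success. */
	if (!obj_get(o))
		return -1;
	return 0;
}

int main(void)
{
	struct obj o = { .refs = 0 };

	attach(&o, false);
	printf("refs after rejected attach: %d\n", o.refs);	/* 0 */
	attach(&o, true);
	printf("refs after good attach: %d\n", o.refs);		/* 1 */
	return 0;
}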
index b67ed3a..a915bc8 100644
@@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
                struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
 
+               if ((xfilter->sadb_x_filter_splen >=
+                       (sizeof(xfrm_address_t) << 3)) ||
+                   (xfilter->sadb_x_filter_dplen >=
+                       (sizeof(xfrm_address_t) << 3))) {
+                       mutex_unlock(&pfk->dump_lock);
+                       return -EINVAL;
+               }
                filter = kmalloc(sizeof(*filter), GFP_KERNEL);
                if (filter == NULL) {
                        mutex_unlock(&pfk->dump_lock);
@@ -2400,7 +2407,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
                        return err;
        }
 
-       xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
+       xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
                                   pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
                                   1, &err);
        security_xfrm_policy_free(pol_ctx);
@@ -2651,7 +2658,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
                return -EINVAL;
 
        delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
-       xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
+       xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
                              dir, pol->sadb_x_policy_id, delete, &err);
        if (xp == NULL)
                return -ENOENT;
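The af_key check added above bounds the dump filter's prefix lengths by the address width in bits before they can index past an xfrm_address_t. A sketch of the bound, with the 16-byte address size written out as an assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_BYTES 16	/* assumed sizeof(xfrm_address_t) */

static bool filter_plen_ok(uint8_t splen, uint8_t dplen)
{
	/* << 3 converts bytes to bits; the prefix length is used as a
	 * bit index, so it must stay below the 128-bit address width,
	 * matching the >= rejection in the hunk above. */
	return splen < (ADDR_BYTES << 3) && dplen < (ADDR_BYTES << 3);
}

int main(void)
{
	printf("127/64: %d  128/0: %d\n",
	       filter_plen_ok(127, 64), filter_plen_ok(128, 0));
	return 0;
}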
index 9b36054..1079a07 100644
@@ -2166,6 +2166,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
        ieee80211_stop_mesh(sdata);
        mutex_lock(&sdata->local->mtx);
        ieee80211_vif_release_channel(sdata);
+       kfree(sdata->u.mesh.ie);
        mutex_unlock(&sdata->local->mtx);
 
        return 0;
index 5f1ca25..e88beb3 100644
@@ -617,6 +617,19 @@ int mesh_add_he_oper_ie(struct ieee80211_sub_if_data *sdata,
 int mesh_add_he_6ghz_cap_ie(struct ieee80211_sub_if_data *sdata,
                            struct sk_buff *skb)
 {
+       struct ieee80211_supported_band *sband;
+       const struct ieee80211_sband_iftype_data *iftd;
+
+       sband = ieee80211_get_sband(sdata);
+       if (!sband)
+               return -EINVAL;
+
+       iftd = ieee80211_get_sband_iftype_data(sband,
+                                              NL80211_IFTYPE_MESH_POINT);
+       /* The device doesn't support HE in mesh mode or at all */
+       if (!iftd)
+               return 0;
+
        ieee80211_ie_build_he_6ghz_cap(sdata, skb);
        return 0;
 }
index 117519b..aca608a 100644
@@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
        del_timer_sync(&mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        atomic_dec(&tbl->entries);
+       mesh_path_flush_pending(mpath);
        kfree_rcu(mpath, rcu);
 }
 
index cd8487b..af4cc5f 100644
@@ -1923,9 +1923,7 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local,
        if (sta) {
                tx_pending = atomic_sub_return(tx_airtime,
                                               &sta->airtime[ac].aql_tx_pending);
-               if (WARN_ONCE(tx_pending < 0,
-                             "STA %pM AC %d txq pending airtime underflow: %u, %u",
-                             sta->addr, ac, tx_pending, tx_airtime))
+               if (tx_pending < 0)
                        atomic_cmpxchg(&sta->airtime[ac].aql_tx_pending,
                                       tx_pending, 0);
        }
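Instead of warning, the airtime path now clamps the counter back to zero with a compare-and-swap, tolerating a racing update. The same clamp expressed with C11 atomics (a sketch, not the kernel's atomic_t API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending;

static void sub_pending(int amount)
{
	int now = atomic_fetch_sub(&pending, amount) - amount;

	/* Underflow from a stale subtraction: try to reset from the
	 * value we observed to 0.  If a concurrent update changed it
	 * meanwhile, the cmpxchg fails and the newer value stands,
	 * exactly like the kernel's atomic_cmpxchg() above. */
	if (now < 0)
		atomic_compare_exchange_strong(&pending, &now, 0);
}

int main(void)
{
	atomic_store(&pending, 10);
	sub_pending(25);		/* would underflow to -15 */
	printf("pending clamped to %d\n", atomic_load(&pending));
	return 0;
}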
index 1a2941e..3529d13 100644
@@ -4230,11 +4230,12 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
            test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
                goto out_free;
 
+       memset(info, 0, sizeof(*info));
+
        if (unlikely(!multicast && skb->sk &&
                     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
-               ieee80211_store_ack_skb(local, skb, &info->flags, NULL);
-
-       memset(info, 0, sizeof(*info));
+               info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
+                                                            &info->flags, NULL);
 
        if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
                if (sdata->control_port_no_encrypt)
index 21c9409..dd9f5c7 100644
@@ -2878,6 +2878,10 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!iftd))
                return;
 
+       /* Check for device HE 6 GHz capability before adding element */
+       if (!iftd->he_6ghz_capa.capa)
+               return;
+
        cap = le16_to_cpu(iftd->he_6ghz_capa.capa);
        cap &= ~IEEE80211_HE_6GHZ_CAP_SM_PS;
 
index 3980fbb..c0abe73 100644
@@ -1833,7 +1833,7 @@ do_connect:
        /* on successful connect, the msk state will be moved to established by
         * subflow_finish_connect()
         */
-       if (!err || err == EINPROGRESS)
+       if (!err || err == -EINPROGRESS)
                mptcp_copy_inaddrs(sock->sk, ssock->sk);
        else
                inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
index c840497..aba4afe 100644
@@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
 {
        struct rds_notifier *notifier;
-       struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
+       struct rds_rdma_notify cmsg;
        unsigned int count = 0, max_messages = ~0U;
        unsigned long flags;
        LIST_HEAD(copy);
        int err = 0;
 
+       memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
 
        /* put_cmsg copies to user space and thus may sleep. We can't do this
         * with rs_lock held, so first grab as many notifications as we can stuff
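The rds change swaps a `= { 0 }` initializer for an explicit memset() because zero-initialization is not guaranteed to clear padding bytes, and the struct is later copied to user space wholesale, so any hole could leak kernel stack bytes. An illustration with a padded struct (the field layout is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct notify {			/* layout hypothetical */
	uint8_t  op;		/* 7 padding bytes typically follow */
	uint64_t cookie;
};

int main(void)
{
	struct notify n;

	memset(&n, 0, sizeof(n));	/* clears the padding too */
	n.op = 1;
	n.cookie = 42;

	/* Every one of sizeof(n) bytes is now defined, so copying the
	 * whole struct out (the kernel's put_cmsg()) leaks nothing. */
	printf("struct: %zu bytes, fields: %zu bytes\n",
	       sizeof(n), sizeof(n.op) + sizeof(n.cookie));
	return 0;
}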
index f079702..38a4616 100644
@@ -288,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
         */
        ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
-               goto error;
+               goto error_attached_to_socket;
 
        trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
                         atomic_read(&call->usage), here, NULL);
@@ -308,18 +308,29 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
-       ret = -EEXIST;
-
-error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
-                                   RX_CALL_DEAD, ret);
+                                   RX_CALL_DEAD, -EEXIST);
        trace_rxrpc_call(call->debug_id, rxrpc_call_error,
-                        atomic_read(&call->usage), here, ERR_PTR(ret));
+                        atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
-       _leave(" = %d", ret);
-       return ERR_PTR(ret);
+       _leave(" = -EEXIST");
+       return ERR_PTR(-EEXIST);
+
+       /* We got an error, but the call is attached to the socket and is in
+        * need of release.  However, we might now race with recvmsg() once
+        * completion of the call queues it.  Return 0 from sys_sendmsg() and
+        * leave the error to recvmsg() to deal with.
+        */
+error_attached_to_socket:
+       trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+                        atomic_read(&call->usage), here, ERR_PTR(ret));
+       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+       __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+                                   RX_CALL_DEAD, ret);
+       _leave(" = c=%08x [err]", call->debug_id);
+       return call;
 }
 
 /*
index 19e141e..8cbe0bf 100644
@@ -212,9 +212,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 
        call->peer->cong_cwnd = call->cong_cwnd;
 
-       spin_lock_bh(&conn->params.peer->lock);
-       hlist_del_rcu(&call->error_link);
-       spin_unlock_bh(&conn->params.peer->lock);
+       if (!hlist_unhashed(&call->error_link)) {
+               spin_lock_bh(&call->peer->lock);
+               hlist_del_rcu(&call->error_link);
+               spin_unlock_bh(&call->peer->lock);
+       }
 
        if (rxrpc_is_client_call(call))
                return rxrpc_disconnect_client_call(call);
index 490b192..efecc5a 100644
@@ -620,7 +620,7 @@ try_again:
                        goto error_unlock_call;
        }
 
-       if (msg->msg_name) {
+       if (msg->msg_name && call->peer) {
                struct sockaddr_rxrpc *srx = msg->msg_name;
                size_t len = sizeof(call->peer->srx);
 
index 03a30d0..f3f6da6 100644
@@ -681,6 +681,9 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                if (IS_ERR(call))
                        return PTR_ERR(call);
                /* ... and we have the call lock. */
+               ret = 0;
+               if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
+                       goto out_put_unlock;
        } else {
                switch (READ_ONCE(call->state)) {
                case RXRPC_CALL_UNINITIALISED:
index 5928efb..6ed1652 100644
@@ -1543,10 +1543,10 @@ static int __init ct_init_module(void)
 
        return 0;
 
-err_tbl_init:
-       destroy_workqueue(act_ct_wq);
 err_register:
        tcf_ct_flow_tables_uninit();
+err_tbl_init:
+       destroy_workqueue(act_ct_wq);
        return err;
 }
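The act_ct fix reorders the unwind labels so that teardown runs in reverse order of setup and each goto target undoes exactly what had succeeded so far. The canonical fall-through ladder, with illustrative stubs:

#include <stdio.h>

static int  setup_wq(void)        { puts("wq up");      return 0;  }
static void teardown_wq(void)     { puts("wq down");               }
static int  setup_tables(void)    { puts("tables up");  return 0;  }
static void teardown_tables(void) { puts("tables down");           }
static int  do_register(void)     { puts("register");   return -1; }

static int init_sketch(void)
{
	int err;

	err = setup_wq();
	if (err)
		return err;
	err = setup_tables();
	if (err)
		goto err_tbl_init;
	err = do_register();
	if (err)
		goto err_register;
	return 0;

err_register:			/* undo step 2 ...                  */
	teardown_tables();
err_tbl_init:			/* ... then fall through to step 1. */
	teardown_wq();
	return err;
}

int main(void)
{
	printf("init: %d\n", init_sketch());
	return 0;
}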
 
index 0e07fb8..7fbca08 100644
@@ -13266,13 +13266,13 @@ static int nl80211_vendor_cmd(struct sk_buff *skb, struct genl_info *info)
                                if (!wdev_running(wdev))
                                        return -ENETDOWN;
                        }
-
-                       if (!vcmd->doit)
-                               return -EOPNOTSUPP;
                } else {
                        wdev = NULL;
                }
 
+               if (!vcmd->doit)
+                       return -EOPNOTSUPP;
+
                if (info->attrs[NL80211_ATTR_VENDOR_DATA]) {
                        data = nla_data(info->attrs[NL80211_ATTR_VENDOR_DATA]);
                        len = nla_len(info->attrs[NL80211_ATTR_VENDOR_DATA]);
index 100e296..827ccdf 100644
@@ -15,6 +15,7 @@ static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
 {
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf ||
            !sk_rmem_schedule(sk, skb, skb->truesize)) {
+               XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR);
                kfree_skb(skb);
                return;
        }
@@ -49,23 +50,51 @@ static void espintcp_rcv(struct strparser *strp, struct sk_buff *skb)
        struct espintcp_ctx *ctx = container_of(strp, struct espintcp_ctx,
                                                strp);
        struct strp_msg *rxm = strp_msg(skb);
+       int len = rxm->full_len - 2;
        u32 nonesp_marker;
        int err;
 
+       /* keepalive packet? */
+       if (unlikely(len == 1)) {
+               u8 data;
+
+               err = skb_copy_bits(skb, rxm->offset + 2, &data, 1);
+               if (err < 0) {
+                       XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
+                       kfree_skb(skb);
+                       return;
+               }
+
+               if (data == 0xff) {
+                       kfree_skb(skb);
+                       return;
+               }
+       }
+
+       /* drop other short messages */
+       if (unlikely(len <= sizeof(nonesp_marker))) {
+               XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
+               kfree_skb(skb);
+               return;
+       }
+
        err = skb_copy_bits(skb, rxm->offset + 2, &nonesp_marker,
                            sizeof(nonesp_marker));
        if (err < 0) {
+               XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);
                kfree_skb(skb);
                return;
        }
 
        /* remove header, leave non-ESP marker/SPI */
        if (!__pskb_pull(skb, rxm->offset + 2)) {
+               XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
                kfree_skb(skb);
                return;
        }
 
        if (pskb_trim(skb, rxm->full_len - 2) != 0) {
+               XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);
                kfree_skb(skb);
                return;
        }
@@ -91,7 +120,7 @@ static int espintcp_parse(struct strparser *strp, struct sk_buff *skb)
                return err;
 
        len = be16_to_cpu(blen);
-       if (len < 6)
+       if (len < 2)
                return -EINVAL;
 
        return len;
@@ -109,8 +138,11 @@ static int espintcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        flags |= nonblock ? MSG_DONTWAIT : 0;
 
        skb = __skb_recv_datagram(sk, &ctx->ike_queue, flags, &off, &err);
-       if (!skb)
+       if (!skb) {
+               if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN)
+                       return 0;
                return err;
+       }
 
        copied = len;
        if (copied > skb->len)
@@ -213,7 +245,7 @@ retry:
        return 0;
 }
 
-static int espintcp_push_msgs(struct sock *sk)
+static int espintcp_push_msgs(struct sock *sk, int flags)
 {
        struct espintcp_ctx *ctx = espintcp_getctx(sk);
        struct espintcp_msg *emsg = &ctx->partial;
@@ -227,12 +259,12 @@ static int espintcp_push_msgs(struct sock *sk)
        ctx->tx_running = 1;
 
        if (emsg->skb)
-               err = espintcp_sendskb_locked(sk, emsg, 0);
+               err = espintcp_sendskb_locked(sk, emsg, flags);
        else
-               err = espintcp_sendskmsg_locked(sk, emsg, 0);
+               err = espintcp_sendskmsg_locked(sk, emsg, flags);
        if (err == -EAGAIN) {
                ctx->tx_running = 0;
-               return 0;
+               return flags & MSG_DONTWAIT ? -EAGAIN : 0;
        }
        if (!err)
                memset(emsg, 0, sizeof(*emsg));
@@ -257,7 +289,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb)
        offset = skb_transport_offset(skb);
        len = skb->len - offset;
 
-       espintcp_push_msgs(sk);
+       espintcp_push_msgs(sk, 0);
 
        if (emsg->len) {
                kfree_skb(skb);
@@ -270,7 +302,7 @@ int espintcp_push_skb(struct sock *sk, struct sk_buff *skb)
        emsg->len = len;
        emsg->skb = skb;
 
-       espintcp_push_msgs(sk);
+       espintcp_push_msgs(sk, 0);
 
        return 0;
 }
@@ -287,7 +319,7 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        char buf[2] = {0};
        int err, end;
 
-       if (msg->msg_flags)
+       if (msg->msg_flags & ~MSG_DONTWAIT)
                return -EOPNOTSUPP;
 
        if (size > MAX_ESPINTCP_MSG)
@@ -298,9 +330,10 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
        lock_sock(sk);
 
-       err = espintcp_push_msgs(sk);
+       err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
        if (err < 0) {
-               err = -ENOBUFS;
+               if (err != -EAGAIN || !(msg->msg_flags & MSG_DONTWAIT))
+                       err = -ENOBUFS;
                goto unlock;
        }
 
@@ -337,10 +370,9 @@ static int espintcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
        tcp_rate_check_app_limited(sk);
 
-       err = espintcp_push_msgs(sk);
+       err = espintcp_push_msgs(sk, msg->msg_flags & MSG_DONTWAIT);
        /* this message could be partially sent, keep it */
-       if (err < 0)
-               goto unlock;
+
        release_sock(sk);
 
        return size;
@@ -374,7 +406,7 @@ static void espintcp_tx_work(struct work_struct *work)
 
        lock_sock(sk);
        if (!ctx->tx_running)
-               espintcp_push_msgs(sk);
+               espintcp_push_msgs(sk, 0);
        release_sock(sk);
 }
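Taken together, the espintcp receive changes define a small framing contract: a 2-byte big-endian length that includes itself, a 1-byte body of 0xff treated as a NAT-T keepalive and dropped silently, and everything else at least large enough for the 4-byte non-ESP marker. A userspace sketch of just those length rules, not the strparser API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NONESP_MARKER_LEN 4

/* Returns the body length, 0 for a dropped keepalive, -1 on error. */
static int parse_frame(const uint8_t *buf, size_t buflen)
{
	uint16_t blen;
	int len;

	if (buflen < 2)
		return -1;
	blen = (uint16_t)(buf[0] << 8 | buf[1]);	/* BE length */
	if (blen < 2 || blen > buflen)
		return -1;
	len = blen - 2;			/* payload after the length field */

	if (len == 1 && buf[2] == 0xff)
		return 0;		/* NAT-T keepalive: drop quietly */
	if (len <= NONESP_MARKER_LEN)
		return -1;		/* too short for marker + data */
	return len;
}

int main(void)
{
	uint8_t keepalive[] = { 0x00, 0x03, 0xff };
	uint8_t runt[]      = { 0x00, 0x04, 0x00, 0x00 };

	printf("keepalive -> %d, runt -> %d\n",
	       parse_frame(keepalive, sizeof(keepalive)),
	       parse_frame(runt, sizeof(runt)));
	return 0;
}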
 
index 564aa64..19c5e0f 100644
@@ -39,7 +39,7 @@
 #ifdef CONFIG_XFRM_STATISTICS
 #include <net/snmp.h>
 #endif
-#ifdef CONFIG_INET_ESPINTCP
+#ifdef CONFIG_XFRM_ESPINTCP
 #include <net/espintcp.h>
 #endif
 
@@ -1433,14 +1433,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
        spin_unlock_bh(&pq->hold_queue.lock);
 }
 
-static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
-                                  struct xfrm_policy *pol)
+static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
+                                         struct xfrm_policy *pol)
 {
-       if (policy->mark.v == pol->mark.v &&
-           policy->priority == pol->priority)
-               return true;
-
-       return false;
+       return mark->v == pol->mark.v && mark->m == pol->mark.m;
 }
 
 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
@@ -1503,7 +1499,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
                if (pol->type == policy->type &&
                    pol->if_id == policy->if_id &&
                    !selector_cmp(&pol->selector, &policy->selector) &&
-                   xfrm_policy_mark_match(policy, pol) &&
+                   xfrm_policy_mark_match(&policy->mark, pol) &&
                    xfrm_sec_ctx_match(pol->security, policy->security) &&
                    !WARN_ON(delpol)) {
                        delpol = pol;
@@ -1538,7 +1534,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
                if (pol->type == policy->type &&
                    pol->if_id == policy->if_id &&
                    !selector_cmp(&pol->selector, &policy->selector) &&
-                   xfrm_policy_mark_match(policy, pol) &&
+                   xfrm_policy_mark_match(&policy->mark, pol) &&
                    xfrm_sec_ctx_match(pol->security, policy->security) &&
                    !WARN_ON(delpol)) {
                        if (excl)
@@ -1610,9 +1606,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
 EXPORT_SYMBOL(xfrm_policy_insert);
 
 static struct xfrm_policy *
-__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
-                       u8 type, int dir,
-                       struct xfrm_selector *sel,
+__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
+                       u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
                        struct xfrm_sec_ctx *ctx)
 {
        struct xfrm_policy *pol;
@@ -1623,7 +1618,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
        hlist_for_each_entry(pol, chain, bydst) {
                if (pol->type == type &&
                    pol->if_id == if_id &&
-                   (mark & pol->mark.m) == pol->mark.v &&
+                   xfrm_policy_mark_match(mark, pol) &&
                    !selector_cmp(sel, &pol->selector) &&
                    xfrm_sec_ctx_match(ctx, pol->security))
                        return pol;
@@ -1632,11 +1627,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
        return NULL;
 }
 
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
-                                         u8 type, int dir,
-                                         struct xfrm_selector *sel,
-                                         struct xfrm_sec_ctx *ctx, int delete,
-                                         int *err)
+struct xfrm_policy *
+xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
+                     u8 type, int dir, struct xfrm_selector *sel,
+                     struct xfrm_sec_ctx *ctx, int delete, int *err)
 {
        struct xfrm_pol_inexact_bin *bin = NULL;
        struct xfrm_policy *pol, *ret = NULL;
@@ -1703,9 +1697,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
 }
 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
 
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
-                                    u8 type, int dir, u32 id, int delete,
-                                    int *err)
+struct xfrm_policy *
+xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
+                u8 type, int dir, u32 id, int delete, int *err)
 {
        struct xfrm_policy *pol, *ret;
        struct hlist_head *chain;
@@ -1720,8 +1714,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
        ret = NULL;
        hlist_for_each_entry(pol, chain, byidx) {
                if (pol->type == type && pol->index == id &&
-                   pol->if_id == if_id &&
-                   (mark & pol->mark.m) == pol->mark.v) {
+                   pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
                        xfrm_pol_hold(pol);
                        if (delete) {
                                *err = security_xfrm_policy_delete(
@@ -4156,7 +4149,7 @@ void __init xfrm_init(void)
        seqcount_init(&xfrm_policy_hash_generation);
        xfrm_input_init();
 
-#ifdef CONFIG_INET_ESPINTCP
+#ifdef CONFIG_XFRM_ESPINTCP
        espintcp_init();
 #endif
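The new xfrm_policy_mark_match() above compares the caller's (value, mask) pair exactly, where the old code only tested the caller's value against the policy's mask, so policies differing only in mask can no longer satisfy a lookup meant for another. Both semantics side by side:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mark { uint32_t v, m; };

static bool old_match(uint32_t mark, const struct mark *pol)
{
	return (mark & pol->m) == pol->v;	/* masked value only */
}

static bool new_match(const struct mark *mark, const struct mark *pol)
{
	return mark->v == pol->v && mark->m == pol->m;	/* exact pair */
}

int main(void)
{
	struct mark pol  = { .v = 0x1, .m = 0xf };
	struct mark same = { .v = 0x1, .m = 0xf };
	struct mark diff = { .v = 0x1, .m = 0xff };	/* same v, new mask */

	printf("old: %d, new same: %d, new diff-mask: %d\n",
	       old_match(0x1, &pol), new_match(&same, &pol),
	       new_match(&diff, &pol));
	return 0;
}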
 
index e6cfaa6..fbb7d9d 100644
@@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        int delete;
        struct xfrm_mark m;
-       u32 mark = xfrm_mark_get(attrs, &m);
        u32 if_id = 0;
 
        p = nlmsg_data(nlh);
@@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (attrs[XFRMA_IF_ID])
                if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
 
+       xfrm_mark_get(attrs, &m);
+
        if (p->index)
-               xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
+               xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
+                                     p->index, delete, &err);
        else {
                struct nlattr *rt = attrs[XFRMA_SEC_CTX];
                struct xfrm_sec_ctx *ctx;
@@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                        if (err)
                                return err;
                }
-               xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
-                                          ctx, delete, &err);
+               xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
+                                          &p->sel, ctx, delete, &err);
                security_xfrm_policy_free(ctx);
        }
        if (xp == NULL)
@@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        u8 type = XFRM_POLICY_TYPE_MAIN;
        int err = -ENOENT;
        struct xfrm_mark m;
-       u32 mark = xfrm_mark_get(attrs, &m);
        u32 if_id = 0;
 
        err = copy_from_user_policy_type(&type, attrs);
@@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (attrs[XFRMA_IF_ID])
                if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
 
+       xfrm_mark_get(attrs, &m);
+
        if (p->index)
-               xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
+               xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
+                                     0, &err);
        else {
                struct nlattr *rt = attrs[XFRMA_SEC_CTX];
                struct xfrm_sec_ctx *ctx;
@@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                        if (err)
                                return err;
                }
-               xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
+               xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
                                           &p->sel, ctx, 0, &err);
                security_xfrm_policy_free(ctx);
        }
index 3651cbf..f54b6ac 100644
@@ -124,9 +124,6 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
 
-PHONY += FORCE
-FORCE:
-
 endif
 
 .PHONY: $(PHONY)
index 12a67fd..c3d537c 100644
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-*.moc
+/qconf-moc.cc
 *conf-cfg
 
 #
index 426881e..52b59bf 100644
@@ -181,19 +181,22 @@ $(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/mconf-cfg
 
 # qconf: Used for the xconfig target based on Qt
 hostprogs      += qconf
-qconf-cxxobjs  := qconf.o
+qconf-cxxobjs  := qconf.o qconf-moc.o
 qconf-objs     := images.o $(common-objs)
 
 HOSTLDLIBS_qconf       = $(shell . $(obj)/qconf-cfg && echo $$libs)
 HOSTCXXFLAGS_qconf.o   = $(shell . $(obj)/qconf-cfg && echo $$cflags)
+HOSTCXXFLAGS_qconf-moc.o = $(shell . $(obj)/qconf-cfg && echo $$cflags)
 
-$(obj)/qconf.o: $(obj)/qconf-cfg $(obj)/qconf.moc
+$(obj)/qconf.o: $(obj)/qconf-cfg
 
 quiet_cmd_moc = MOC     $@
-      cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) -i $< -o $@
+      cmd_moc = $(shell . $(obj)/qconf-cfg && echo $$moc) $< -o $@
 
-$(obj)/%.moc: $(src)/%.h $(obj)/qconf-cfg
-       $(call cmd,moc)
+$(obj)/qconf-moc.cc: $(src)/qconf.h $(obj)/qconf-cfg FORCE
+       $(call if_changed,moc)
+
+targets += qconf-moc.cc
 
 # gconf: Used for the gconfig target based on GTK+
 hostprogs      += gconf
index 4a61612..23d1cb0 100644
@@ -23,7 +23,6 @@
 #include "lkc.h"
 #include "qconf.h"
 
-#include "qconf.moc"
 #include "images.h"
 
 
@@ -308,10 +307,7 @@ ConfigList::ConfigList(ConfigView* p, const char *name)
        setVerticalScrollMode(ScrollPerPixel);
        setHorizontalScrollMode(ScrollPerPixel);
 
-       if (mode == symbolMode)
-               setHeaderLabels(QStringList() << "Item" << "Name" << "N" << "M" << "Y" << "Value");
-       else
-               setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value");
+       setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value");
 
        connect(this, SIGNAL(itemSelectionChanged(void)),
                SLOT(updateSelection(void)));
@@ -392,11 +388,6 @@ void ConfigList::updateSelection(void)
        struct menu *menu;
        enum prop_type type;
 
-       if (mode == symbolMode)
-               setHeaderLabels(QStringList() << "Item" << "Name" << "N" << "M" << "Y" << "Value");
-       else
-               setHeaderLabels(QStringList() << "Option" << "Name" << "N" << "M" << "Y" << "Value");
-
        if (selectedItems().count() == 0)
                return;
 
@@ -437,14 +428,13 @@ void ConfigList::updateList(ConfigItem* item)
        if (rootEntry != &rootmenu && (mode == singleMode ||
            (mode == symbolMode && rootEntry->parent != &rootmenu))) {
                item = (ConfigItem *)topLevelItem(0);
-               if (!item && mode != symbolMode) {
+               if (!item)
                        item = new ConfigItem(this, 0, true);
-                       last = item;
-               }
+               last = item;
        }
        if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
            rootEntry->sym && rootEntry->prompt) {
-               item = last ? last->nextSibling() : firstChild();
+               item = last ? last->nextSibling() : nullptr;
                if (!item)
                        item = new ConfigItem(this, last, rootEntry, true);
                else
@@ -1239,7 +1229,7 @@ void ConfigInfoView::clicked(const QUrl &url)
 
        if (count < 1) {
                qInfo() << "Clicked link is empty";
-               delete data;
+               delete[] data;
                return;
        }
 
@@ -1252,7 +1242,7 @@ void ConfigInfoView::clicked(const QUrl &url)
        result = sym_re_search(data);
        if (!result) {
                qInfo() << "Clicked symbol is invalid:" << data;
-               delete data;
+               delete[] data;
                return;
        }
 
@@ -1735,7 +1725,6 @@ void ConfigMainWindow::listFocusChanged(void)
 
 void ConfigMainWindow::goBack(void)
 {
-qInfo() << __FUNCTION__;
        if (configList->rootEntry == &rootmenu)
                return;
 
index fb9e972..5eeab4a 100644
@@ -92,10 +92,6 @@ public:
        {
                return this;
        }
-       ConfigItem* firstChild() const
-       {
-               return (ConfigItem *)children().first();
-       }
        void addColumn(colIdx idx)
        {
                showColumn(idx);
index 45f2ab2..69341b3 100644
@@ -144,6 +144,7 @@ char *get_line(char **stringp)
        if (!orig || *orig == '\0')
                return NULL;
 
+       /* don't use strsep() here; it is not available everywhere */
        next = strchr(orig, '\n');
        if (next)
                *next++ = '\0';
index 7e3ae45..803978d 100644
@@ -2935,6 +2935,10 @@ static int hda_codec_runtime_suspend(struct device *dev)
        struct hda_codec *codec = dev_to_hda_codec(dev);
        unsigned int state;
 
+       /* Nothing to do if card registration fails and the component driver never probes */
+       if (!codec->card)
+               return 0;
+
        cancel_delayed_work_sync(&codec->jackpoll_work);
        state = hda_call_codec_suspend(codec);
        if (codec->link_down_at_suspend ||
@@ -2949,6 +2953,10 @@ static int hda_codec_runtime_resume(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
+       /* Nothing to do if card registration fails and the component driver never probes */
+       if (!codec->card)
+               return 0;
+
        codec_display_power(codec, true);
        snd_hdac_codec_link_up(&codec->core);
        hda_call_codec_resume(codec);
index 82e2644..a356fb0 100644
@@ -41,7 +41,7 @@
 /* 24 unused */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
-/* 27 unused */
+#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG  (1 << 30) /* capture and playback use separate stream tag */
index 3565e2a..3fbba2e 100644
@@ -298,7 +298,8 @@ enum {
 /* PCH for HSW/BDW; with runtime PM */
 /* no i915 binding for this as HSW/BDW has another controller for HDMI */
 #define AZX_DCAPS_INTEL_PCH \
-       (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
+       (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+        AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
 
 /* HSW HDMI */
 #define AZX_DCAPS_INTEL_HASWELL \
@@ -1028,7 +1029,14 @@ static int azx_suspend(struct device *dev)
        chip = card->private_data;
        bus = azx_bus(chip);
        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
-       pm_runtime_force_suspend(dev);
+       /* An ugly workaround: direct call of __azx_runtime_suspend() and
+        * __azx_runtime_resume() for old Intel platforms that suffer from
+        * spurious wakeups after S3 suspend
+        */
+       if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+               __azx_runtime_suspend(chip);
+       else
+               pm_runtime_force_suspend(dev);
        if (bus->irq >= 0) {
                free_irq(bus->irq, chip);
                bus->irq = -1;
@@ -1057,7 +1065,10 @@ static int azx_resume(struct device *dev)
        if (azx_acquire_irq(chip, 1) < 0)
                return -EIO;
 
-       pm_runtime_force_resume(dev);
+       if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
+               __azx_runtime_resume(chip, false);
+       else
+               pm_runtime_force_resume(dev);
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
 
        trace_azx_resume(chip);
index 41eaa89..cd46247 100644
@@ -2440,6 +2440,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
        mutex_lock(&spec->bind_lock);
        spec->use_acomp_notifier = use_acomp;
        spec->codec->relaxed_resume = use_acomp;
+       spec->codec->bus->keep_power = 0;
        /* reprogram each jack detection logic depending on the notifier */
        for (i = 0; i < spec->num_pins; i++)
                reprogram_jack_detect(spec->codec,
@@ -2534,7 +2535,6 @@ static void generic_acomp_init(struct hda_codec *codec,
        if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops,
                                 match_bound_vga, 0)) {
                spec->acomp_registered = true;
-               codec->bus->keep_power = 0;
        }
 }
 
index 1b2d8e5..29f5878 100644
@@ -5975,6 +5975,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
                snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
 }
 
+static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
+                             const struct hda_fixup *fix, int action)
+{
+       if (action != HDA_FIXUP_ACT_INIT)
+               return;
+
+       msleep(100);
+       alc_write_coef_idx(codec, 0x65, 0x0);
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -6152,8 +6162,10 @@ enum {
        ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
        ALC269VC_FIXUP_ACER_HEADSET_MIC,
        ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
-       ALC289_FIXUP_ASUS_G401,
+       ALC289_FIXUP_ASUS_GA401,
+       ALC289_FIXUP_ASUS_GA502,
        ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
+       ALC285_FIXUP_HP_GPIO_AMP_INIT,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7363,7 +7375,14 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
-       [ALC289_FIXUP_ASUS_G401] = {
+       [ALC289_FIXUP_ASUS_GA401] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+                       { }
+               },
+       },
+       [ALC289_FIXUP_ASUS_GA502] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
                        { 0x19, 0x03a11020 }, /* headset mic with jack detect */
@@ -7379,6 +7398,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
        },
+       [ALC285_FIXUP_HP_GPIO_AMP_INIT] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_hp_gpio_amp_init,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_HP_GPIO_LED
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7529,7 +7554,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -7561,7 +7586,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
-       SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401),
+       SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+       SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -7581,7 +7607,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
-       SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
+       SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
index 40b7cd1..a69d9e7 100644
@@ -367,6 +367,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ifnum = 0;
                goto add_sync_ep_from_ifnum;
        case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+       case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
        case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
        case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
                ep = 0x81;
index 5b36c58..ba4f338 100644
@@ -2861,6 +2861,7 @@ process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
        if (read_expected(TEP_EVENT_DELIM, ")") < 0)
                goto out_err;
 
+       free_token(token);
        type = read_token(&token);
        *tok = token;
 
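
The added free_token() call fixes a leak rooted in the tokenizer's out-parameter contract: read_token() stores a freshly allocated token through its argument, so whatever the caller still holds must be freed first. A hedged standalone illustration of the pattern, with strdup() standing in for the real tokenizer:

	/*
	 * Sketch of the out-parameter ownership pattern fixed above:
	 * each read overwrites *tok with a new allocation, so the caller
	 * must free the old value first. strdup() stands in for the real
	 * tokenizer; this is illustrative, not libtraceevent's API.
	 */
	#include <stdlib.h>
	#include <string.h>

	static int read_token_sim(char **tok, const char *next)
	{
		*tok = strdup(next);	/* new allocation replaces *tok */
		return *tok ? 0 : -1;
	}

	int main(void)
	{
		char *token = NULL;

		read_token_sim(&token, "dynamic_array_len");
		/* Without this free, the first token leaks when overwritten: */
		free(token);
		read_token_sim(&token, ")");
		free(token);
		return 0;
	}
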
index 349bb81..680d883 100644
@@ -197,7 +197,7 @@ define do_generate_dynamic_list_file
        xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
        if [ "$$symbol_type" = "U W" ];then                             \
                (echo '{';                                              \
-               $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
+               $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
                echo '};';                                              \
                ) > $2;                                                 \
        else                                                            \
index 0a6e75b..28a5d0c 100644
@@ -56,7 +56,7 @@ struct auxtrace_record
        struct perf_pmu *cs_etm_pmu;
        struct evsel *evsel;
        bool found_etm = false;
-       bool found_spe = false;
+       struct perf_pmu *found_spe = NULL;
        static struct perf_pmu **arm_spe_pmus = NULL;
        static int nr_spes = 0;
        int i = 0;
@@ -74,12 +74,12 @@ struct auxtrace_record
                    evsel->core.attr.type == cs_etm_pmu->type)
                        found_etm = true;
 
-               if (!nr_spes)
+               if (!nr_spes || found_spe)
                        continue;
 
                for (i = 0; i < nr_spes; i++) {
                        if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
-                               found_spe = true;
+                               found_spe = arm_spe_pmus[i];
                                break;
                        }
                }
@@ -96,7 +96,7 @@ struct auxtrace_record
 
 #if defined(__aarch64__)
        if (found_spe)
-               return arm_spe_recording_init(err, arm_spe_pmus[i]);
+               return arm_spe_recording_init(err, found_spe);
 #endif
 
        /*
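
The arm auxtrace fix above swaps a bool for a pointer to the matching PMU: once the loop can continue past the search, the index i may be stale by the time it is dereferenced, so the code now records the match itself. The same "store the match, not the index" pattern in isolation (hypothetical simplified types):

	/*
	 * Sketch of "remember the matched element, not its index": when a
	 * loop has multiple exit/continue paths, an index variable may be
	 * stale when used later; a pointer to the match cannot be.
	 */
	#include <stddef.h>
	#include <stdio.h>

	struct pmu { int type; const char *name; };

	static struct pmu pmus[] = { {10, "arm_spe_0"}, {11, "arm_spe_1"} };

	static struct pmu *find_pmu(int wanted)
	{
		struct pmu *found = NULL;	/* pointer, not bool + index */

		for (size_t i = 0; i < sizeof(pmus) / sizeof(pmus[0]); i++) {
			if (pmus[i].type == wanted) {
				found = &pmus[i];
				break;
			}
		}
		return found;	/* valid regardless of how the loop exited */
	}

	int main(void)
	{
		struct pmu *p = find_pmu(11);

		printf("%s\n", p ? p->name : "none");
		return 0;
	}
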
index 63a91ec..045723b 100755
@@ -12,7 +12,8 @@ skip_if_no_z_record() {
 
 collect_z_record() {
        echo "Collecting compressed record file:"
-       $perf_tool record -o $trace_file -g -z -F 5000 -- \
+       [[ "$(uname -m)" != s390x ]] && gflag='-g'
+       $perf_tool record -o $trace_file $gflag -z -F 5000 -- \
                dd count=500 if=/dev/urandom of=/dev/null
 }
 
index f7ee8fa..6ccecbd 100644
@@ -5,10 +5,60 @@
 
 #include "test_btf_map_in_map.skel.h"
 
+static int duration;
+
+static __u32 bpf_map_id(struct bpf_map *map)
+{
+       struct bpf_map_info info;
+       __u32 info_len = sizeof(info);
+       int err;
+
+       memset(&info, 0, info_len);
+       err = bpf_obj_get_info_by_fd(bpf_map__fd(map), &info, &info_len);
+       if (err)
+               return 0;
+       return info.id;
+}
+
+/*
+ * Trigger synchronize_rcu() in kernel.
+ *
+ * ARRAY_OF_MAPS/HASH_OF_MAPS lookup/update operations trigger synchronize_rcu()
+ * if looking up an existing non-NULL element or updating the map with a valid
+ * inner map FD. Use this fact to trigger synchronize_rcu(): create map-in-map,
+ * create a trivial ARRAY map, update map-in-map with ARRAY inner map, then
+ * clean up. By the end, at least one synchronize_rcu() will have run.
+ */
+static int kern_sync_rcu(void)
+{
+       int inner_map_fd, outer_map_fd, err, zero = 0;
+
+       inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);
+       if (CHECK(inner_map_fd < 0, "inner_map_create", "failed %d\n", -errno))
+               return -1;
+
+       outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
+                                            sizeof(int), inner_map_fd, 1, 0);
+       if (CHECK(outer_map_fd < 0, "outer_map_create", "failed %d\n", -errno)) {
+               close(inner_map_fd);
+               return -1;
+       }
+
+       err = bpf_map_update_elem(outer_map_fd, &zero, &inner_map_fd, 0);
+       if (err)
+               err = -errno;
+       CHECK(err, "outer_map_update", "failed %d\n", err);
+       close(inner_map_fd);
+       close(outer_map_fd);
+       return err;
+}
+
 void test_btf_map_in_map(void)
 {
-       int duration = 0, err, key = 0, val;
-       struct test_btf_map_in_map* skel;
+       int err, key = 0, val, i;
+       struct test_btf_map_in_map *skel;
+       int outer_arr_fd, outer_hash_fd;
+       int fd, map1_fd, map2_fd, map1_id, map2_id;
 
        skel = test_btf_map_in_map__open_and_load();
        if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -18,32 +68,78 @@ void test_btf_map_in_map(void)
        if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
                goto cleanup;
 
+       map1_fd = bpf_map__fd(skel->maps.inner_map1);
+       map2_fd = bpf_map__fd(skel->maps.inner_map2);
+       outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
+       outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
+
        /* inner1 = input, inner2 = input + 1 */
-       val = bpf_map__fd(skel->maps.inner_map1);
-       bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
-       val = bpf_map__fd(skel->maps.inner_map2);
-       bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
+       map1_fd = bpf_map__fd(skel->maps.inner_map1);
+       bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
+       map2_fd = bpf_map__fd(skel->maps.inner_map2);
+       bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
        skel->bss->input = 1;
        usleep(1);
 
-       bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+       bpf_map_lookup_elem(map1_fd, &key, &val);
        CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
-       bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+       bpf_map_lookup_elem(map2_fd, &key, &val);
        CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
 
        /* inner1 = input + 1, inner2 = input */
-       val = bpf_map__fd(skel->maps.inner_map2);
-       bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &key, &val, 0);
-       val = bpf_map__fd(skel->maps.inner_map1);
-       bpf_map_update_elem(bpf_map__fd(skel->maps.outer_hash), &key, &val, 0);
+       bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
+       bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
        skel->bss->input = 3;
        usleep(1);
 
-       bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map1), &key, &val);
+       bpf_map_lookup_elem(map1_fd, &key, &val);
        CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
-       bpf_map_lookup_elem(bpf_map__fd(skel->maps.inner_map2), &key, &val);
+       bpf_map_lookup_elem(map2_fd, &key, &val);
        CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
 
+       for (i = 0; i < 5; i++) {
+               val = i % 2 ? map1_fd : map2_fd;
+               err = bpf_map_update_elem(outer_hash_fd, &key, &val, 0);
+               if (CHECK_FAIL(err)) {
+                       printf("failed to update hash_of_maps on iter #%d\n", i);
+                       goto cleanup;
+               }
+               err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
+               if (CHECK_FAIL(err)) {
+                       printf("failed to update hash_of_maps on iter #%d\n", i);
+                       goto cleanup;
+               }
+       }
+
+       map1_id = bpf_map_id(skel->maps.inner_map1);
+       map2_id = bpf_map_id(skel->maps.inner_map2);
+       CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
+       CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
+
+       test_btf_map_in_map__destroy(skel);
+       skel = NULL;
+
+       /* We need to either wait for or force synchronize_rcu() before
+        * checking the "still exists" condition, otherwise the map could
+        * still be resolvable by ID, causing false positives.
+        *
+        * Older kernels (5.8 and earlier) freed the map only after two
+        * synchronize_rcu()s, so trigger two to be entirely sure.
+        */
+       CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+       CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
+
+       fd = bpf_map_get_fd_by_id(map1_id);
+       if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
+               close(fd);
+               goto cleanup;
+       }
+       fd = bpf_map_get_fd_by_id(map2_id);
+       if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
+               close(fd);
+               goto cleanup;
+       }
+
 cleanup:
        test_btf_map_in_map__destroy(skel);
 }
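
Condensed, the leak check above is a reusable recipe: capture the map's kernel ID while it is alive, drop all references, force RCU grace periods, then probe the ID. A hedged standalone sketch using the same libbpf calls as the test (error handling trimmed; not a helper from the tree):

	/*
	 * "Did the kernel object outlive its references?" probe, condensed
	 * from the test above. Assumes libbpf; illustrative only.
	 */
	#include <unistd.h>
	#include <bpf/bpf.h>

	static int map_leaked(__u32 map_id)
	{
		int fd = bpf_map_get_fd_by_id(map_id);

		if (fd >= 0) {		/* still resolvable: a reference leaked */
			close(fd);
			return 1;
		}
		return 0;		/* -ENOENT: the map is really gone */
	}

	int main(void)
	{
		struct bpf_map_info info = {};
		__u32 len = sizeof(info);
		int fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 4, 1, 0);

		bpf_obj_get_info_by_fd(fd, &info, &len);	/* ID while alive */
		close(fd);					/* drop the reference */
		/* (in the test, kern_sync_rcu() runs here, twice) */
		return map_leaked(info.id);
	}
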
index 8294ae3..43c9cda 100755
@@ -318,6 +318,9 @@ class DebugfsDir:
                 continue
 
             if os.path.isfile(p):
+                # We need to init trap_flow_action_cookie before reading it
+                if f == "trap_flow_action_cookie":
+                    cmd('echo deadbeef > %s/%s' % (path, f))
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
             elif os.path.isdir(p):
index 99f8f58..c5e8059 100644
        "perfevent for cgroup sockopt",
        .insns =  { __PERF_EVENT_INSNS__ },
        .prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT,
+       .expected_attach_type = BPF_CGROUP_SETSOCKOPT,
        .fixup_map_event_output = { 4 },
        .result = ACCEPT,
        .retval = 1,
index 54cdefd..d59f3eb 100644
@@ -76,10 +76,8 @@ void set_default_state(struct kvm_nested_state *state)
 void set_default_vmx_state(struct kvm_nested_state *state, int size)
 {
        memset(state, 0, size);
-       state->flags = KVM_STATE_NESTED_GUEST_MODE  |
-                       KVM_STATE_NESTED_RUN_PENDING;
        if (have_evmcs)
-               state->flags |= KVM_STATE_NESTED_EVMCS;
+               state->flags = KVM_STATE_NESTED_EVMCS;
        state->format = 0;
        state->size = size;
        state->hdr.vmx.vmxon_pa = 0x1000;
@@ -148,6 +146,11 @@ void test_vmx_nested_state(struct kvm_vm *vm)
        state->hdr.vmx.smm.flags = 1;
        test_nested_state_expect_einval(vm, state);
 
+       /* Invalid flags are rejected. */
+       set_default_vmx_state(state, state_sz);
+       state->hdr.vmx.flags = ~0;
+       test_nested_state_expect_einval(vm, state);
+
        /* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
        set_default_vmx_state(state, state_sz);
        state->hdr.vmx.vmxon_pa = -1ull;
@@ -185,20 +188,41 @@ void test_vmx_nested_state(struct kvm_vm *vm)
        state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
        test_nested_state_expect_einval(vm, state);
 
-       /* Size must be large enough to fit kvm_nested_state and vmcs12. */
+       /*
+        * Size must be large enough to fit kvm_nested_state and vmcs12
+        * if the VMCS12 physical address is set.
+        */
        set_default_vmx_state(state, state_sz);
        state->size = sizeof(*state);
+       state->flags = 0;
+       test_nested_state_expect_einval(vm, state);
+
+       set_default_vmx_state(state, state_sz);
+       state->size = sizeof(*state);
+       state->flags = 0;
+       state->hdr.vmx.vmcs12_pa = -1;
        test_nested_state(vm, state);
 
-       /* vmxon_pa cannot be the same address as vmcs_pa. */
+       /*
+        * KVM_SET_NESTED_STATE succeeds with invalid VMCS
+        * contents as long as L2 is not running.
+        */
        set_default_vmx_state(state, state_sz);
-       state->hdr.vmx.vmxon_pa = 0;
-       state->hdr.vmx.vmcs12_pa = 0;
+       state->flags = 0;
+       test_nested_state(vm, state);
+
+       /* Invalid flags are rejected, even if no VMCS loaded. */
+       set_default_vmx_state(state, state_sz);
+       state->size = sizeof(*state);
+       state->flags = 0;
+       state->hdr.vmx.vmcs12_pa = -1;
+       state->hdr.vmx.flags = ~0;
        test_nested_state_expect_einval(vm, state);
 
-       /* The revision id for vmcs12 must be VMCS12_REVISION. */
+       /* vmxon_pa cannot be the same address as vmcs_pa. */
        set_default_vmx_state(state, state_sz);
-       set_revision_id_for_vmcs12(state, 0);
+       state->hdr.vmx.vmxon_pa = 0;
+       state->hdr.vmx.vmcs12_pa = 0;
        test_nested_state_expect_einval(vm, state);
 
        /*
index eb8e2a2..43a948f 100755
@@ -252,8 +252,6 @@ check_highest_speed_is_chosen()
        fi
 
        local -a speeds_arr=($(common_speeds_get $h1 $h2 0 1))
-       # Remove the first speed, h1 does not advertise this speed.
-       unset speeds_arr[0]
 
        max_speed=${speeds_arr[0]}
        for current in ${speeds_arr[@]}; do
index 8c8c7d7..2c522f7 100644
@@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
        int fds[2], fds_udp[2][2], ret;
 
        fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
-               typeflags, PORT_BASE, PORT_BASE + port_off);
+               typeflags, (uint16_t)PORT_BASE,
+               (uint16_t)(PORT_BASE + port_off));
 
        fds[0] = sock_fanout_open(typeflags, 0);
        fds[1] = sock_fanout_open(typeflags, 0);
index 422e776..bcb79ba 100644
@@ -329,8 +329,7 @@ int main(int argc, char **argv)
        bool all_tests = true;
        int arg_index = 0;
        int failures = 0;
-       int s, t;
-       char opt;
+       int s, t, opt;
 
        while ((opt = getopt_long(argc, argv, "", long_options,
                                  &arg_index)) != -1) {
index ceaad78..3155fbb 100644
@@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
        if (rbuf[0] != ts->data)
                error(1, 0, "payload mismatch. expected %c", ts->data);
 
-       if (labs(tstop - texpect) > cfg_variance_us)
+       if (llabs(tstop - texpect) > cfg_variance_us)
                error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
 
        return false;
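
The labs() to llabs() switch guards 32-bit builds: the timestamps here are 64-bit microsecond values, and labs() takes a long, which is only 32 bits on ILP32 targets, silently truncating the difference before the absolute value is taken. A short sketch of the distinction:

	/*
	 * labs() vs llabs(): labs() takes long (32-bit on ILP32), so a
	 * 64-bit delta passed to it is truncated first; llabs() takes
	 * long long and is correct on all common ABIs.
	 */
	#include <inttypes.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		int64_t delta = -5000000000LL;	/* about -5 s in microseconds */

		printf("sizeof(long) = %zu\n", sizeof(long));
		printf("llabs(delta) = %" PRId64 "\n", (int64_t)llabs(delta));
		/* On ILP32, labs(delta) would operate on a truncated value. */
		return 0;
	}
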
index 4555f88..a61b7b3 100644
@@ -344,7 +344,7 @@ int main(int argc, char *argv[])
 {
        struct sockaddr_storage listenaddr, addr;
        unsigned int max_pacing_rate = 0;
-       size_t total = 0;
+       uint64_t total = 0;
        char *host = NULL;
        int fd, c, on = 1;
        char *buffer;
@@ -473,12 +473,12 @@ int main(int argc, char *argv[])
                zflg = 0;
        }
        while (total < FILE_SZ) {
-               ssize_t wr = FILE_SZ - total;
+               int64_t wr = FILE_SZ - total;
 
                if (wr > chunk_size)
                        wr = chunk_size;
                /* Note: we just want to fill the pipe with 0 bytes */
-               wr = send(fd, buffer, wr, zflg ? MSG_ZEROCOPY : 0);
+               wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0);
                if (wr <= 0)
                        break;
                total += wr;
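
Widening total to uint64_t (and wr to int64_t) protects 32-bit userland: there size_t is 32 bits, so once FILE_SZ exceeds 4 GiB the running total wraps and while (total < FILE_SZ) never terminates. A hedged sketch of the wraparound; the 8 GiB figure is illustrative, not the test's actual FILE_SZ:

	/*
	 * Why a 64-bit counter: with a 32-bit size_t, accumulating past
	 * 4 GiB wraps to a small value and "total < FILE_SZ" stays true
	 * forever. uint32_t stands in for 32-bit size_t below.
	 */
	#include <inttypes.h>
	#include <stdio.h>

	#define FILE_SZ (8ULL * 1024 * 1024 * 1024)	/* 8 GiB, assumed */

	int main(void)
	{
		uint32_t narrow = 0;			/* models 32-bit size_t */
		uint64_t wide = 0;
		uint64_t chunk = 1024 * 1024 * 1024;	/* 1 GiB per "send" */

		for (int i = 0; i < 8; i++) {
			narrow += chunk;	/* wraps modulo 2^32 */
			wide += chunk;
		}
		printf("32-bit total: %" PRIu32 " (%s)\n", narrow,
		       narrow < FILE_SZ ? "wrapped, loop would continue"
					: "ok");
		printf("64-bit total: %" PRIu64 "\n", wide);
		return 0;
	}
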