Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author	Jakub Kicinski <kuba@kernel.org>
Thu, 3 Dec 2020 23:42:13 +0000 (15:42 -0800)
committer	Jakub Kicinski <kuba@kernel.org>
Thu, 3 Dec 2020 23:44:09 +0000 (15:44 -0800)
Conflicts:
drivers/net/ethernet/ibm/ibmvnic.c

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
177 files changed:
Documentation/admin-guide/bootconfig.rst
Documentation/devicetree/bindings/net/can/tcan4x5x.txt
Documentation/devicetree/bindings/net/nfc/nxp-nci.txt
Documentation/devicetree/bindings/net/nfc/pn544.txt
Documentation/kbuild/llvm.rst
MAINTAINERS
Makefile
arch/alpha/kernel/process.c
arch/arm/kernel/process.c
arch/arm64/include/asm/daifflags.h
arch/arm64/include/asm/exception.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/irq.c
arch/arm64/kernel/process.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/syscall.c
arch/arm64/kernel/traps.c
arch/arm64/mm/fault.c
arch/csky/kernel/process.c
arch/h8300/kernel/process.c
arch/hexagon/kernel/process.c
arch/ia64/kernel/process.c
arch/microblaze/kernel/process.c
arch/mips/kernel/idle.c
arch/nios2/kernel/process.c
arch/openrisc/kernel/process.c
arch/parisc/kernel/process.c
arch/powerpc/kernel/idle.c
arch/riscv/include/asm/vdso/processor.h
arch/riscv/kernel/process.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/vdso/Makefile
arch/s390/kernel/entry.S
arch/s390/kernel/idle.c
arch/s390/lib/delay.c
arch/s390/pci/pci_irq.c
arch/sh/kernel/idle.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/um/kernel/process.c
arch/x86/include/asm/mwait.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/process.c
drivers/Makefile
drivers/firmware/efi/Kconfig
drivers/firmware/efi/efi.c
drivers/idle/intel_idle.c
drivers/interconnect/core.c
drivers/interconnect/qcom/msm8916.c
drivers/interconnect/qcom/msm8974.c
drivers/interconnect/qcom/qcs404.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-sni-exiu.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/net/can/c_can/c_can.c
drivers/net/can/kvaser_pciefd.c
drivers/net/can/m_can/tcan4x5x.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sun4i_can.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/geneve.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/realtek/rtw88/debug.c
drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c
drivers/phy/intel/Kconfig
drivers/phy/mediatek/Kconfig
drivers/phy/motorola/phy-cpcap-usb.c
drivers/phy/qualcomm/Kconfig
drivers/phy/qualcomm/phy-qcom-qmp.c
drivers/phy/tegra/xusb.c
drivers/pwm/pwm-sl28cpld.c
drivers/usb/cdns3/gadget.c
drivers/usb/core/devio.c
drivers/usb/core/quirks.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/typec/Kconfig
drivers/usb/typec/stusb160x.c
drivers/vdpa/Kconfig
drivers/vhost/scsi.c
drivers/vhost/vdpa.c
drivers/vhost/vringh.c
fs/9p/vfs_file.c
fs/cifs/connect.c
fs/cifs/transport.c
fs/efivarfs/inode.c
fs/efivarfs/super.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/rgrp.c
include/linux/bootconfig.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/net/inet_ecn.h
include/net/netfilter/nf_tables_offload.h
include/net/xdp_sock.h
include/uapi/linux/stat.h
init/main.c
kernel/sched/idle.c
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_hwlat.c
lib/syscall.c
net/batman-adv/fragmentation.c
net/batman-adv/hard-interface.c
net/bridge/br_netfilter_hooks.c
net/core/dev.c
net/core/skbuff.c
net/ipv4/route.c
net/ipv6/ip6_gre.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nft_cmp.c
net/netfilter/nft_meta.c
net/netfilter/nft_payload.c
net/openvswitch/actions.c
net/sched/act_mpls.c
net/tipc/node.c
net/x25/af_x25.c
net/xdp/xdp_umem.c
net/xdp/xdp_umem.h
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
samples/ftrace/ftrace-direct-modify.c
samples/ftrace/ftrace-direct-too.c
samples/ftrace/ftrace-direct.c
scripts/package/builddeb
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/patch_realtek.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass-lpaif-reg.h
sound/soc/qcom/lpass-platform.c
sound/soc/qcom/lpass.h
sound/usb/mixer_us16x08.c
tools/bootconfig/main.c
tools/bootconfig/test-bootconfig.sh
tools/bpf/bpftool/btf.c
tools/perf/builtin-diff.c
tools/perf/util/dwarf-aux.c
tools/perf/util/hashmap.h
tools/perf/util/probe-finder.c
tools/perf/util/stat-display.c
tools/perf/util/synthetic-events.c
tools/testing/selftests/tc-testing/config

index a22024f..9b90efc 100644 (file)
@@ -137,15 +137,24 @@ Boot Kernel With a Boot Config
 ==============================
 
 Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file with size, checksum and
-12-byte magic word as below.
+to the end of the initrd (initramfs) image file with padding, size,
+checksum and 12-byte magic word as below.
 
-[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
+[initrd][bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]
+
+The size and checksum fields are unsigned 32-bit little-endian values.
+
+When the boot configuration is added to the initrd image, the total
+file size is aligned to 4 bytes. To fill the gap, null characters
+(``\0``) will be added. Thus the ``size`` is the length of the bootconfig
+file + padding bytes.
 
 The Linux kernel decodes the last part of the initrd image in memory to
 get the boot configuration data.
 Because of this "piggyback" method, there is no need to change or
-update the boot loader and the kernel image itself.
+update the boot loader and the kernel image itself as long as the boot
+loader passes the correct initrd file size. If by any chance the boot
+loader passes a longer size, the kernel fails to find the bootconfig data.
 
 To do this operation, Linux kernel provides "bootconfig" command under
 tools/bootconfig, which allows admin to apply or delete the config file
@@ -176,7 +185,8 @@ up to 512 key-value pairs. If keys contains 3 words in average, it can
 contain 256 key-value pairs. In most cases, the number of config items
 will be under 100 entries and smaller than 8KB, so it would be enough.
 If the node number exceeds 1024, parser returns an error even if the file
-size is smaller than 32KB.
+size is smaller than 32KB. (Note that this maximum size does not include
+the padding null characters.)
 Anyway, since bootconfig command verifies it when appending a boot config
 to initrd image, user can notice it before boot.
 
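For reference, a minimal user-space sketch of the footer layout described in the bootconfig.rst hunk above. This is not the in-tree tools/bootconfig utility, which remains the authoritative implementation; the plain byte-sum checksum and the little-endian host are assumptions here.

/*
 * Append [bootconfig][padding][size(le32)][checksum(le32)][#BOOTCONFIG\n]
 * at the end of an initrd image held in memory.
 */
#include <stdint.h>
#include <string.h>

#define BOOTCONFIG_MAGIC	"#BOOTCONFIG\n"	/* 12 bytes, no trailing NUL */
#define BOOTCONFIG_ALIGN	4

/* Assumed checksum: plain 32-bit sum of the bootconfig bytes. */
static uint32_t sum32(const uint8_t *buf, uint32_t len)
{
	uint32_t csum = 0;

	while (len--)
		csum += *buf++;
	return csum;
}

/*
 * dst holds the initrd (initrd_len bytes) and must have room for
 * bc_len + 3 + 8 + 12 extra bytes. Returns the new total file length.
 */
static size_t append_bootconfig(uint8_t *dst, size_t initrd_len,
				const uint8_t *bc, uint32_t bc_len)
{
	/* Pad with NULs so the final file size stays 4-byte aligned. */
	uint32_t pad = (BOOTCONFIG_ALIGN -
			(initrd_len + bc_len) % BOOTCONFIG_ALIGN) % BOOTCONFIG_ALIGN;
	uint32_t size = bc_len + pad;	/* stored size includes the padding */
	uint32_t csum = sum32(bc, bc_len);
	uint8_t *p = dst + initrd_len;

	memcpy(p, bc, bc_len);
	p += bc_len;
	memset(p, 0, pad);
	p += pad;
	memcpy(p, &size, sizeof(size));	/* little-endian host assumed */
	p += sizeof(size);
	memcpy(p, &csum, sizeof(csum));
	p += sizeof(csum);
	memcpy(p, BOOTCONFIG_MAGIC, 12);
	p += 12;

	return p - dst;
}

Because the footer is located relative to the end of the image, only the initrd size reported by the boot loader has to be exact, as the hunk above notes.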
index 3613c2c..0968b40 100644 (file)
@@ -33,7 +33,7 @@ tcan4x5x: tcan4x5x@0 {
                spi-max-frequency = <10000000>;
                bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
                interrupt-parent = <&gpio1>;
-               interrupts = <14 GPIO_ACTIVE_LOW>;
+               interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
                device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
                device-wake-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
                reset-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
index cb2385c..285a37c 100644 (file)
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with NPC100 NFC controller on I2C2):
                clock-frequency = <100000>;
 
                interrupt-parent = <&gpio1>;
-               interrupts = <29 GPIO_ACTIVE_HIGH>;
+               interrupts = <29 IRQ_TYPE_LEVEL_HIGH>;
 
                enable-gpios = <&gpio0 30 GPIO_ACTIVE_HIGH>;
                firmware-gpios = <&gpio0 31 GPIO_ACTIVE_HIGH>;
index 92f399e..2bd8256 100644 (file)
@@ -25,7 +25,7 @@ Example (for ARM-based BeagleBone with PN544 on I2C2):
                clock-frequency = <400000>;
 
                interrupt-parent = <&gpio1>;
-               interrupts = <17 GPIO_ACTIVE_HIGH>;
+               interrupts = <17 IRQ_TYPE_LEVEL_HIGH>;
 
                enable-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
                firmware-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
index cf3ca23..21c8478 100644 (file)
@@ -57,9 +57,8 @@ to enable them. ::
 They can be enabled individually. The full list of the parameters: ::
 
        make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
-         OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \
-         READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \
-         HOSTLD=ld.lld
+         OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \
+         HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld
 
 Currently, the integrated assembler is disabled by default. You can pass
 ``LLVM_IAS=1`` to enable it.
index a7bdebf..061e64b 100644 (file)
@@ -1724,11 +1724,13 @@ F:      arch/arm/mach-ep93xx/micro9.c
 
 ARM/CORESIGHT FRAMEWORK AND DRIVERS
 M:     Mathieu Poirier <mathieu.poirier@linaro.org>
-R:     Suzuki K Poulose <suzuki.poulose@arm.com>
+M:     Suzuki K Poulose <suzuki.poulose@arm.com>
 R:     Mike Leach <mike.leach@linaro.org>
+R:     Leo Yan <leo.yan@linaro.org>
 L:     coresight@lists.linaro.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/coresight/linux.git
 F:     Documentation/ABI/testing/sysfs-bus-coresight-devices-*
 F:     Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
 F:     Documentation/devicetree/bindings/arm/coresight-cti.yaml
@@ -3355,6 +3357,17 @@ S:       Supported
 F:     arch/x86/net/
 X:     arch/x86/net/bpf_jit_comp32.c
 
+BPF LSM (Security Audit and Enforcement using BPF)
+M:     KP Singh <kpsingh@chromium.org>
+R:     Florent Revest <revest@chromium.org>
+R:     Brendan Jackman <jackmanb@chromium.org>
+L:     bpf@vger.kernel.org
+S:     Maintained
+F:     Documentation/bpf/bpf_lsm.rst
+F:     include/linux/bpf_lsm.h
+F:     kernel/bpf/bpf_lsm.c
+F:     security/bpf/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
@@ -9070,10 +9083,7 @@ S:       Supported
 F:     drivers/net/wireless/intel/iwlegacy/
 
 INTEL WIRELESS WIFI LINK (iwlwifi)
-M:     Johannes Berg <johannes.berg@intel.com>
-M:     Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 M:     Luca Coelho <luciano.coelho@intel.com>
-M:     Intel Linux Wireless <linuxwifi@intel.com>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
@@ -19096,12 +19106,17 @@ L:    netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Supported
 F:     include/net/xdp.h
+F:     include/net/xdp_priv.h
 F:     include/trace/events/xdp.h
 F:     kernel/bpf/cpumap.c
 F:     kernel/bpf/devmap.c
 F:     net/core/xdp.c
-N:     xdp
-K:     xdp
+F:     samples/bpf/xdp*
+F:     tools/testing/selftests/bpf/*xdp*
+F:     tools/testing/selftests/bpf/*/*xdp*
+F:     drivers/net/ethernet/*/*/*/*/*xdp*
+F:     drivers/net/ethernet/*/*/*xdp*
+K:     (?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
 M:     Björn Töpel <bjorn.topel@intel.com>
@@ -19110,9 +19125,12 @@ R:     Jonathan Lemon <jonathan.lemon@gmail.com>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
+F:     Documentation/networking/af_xdp.rst
 F:     include/net/xdp_sock*
 F:     include/net/xsk_buff_pool.h
 F:     include/uapi/linux/if_xdp.h
+F:     include/uapi/linux/xdp_diag.h
+F:     include/net/netns/xdp.h
 F:     net/xdp/
 F:     samples/bpf/xdpsock*
 F:     tools/lib/bpf/xsk*
index ed081e3..43ecede 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -433,7 +433,6 @@ NM          = llvm-nm
 OBJCOPY                = llvm-objcopy
 OBJDUMP                = llvm-objdump
 READELF                = llvm-readelf
-OBJSIZE                = llvm-size
 STRIP          = llvm-strip
 else
 CC             = $(CROSS_COMPILE)gcc
@@ -443,7 +442,6 @@ NM          = $(CROSS_COMPILE)nm
 OBJCOPY                = $(CROSS_COMPILE)objcopy
 OBJDUMP                = $(CROSS_COMPILE)objdump
 READELF                = $(CROSS_COMPILE)readelf
-OBJSIZE                = $(CROSS_COMPILE)size
 STRIP          = $(CROSS_COMPILE)strip
 endif
 PAHOLE         = pahole
@@ -509,7 +507,7 @@ KBUILD_LDFLAGS :=
 CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
+export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
 export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
index 7462a79..4c7b041 100644 (file)
@@ -57,7 +57,7 @@ EXPORT_SYMBOL(pm_power_off);
 void arch_cpu_idle(void)
 {
        wtint(0);
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 void arch_cpu_idle_dead(void)
index 8e6ace0..9f199b1 100644 (file)
@@ -71,7 +71,7 @@ void arch_cpu_idle(void)
                arm_pm_idle();
        else
                cpu_do_idle();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 void arch_cpu_idle_prepare(void)
index ec213b4..1c26d7b 100644 (file)
@@ -128,6 +128,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
 {
        unsigned long flags = regs->pstate & DAIF_MASK;
 
+       if (interrupts_enabled(regs))
+               trace_hardirqs_on();
+
        /*
         * We can't use local_daif_restore(regs->pstate) here as
         * system_has_prio_mask_debugging() won't restore the I bit if it can
index 99b9383..0756191 100644 (file)
@@ -31,7 +31,12 @@ static inline u32 disr_to_esr(u64 disr)
        return esr;
 }
 
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
 asmlinkage void enter_from_user_mode(void);
+asmlinkage void exit_to_user_mode(void);
+void arm64_enter_nmi(struct pt_regs *regs);
+void arm64_exit_nmi(struct pt_regs *regs);
 void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
index 997cf8c..28c85b8 100644 (file)
@@ -193,6 +193,10 @@ struct pt_regs {
        /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
        u64 pmr_save;
        u64 stackframe[2];
+
+       /* Only valid for some EL1 exceptions. */
+       u64 lockdep_hardirqs;
+       u64 exit_rcu;
 };
 
 static inline bool in_syscall(struct pt_regs const *regs)
index e2ef4c2..801861d 100644 (file)
 #define SYS_TFSR_EL1_TF0_SHIFT 0
 #define SYS_TFSR_EL1_TF1_SHIFT 1
 #define SYS_TFSR_EL1_TF0       (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
-#define SYS_TFSR_EL1_TF1       (UK(2) << SYS_TFSR_EL1_TF1_SHIFT)
+#define SYS_TFSR_EL1_TF1       (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
 
 /* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
 #define SYS_MPIDR_SAFE_VAL     (BIT(31))
index 43d4c32..70e0a75 100644 (file)
 #include <asm/mmu.h>
 #include <asm/sysreg.h>
 
-static void notrace el1_abort(struct pt_regs *regs, unsigned long esr)
+/*
+ * This is intended to match the logic in irqentry_enter(), handling the kernel
+ * mode transitions only.
+ */
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+       regs->exit_rcu = false;
+
+       if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+               lockdep_hardirqs_off(CALLER_ADDR0);
+               rcu_irq_enter();
+               trace_hardirqs_off_finish();
+
+               regs->exit_rcu = true;
+               return;
+       }
+
+       lockdep_hardirqs_off(CALLER_ADDR0);
+       rcu_irq_enter_check_tick();
+       trace_hardirqs_off_finish();
+}
+
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+       lockdep_assert_irqs_disabled();
+
+       if (interrupts_enabled(regs)) {
+               if (regs->exit_rcu) {
+                       trace_hardirqs_on_prepare();
+                       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+                       rcu_irq_exit();
+                       lockdep_hardirqs_on(CALLER_ADDR0);
+                       return;
+               }
+
+               trace_hardirqs_on();
+       } else {
+               if (regs->exit_rcu)
+                       rcu_irq_exit();
+       }
+}
+
+void noinstr arm64_enter_nmi(struct pt_regs *regs)
+{
+       regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+       __nmi_enter();
+       lockdep_hardirqs_off(CALLER_ADDR0);
+       lockdep_hardirq_enter();
+       rcu_nmi_enter();
+
+       trace_hardirqs_off_finish();
+       ftrace_nmi_enter();
+}
+
+void noinstr arm64_exit_nmi(struct pt_regs *regs)
+{
+       bool restore = regs->lockdep_hardirqs;
+
+       ftrace_nmi_exit();
+       if (restore) {
+               trace_hardirqs_on_prepare();
+               lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+       }
+
+       rcu_nmi_exit();
+       lockdep_hardirq_exit();
+       if (restore)
+               lockdep_hardirqs_on(CALLER_ADDR0);
+       __nmi_exit();
+}
+
+asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+{
+       if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+               arm64_enter_nmi(regs);
+       else
+               enter_from_kernel_mode(regs);
+}
+
+asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+{
+       if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+               arm64_exit_nmi(regs);
+       else
+               exit_to_kernel_mode(regs);
+}
+
+static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
 
+       enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        far = untagged_addr(far);
        do_mem_abort(far, esr, regs);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
 }
-NOKPROBE_SYMBOL(el1_abort);
 
-static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
 
+       enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_sp_pc_abort(far, esr, regs);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
 }
-NOKPROBE_SYMBOL(el1_pc);
 
-static void notrace el1_undef(struct pt_regs *regs)
+static void noinstr el1_undef(struct pt_regs *regs)
 {
+       enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_undefinstr(regs);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
 }
-NOKPROBE_SYMBOL(el1_undef);
 
-static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
 {
+       enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        bad_mode(regs, 0, esr);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
 }
-NOKPROBE_SYMBOL(el1_inv);
 
-static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
+{
+       regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
+
+       lockdep_hardirqs_off(CALLER_ADDR0);
+       rcu_nmi_enter();
+
+       trace_hardirqs_off_finish();
+}
+
+static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
+{
+       bool restore = regs->lockdep_hardirqs;
+
+       if (restore) {
+               trace_hardirqs_on_prepare();
+               lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+       }
+
+       rcu_nmi_exit();
+       if (restore)
+               lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
 
@@ -62,18 +186,21 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
+       arm64_enter_el1_dbg(regs);
        do_debug_exception(far, esr, regs);
+       arm64_exit_el1_dbg(regs);
 }
-NOKPROBE_SYMBOL(el1_dbg);
 
-static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
 {
+       enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_ptrauth_fault(regs, esr);
+       local_daif_mask();
+       exit_to_kernel_mode(regs);
 }
-NOKPROBE_SYMBOL(el1_fpac);
 
-asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
@@ -106,20 +233,34 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
                el1_inv(regs, esr);
        }
 }
-NOKPROBE_SYMBOL(el1_sync_handler);
 
-static void notrace el0_da(struct pt_regs *regs, unsigned long esr)
+asmlinkage void noinstr enter_from_user_mode(void)
+{
+       lockdep_hardirqs_off(CALLER_ADDR0);
+       CT_WARN_ON(ct_state() != CONTEXT_USER);
+       user_exit_irqoff();
+       trace_hardirqs_off_finish();
+}
+
+asmlinkage void noinstr exit_to_user_mode(void)
+{
+       trace_hardirqs_on_prepare();
+       lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+       user_enter_irqoff();
+       lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
 
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        far = untagged_addr(far);
        do_mem_abort(far, esr, regs);
 }
-NOKPROBE_SYMBOL(el0_da);
 
-static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
 
@@ -131,90 +272,80 @@ static void notrace el0_ia(struct pt_regs *regs, unsigned long esr)
        if (!is_ttbr0_addr(far))
                arm64_apply_bp_hardening();
 
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
 }
-NOKPROBE_SYMBOL(el0_ia);
 
-static void notrace el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_acc(esr, regs);
 }
-NOKPROBE_SYMBOL(el0_fpsimd_acc);
 
-static void notrace el0_sve_acc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sve_acc(esr, regs);
 }
-NOKPROBE_SYMBOL(el0_sve_acc);
 
-static void notrace el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_exc(esr, regs);
 }
-NOKPROBE_SYMBOL(el0_fpsimd_exc);
 
-static void notrace el0_sys(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sysinstr(esr, regs);
 }
-NOKPROBE_SYMBOL(el0_sys);
 
-static void notrace el0_pc(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
 {
        unsigned long far = read_sysreg(far_el1);
 
        if (!is_ttbr0_addr(instruction_pointer(regs)))
                arm64_apply_bp_hardening();
 
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(far, esr, regs);
 }
-NOKPROBE_SYMBOL(el0_pc);
 
-static void notrace el0_sp(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(regs->sp, esr, regs);
 }
-NOKPROBE_SYMBOL(el0_sp);
 
-static void notrace el0_undef(struct pt_regs *regs)
+static void noinstr el0_undef(struct pt_regs *regs)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_undefinstr(regs);
 }
-NOKPROBE_SYMBOL(el0_undef);
 
-static void notrace el0_bti(struct pt_regs *regs)
+static void noinstr el0_bti(struct pt_regs *regs)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_bti(regs);
 }
-NOKPROBE_SYMBOL(el0_bti);
 
-static void notrace el0_inv(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        bad_el0_sync(regs, 0, esr);
 }
-NOKPROBE_SYMBOL(el0_inv);
 
-static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 {
        /* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
        unsigned long far = read_sysreg(far_el1);
@@ -222,30 +353,28 @@ static void notrace el0_dbg(struct pt_regs *regs, unsigned long esr)
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-       user_exit_irqoff();
+       enter_from_user_mode();
        do_debug_exception(far, esr, regs);
        local_daif_restore(DAIF_PROCCTX_NOIRQ);
 }
-NOKPROBE_SYMBOL(el0_dbg);
 
-static void notrace el0_svc(struct pt_regs *regs)
+static void noinstr el0_svc(struct pt_regs *regs)
 {
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
+       enter_from_user_mode();
        do_el0_svc(regs);
 }
-NOKPROBE_SYMBOL(el0_svc);
 
-static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_ptrauth_fault(regs, esr);
 }
-NOKPROBE_SYMBOL(el0_fpac);
 
-asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
@@ -297,27 +426,25 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
                el0_inv(regs, esr);
        }
 }
-NOKPROBE_SYMBOL(el0_sync_handler);
 
 #ifdef CONFIG_COMPAT
-static void notrace el0_cp15(struct pt_regs *regs, unsigned long esr)
+static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 {
-       user_exit_irqoff();
+       enter_from_user_mode();
        local_daif_restore(DAIF_PROCCTX);
        do_cp15instr(esr, regs);
 }
-NOKPROBE_SYMBOL(el0_cp15);
 
-static void notrace el0_svc_compat(struct pt_regs *regs)
+static void noinstr el0_svc_compat(struct pt_regs *regs)
 {
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
+       enter_from_user_mode();
        do_el0_svc_compat(regs);
 }
-NOKPROBE_SYMBOL(el0_svc_compat);
 
-asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
 {
        unsigned long esr = read_sysreg(esr_el1);
 
@@ -360,5 +487,4 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
                el0_inv(regs, esr);
        }
 }
-NOKPROBE_SYMBOL(el0_sync_compat_handler);
 #endif /* CONFIG_COMPAT */
index b295fb9..d72c818 100644 (file)
 #include <asm/unistd.h>
 
 /*
- * Context tracking subsystem.  Used to instrument transitions
- * between user and kernel mode.
+ * Context tracking and irqflag tracing need to instrument transitions between
+ * user and kernel mode.
  */
-       .macro ct_user_exit_irqoff
-#ifdef CONFIG_CONTEXT_TRACKING
+       .macro user_exit_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
        bl      enter_from_user_mode
 #endif
        .endm
 
-       .macro ct_user_enter
-#ifdef CONFIG_CONTEXT_TRACKING
-       bl      context_tracking_user_enter
+       .macro user_enter_irqoff
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+       bl      exit_to_user_mode
 #endif
        .endm
 
@@ -298,9 +298,6 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 alternative_else_nop_endif
 
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
-       .if     \el == 0
-       ct_user_enter
-       .endif
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 alternative_if_not ARM64_HAS_PAN
@@ -637,16 +634,8 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
        gic_prio_irq_setup pmr=x20, tmp=x1
        enable_da_f
 
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-       test_irqs_unmasked      res=x0, pmr=x20
-       cbz     x0, 1f
-       bl      asm_nmi_enter
-1:
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_off
-#endif
+       mov     x0, sp
+       bl      enter_el1_irq_or_nmi
 
        irq_handler
 
@@ -665,26 +654,8 @@ alternative_else_nop_endif
 1:
 #endif
 
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-       /*
-        * When using IRQ priority masking, we can get spurious interrupts while
-        * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
-        * section with interrupts disabled. Skip tracing in those cases.
-        */
-       test_irqs_unmasked      res=x0, pmr=x20
-       cbz     x0, 1f
-       bl      asm_nmi_exit
-1:
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-       test_irqs_unmasked      res=x0, pmr=x20
-       cbnz    x0, 1f
-#endif
-       bl      trace_hardirqs_on
-1:
-#endif
+       mov     x0, sp
+       bl      exit_el1_irq_or_nmi
 
        kernel_exit 1
 SYM_CODE_END(el1_irq)
@@ -726,21 +697,14 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
        kernel_entry 0
 el0_irq_naked:
        gic_prio_irq_setup pmr=x20, tmp=x0
-       ct_user_exit_irqoff
+       user_exit_irqoff
        enable_da_f
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_off
-#endif
-
        tbz     x22, #55, 1f
        bl      do_el0_irq_bp_hardening
 1:
        irq_handler
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_on
-#endif
        b       ret_to_user
 SYM_CODE_END(el0_irq)
 
@@ -759,7 +723,7 @@ SYM_CODE_START_LOCAL(el0_error)
 el0_error_naked:
        mrs     x25, esr_el1
        gic_prio_kentry_setup tmp=x2
-       ct_user_exit_irqoff
+       user_exit_irqoff
        enable_dbg
        mov     x0, sp
        mov     x1, x25
@@ -774,13 +738,17 @@ SYM_CODE_END(el0_error)
 SYM_CODE_START_LOCAL(ret_to_user)
        disable_daif
        gic_prio_kentry_setup tmp=x3
-       ldr     x1, [tsk, #TSK_TI_FLAGS]
-       and     x2, x1, #_TIF_WORK_MASK
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bl      trace_hardirqs_off
+#endif
+       ldr     x19, [tsk, #TSK_TI_FLAGS]
+       and     x2, x19, #_TIF_WORK_MASK
        cbnz    x2, work_pending
 finish_ret_to_user:
+       user_enter_irqoff
        /* Ignore asynchronous tag check faults in the uaccess routines */
        clear_mte_async_tcf
-       enable_step_tsk x1, x2
+       enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
        bl      stackleak_erase
 #endif
@@ -791,11 +759,9 @@ finish_ret_to_user:
  */
 work_pending:
        mov     x0, sp                          // 'regs'
+       mov     x1, x19
        bl      do_notify_resume
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bl      trace_hardirqs_on               // enabled while in userspace
-#endif
-       ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
+       ldr     x19, [tsk, #TSK_TI_FLAGS]       // re-check for single-step
        b       finish_ret_to_user
 SYM_CODE_END(ret_to_user)
 
index 9cf2fb8..60456a6 100644 (file)
@@ -67,18 +67,3 @@ void __init init_IRQ(void)
                local_daif_restore(DAIF_PROCCTX_NOIRQ);
        }
 }
-
-/*
- * Stubs to make nmi_enter/exit() code callable from ASM
- */
-asmlinkage void notrace asm_nmi_enter(void)
-{
-       nmi_enter();
-}
-NOKPROBE_SYMBOL(asm_nmi_enter);
-
-asmlinkage void notrace asm_nmi_exit(void)
-{
-       nmi_exit();
-}
-NOKPROBE_SYMBOL(asm_nmi_exit);
index a47a40e..ed919f6 100644 (file)
@@ -72,13 +72,13 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
-static void __cpu_do_idle(void)
+static void noinstr __cpu_do_idle(void)
 {
        dsb(sy);
        wfi();
 }
 
-static void __cpu_do_idle_irqprio(void)
+static void noinstr __cpu_do_idle_irqprio(void)
 {
        unsigned long pmr;
        unsigned long daif_bits;
@@ -108,7 +108,7 @@ static void __cpu_do_idle_irqprio(void)
  *     ensure that interrupts are not masked at the PMR (because the core will
  *     not wake up if we block the wake up signal in the interrupt controller).
  */
-void cpu_do_idle(void)
+void noinstr cpu_do_idle(void)
 {
        if (system_uses_irq_prio_masking())
                __cpu_do_idle_irqprio();
@@ -119,14 +119,14 @@ void cpu_do_idle(void)
 /*
  * This is our default idle handler.
  */
-void arch_cpu_idle(void)
+void noinstr arch_cpu_idle(void)
 {
        /*
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
        cpu_do_idle();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index 7689f20..793c46d 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/alternative.h>
+#include <asm/exception.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
 #include <asm/ptrace.h>
@@ -223,16 +224,16 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
 }
 
 
-asmlinkage __kprobes notrace unsigned long
+asmlinkage noinstr unsigned long
 __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
 {
        unsigned long ret;
 
-       nmi_enter();
+       arm64_enter_nmi(regs);
 
        ret = _sdei_handler(regs, arg);
 
-       nmi_exit();
+       arm64_exit_nmi(regs);
 
        return ret;
 }
index e4c0dad..f8f758e 100644 (file)
@@ -121,7 +121,6 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 
        cortex_a76_erratum_1463225_svc_handler();
        local_daif_restore(DAIF_PROCCTX);
-       user_exit();
 
        if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
                /*
index 8af4e0e..2059d8f 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/extable.h>
 #include <asm/insn.h>
 #include <asm/kprobes.h>
@@ -753,8 +754,10 @@ const char *esr_get_class_string(u32 esr)
  * bad_mode handles the impossible case in the exception vector. This is always
  * fatal.
  */
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+       arm64_enter_nmi(regs);
+
        console_verbose();
 
        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
@@ -786,7 +789,7 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
 DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);
 
-asmlinkage void handle_bad_stack(struct pt_regs *regs)
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
 {
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
@@ -794,6 +797,8 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);
 
+       arm64_enter_nmi(regs);
+
        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!");
 
@@ -865,23 +870,16 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
        }
 }
 
-asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
 {
-       nmi_enter();
+       arm64_enter_nmi(regs);
 
        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);
 
-       nmi_exit();
-}
-
-asmlinkage void enter_from_user_mode(void)
-{
-       CT_WARN_ON(ct_state() != CONTEXT_USER);
-       user_exit_irqoff();
+       arm64_exit_nmi(regs);
 }
-NOKPROBE_SYMBOL(enter_from_user_mode);
 
 /* GENERIC_BUG traps */
 
index 1ee9400..795d224 100644 (file)
@@ -789,25 +789,6 @@ void __init hook_debug_fault_code(int nr,
  */
 static void debug_exception_enter(struct pt_regs *regs)
 {
-       /*
-        * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
-        * already disabled to preserve the last enabled/disabled addresses.
-        */
-       if (interrupts_enabled(regs))
-               trace_hardirqs_off();
-
-       if (user_mode(regs)) {
-               RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-       } else {
-               /*
-                * We might have interrupted pretty much anything.  In
-                * fact, if we're a debug exception, we can even interrupt
-                * NMI processing. We don't want this code makes in_nmi()
-                * to return true, but we need to notify RCU.
-                */
-               rcu_nmi_enter();
-       }
-
        preempt_disable();
 
        /* This code is a bit fragile.  Test it. */
@@ -818,12 +799,6 @@ NOKPROBE_SYMBOL(debug_exception_enter);
 static void debug_exception_exit(struct pt_regs *regs)
 {
        preempt_enable_no_resched();
-
-       if (!user_mode(regs))
-               rcu_nmi_exit();
-
-       if (interrupts_enabled(regs))
-               trace_hardirqs_on();
 }
 NOKPROBE_SYMBOL(debug_exception_exit);
 
index f730869..69af6bc 100644 (file)
@@ -102,6 +102,6 @@ void arch_cpu_idle(void)
 #ifdef CONFIG_CPU_PM_STOP
        asm volatile("stop\n");
 #endif
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 #endif
index aea0a40..bc1364d 100644 (file)
@@ -57,7 +57,7 @@ asmlinkage void ret_from_kernel_thread(void);
  */
 void arch_cpu_idle(void)
 {
-       local_irq_enable();
+       raw_local_irq_enable();
        __asm__("sleep");
 }
 
index 5a0a95d..67767c5 100644 (file)
@@ -44,7 +44,7 @@ void arch_cpu_idle(void)
 {
        __vmwait();
        /*  interrupts wake us up, but irqs are still disabled */
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /*
index 6b61a70..c9ff879 100644 (file)
@@ -239,7 +239,7 @@ void arch_cpu_idle(void)
        if (mark_idle)
                (*mark_idle)(1);
 
-       safe_halt();
+       raw_safe_halt();
 
        if (mark_idle)
                (*mark_idle)(0);
index a9e46e5..f998607 100644 (file)
@@ -149,5 +149,5 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
 
 void arch_cpu_idle(void)
 {
-       local_irq_enable();
+       raw_local_irq_enable();
 }
index 5bc3b04..18e69eb 100644 (file)
@@ -33,19 +33,19 @@ static void __cpuidle r3081_wait(void)
 {
        unsigned long cfg = read_c0_conf();
        write_c0_conf(cfg | R30XX_CONF_HALT);
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 static void __cpuidle r39xx_wait(void)
 {
        if (!need_resched())
                write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 void __cpuidle r4k_wait(void)
 {
-       local_irq_enable();
+       raw_local_irq_enable();
        __r4k_wait();
 }
 
@@ -64,7 +64,7 @@ void __cpuidle r4k_wait_irqoff(void)
                "       .set    arch=r4000      \n"
                "       wait                    \n"
                "       .set    pop             \n");
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /*
@@ -84,7 +84,7 @@ static void __cpuidle rm7k_wait_irqoff(void)
                "       wait                                            \n"
                "       mtc0    $1, $12         # stalls until W stage  \n"
                "       .set    pop                                     \n");
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /*
@@ -257,7 +257,7 @@ void arch_cpu_idle(void)
        if (cpu_wait)
                cpu_wait();
        else
-               local_irq_enable();
+               raw_local_irq_enable();
 }
 
 #ifdef CONFIG_CPU_IDLE
index 4ffe857..50b4eb1 100644 (file)
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(pm_power_off);
 
 void arch_cpu_idle(void)
 {
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /*
index 0ff391f..3c98728 100644 (file)
@@ -79,7 +79,7 @@ void machine_power_off(void)
  */
 void arch_cpu_idle(void)
 {
-       local_irq_enable();
+       raw_local_irq_enable();
        if (mfspr(SPR_UPR) & SPR_UPR_PMP)
                mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
 }
index f196d96..a92a23d 100644 (file)
@@ -169,7 +169,7 @@ void __cpuidle arch_cpu_idle_dead(void)
 
 void __cpuidle arch_cpu_idle(void)
 {
-       local_irq_enable();
+       raw_local_irq_enable();
 
        /* nop on real hardware, qemu will idle sleep. */
        asm volatile("or %%r10,%%r10,%%r10\n":::);
index ae0e263..1f83553 100644 (file)
@@ -52,9 +52,9 @@ void arch_cpu_idle(void)
                 * interrupts enabled, some don't.
                 */
                if (irqs_disabled())
-                       local_irq_enable();
+                       raw_local_irq_enable();
        } else {
-               local_irq_enable();
+               raw_local_irq_enable();
                /*
                 * Go into low thread priority and possibly
                 * low power mode.
index 82a5693..134388c 100644 (file)
@@ -4,6 +4,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/barrier.h>
+
 static inline void cpu_relax(void)
 {
 #ifdef __riscv_muldiv
index 19225ec..dd5f985 100644 (file)
@@ -36,7 +36,7 @@ extern asmlinkage void ret_from_kernel_thread(void);
 void arch_cpu_idle(void)
 {
        wait_for_interrupt();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 void show_regs(struct pt_regs *regs)
index c424cc6..117f321 100644 (file)
@@ -75,6 +75,7 @@ void __init setup_arch(char **cmdline_p)
        *cmdline_p = boot_command_line;
 
        early_ioremap_setup();
+       jump_label_init();
        parse_early_param();
 
        efi_init();
index cb8f9e4..0cfd6da 100644 (file)
@@ -44,7 +44,7 @@ SYSCFLAGS_vdso.so.dbg = $(c_flags)
 $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
        $(call if_changed,vdsold)
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-       -Wl,--build-id -Wl,--hash-style=both
+       -Wl,--build-id=sha1 -Wl,--hash-style=both
 
 # We also create a special relocatable object that should mirror the symbol
 # table and layout of the linked DSO. With ld --just-symbols we can then
index 26bb060..92beb14 100644 (file)
@@ -763,12 +763,7 @@ ENTRY(io_int_handler)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
-       tmhh    %r8,0x300
-       jz      1f
        TRACE_IRQS_OFF
-1:
-#endif
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 .Lio_loop:
        lgr     %r2,%r11                # pass pointer to pt_regs
@@ -791,12 +786,7 @@ ENTRY(io_int_handler)
        TSTMSK  __LC_CPU_FLAGS,_CIF_WORK
        jnz     .Lio_work
 .Lio_restore:
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
-       tm      __PT_PSW(%r11),3
-       jno     0f
        TRACE_IRQS_ON
-0:
-#endif
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
        jno     .Lio_exit_kernel
@@ -976,12 +966,7 @@ ENTRY(ext_int_handler)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        TSTMSK  __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
        jo      .Lio_restore
-#if IS_ENABLED(CONFIG_TRACE_IRQFLAGS)
-       tmhh    %r8,0x300
-       jz      1f
        TRACE_IRQS_OFF
-1:
-#endif
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        lghi    %r3,EXT_INTERRUPT
index f7f1e64..2b85096 100644 (file)
@@ -33,10 +33,10 @@ void enabled_wait(void)
                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        clear_cpu_flag(CIF_NOHZ_DELAY);
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        /* Call the assembler magic in entry.S */
        psw_idle(idle, psw_mask);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        /* Account time spent with enabled wait psw loaded as idle time. */
        raw_write_seqcount_begin(&idle->seqcount);
@@ -123,7 +123,7 @@ void arch_cpu_idle_enter(void)
 void arch_cpu_idle(void)
 {
        enabled_wait();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 void arch_cpu_idle_exit(void)
index daca7ba..8c0c68e 100644 (file)
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(__delay);
 
 static void __udelay_disabled(unsigned long long usecs)
 {
-       unsigned long cr0, cr0_new, psw_mask, flags;
+       unsigned long cr0, cr0_new, psw_mask;
        struct s390_idle_data idle;
        u64 end;
 
@@ -45,9 +45,8 @@ static void __udelay_disabled(unsigned long long usecs)
        psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT;
        set_clock_comparator(end);
        set_cpu_flag(CIF_IGNORE_IRQ);
-       local_irq_save(flags);
        psw_idle(&idle, psw_mask);
-       local_irq_restore(flags);
+       trace_hardirqs_off();
        clear_cpu_flag(CIF_IGNORE_IRQ);
        set_clock_comparator(S390_lowcore.clock_comparator);
        __ctl_load(cr0, 0, 0);
index 743f257..75217fb 100644 (file)
@@ -103,9 +103,10 @@ static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *de
 {
        struct msi_desc *entry = irq_get_msi_desc(data->irq);
        struct msi_msg msg = entry->msg;
+       int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
 
        msg.address_lo &= 0xff0000ff;
-       msg.address_lo |= (cpumask_first(dest) << 8);
+       msg.address_lo |= (cpu_addr << 8);
        pci_write_msi_msg(data->irq, &msg);
 
        return IRQ_SET_MASK_OK;
@@ -238,6 +239,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
        unsigned long bit;
        struct msi_desc *msi;
        struct msi_msg msg;
+       int cpu_addr;
        int rc, irq;
 
        zdev->aisb = -1UL;
@@ -287,9 +289,15 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
                                         handle_percpu_irq);
                msg.data = hwirq - bit;
                if (irq_delivery == DIRECTED) {
+                       if (msi->affinity)
+                               cpu = cpumask_first(&msi->affinity->mask);
+                       else
+                               cpu = 0;
+                       cpu_addr = smp_cpu_get_cpu_address(cpu);
+
                        msg.address_lo = zdev->msi_addr & 0xff0000ff;
-                       msg.address_lo |= msi->affinity ?
-                               (cpumask_first(&msi->affinity->mask) << 8) : 0;
+                       msg.address_lo |= (cpu_addr << 8);
+
                        for_each_possible_cpu(cpu) {
                                airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
                        }
index 0dc0f52..f598149 100644 (file)
@@ -22,7 +22,7 @@ static void (*sh_idle)(void);
 void default_idle(void)
 {
        set_bl_bit();
-       local_irq_enable();
+       raw_local_irq_enable();
        /* Isn't this racy ? */
        cpu_sleep();
        clear_bl_bit();
index 065e2d4..396f46b 100644 (file)
@@ -50,7 +50,7 @@ static void pmc_leon_idle_fixup(void)
        register unsigned int address = (unsigned int)leon3_irqctrl_regs;
 
        /* Interrupts need to be enabled to not hang the CPU */
-       local_irq_enable();
+       raw_local_irq_enable();
 
        __asm__ __volatile__ (
                "wr     %%g0, %%asr19\n"
@@ -66,7 +66,7 @@ static void pmc_leon_idle_fixup(void)
 static void pmc_leon_idle(void)
 {
        /* Interrupts need to be enabled to not hang the CPU */
-       local_irq_enable();
+       raw_local_irq_enable();
 
        /* For systems without power-down, this will be no-op */
        __asm__ __volatile__ ("wr       %g0, %asr19\n\t");
index adfcaea..a023637 100644 (file)
@@ -74,7 +74,7 @@ void arch_cpu_idle(void)
 {
        if (sparc_idle)
                (*sparc_idle)();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
index a75093b..6f8c782 100644 (file)
@@ -62,11 +62,11 @@ void arch_cpu_idle(void)
 {
        if (tlb_type != hypervisor) {
                touch_nmi_watchdog();
-               local_irq_enable();
+               raw_local_irq_enable();
        } else {
                unsigned long pstate;
 
-               local_irq_enable();
+               raw_local_irq_enable();
 
                 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
                  * the cpu sleep hypervisor call.
index 3bed095..9505a7e 100644 (file)
@@ -217,7 +217,7 @@ void arch_cpu_idle(void)
 {
        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        um_idle_sleep();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 int __cant_sleep(void) {
index e039a93..29dd27b 100644 (file)
@@ -88,8 +88,6 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
-       trace_hardirqs_on();
-
        mds_idle_clear_cpu_buffers();
        /* "mwait %eax, %ecx;" */
        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
index 581fb72..d41b70f 100644 (file)
@@ -739,11 +739,13 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
 
+               spectre_v2_user_ibpb = mode;
                switch (cmd) {
                case SPECTRE_V2_USER_CMD_FORCE:
                case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
                case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
                        static_branch_enable(&switch_mm_always_ibpb);
+                       spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
                        break;
                case SPECTRE_V2_USER_CMD_PRCTL:
                case SPECTRE_V2_USER_CMD_AUTO:
@@ -757,8 +759,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
                pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
                        static_key_enabled(&switch_mm_always_ibpb) ?
                        "always-on" : "conditional");
-
-               spectre_v2_user_ibpb = mode;
        }
 
        /*
index 4102b86..32b7099 100644 (file)
@@ -1384,8 +1384,10 @@ noinstr void do_machine_check(struct pt_regs *regs)
         * When there's any problem use only local no_way_out state.
         */
        if (!lmce) {
-               if (mce_end(order) < 0)
-                       no_way_out = worst >= MCE_PANIC_SEVERITY;
+               if (mce_end(order) < 0) {
+                       if (!no_way_out)
+                               no_way_out = worst >= MCE_PANIC_SEVERITY;
+               }
        } else {
                /*
                 * If there was a fatal machine check we should have
index af323e2..6f4ca4b 100644 (file)
@@ -507,6 +507,24 @@ unlock:
        return ret ?: nbytes;
 }
 
+/**
+ * rdtgroup_remove - the helper to remove resource group safely
+ * @rdtgrp: resource group to remove
+ *
+ * On resource group creation via a mkdir, an extra kernfs_node reference is
+ * taken to ensure that the rdtgroup structure remains accessible for the
+ * rdtgroup_kn_unlock() calls where it is removed.
+ *
+ * Drop the extra reference here, then free the rdtgroup structure.
+ *
+ * Return: void
+ */
+static void rdtgroup_remove(struct rdtgroup *rdtgrp)
+{
+       kernfs_put(rdtgrp->kn);
+       kfree(rdtgrp);
+}
+
 struct task_move_callback {
        struct callback_head    work;
        struct rdtgroup         *rdtgrp;
@@ -529,7 +547,7 @@ static void move_myself(struct callback_head *head)
            (rdtgrp->flags & RDT_DELETED)) {
                current->closid = 0;
                current->rmid = 0;
-               kfree(rdtgrp);
+               rdtgroup_remove(rdtgrp);
        }
 
        if (unlikely(current->flags & PF_EXITING))
@@ -1769,7 +1787,6 @@ static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
        if (IS_ERR(kn_subdir))
                return PTR_ERR(kn_subdir);
 
-       kernfs_get(kn_subdir);
        ret = rdtgroup_kn_set_ugid(kn_subdir);
        if (ret)
                return ret;
@@ -1792,7 +1809,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
        kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
        if (IS_ERR(kn_info))
                return PTR_ERR(kn_info);
-       kernfs_get(kn_info);
 
        ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
        if (ret)
@@ -1813,12 +1829,6 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
                        goto out_destroy;
        }
 
-       /*
-        * This extra ref will be put in kernfs_remove() and guarantees
-        * that @rdtgrp->kn is always accessible.
-        */
-       kernfs_get(kn_info);
-
        ret = rdtgroup_kn_set_ugid(kn_info);
        if (ret)
                goto out_destroy;
@@ -1847,12 +1857,6 @@ mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
        if (dest_kn)
                *dest_kn = kn;
 
-       /*
-        * This extra ref will be put in kernfs_remove() and guarantees
-        * that @rdtgrp->kn is always accessible.
-        */
-       kernfs_get(kn);
-
        ret = rdtgroup_kn_set_ugid(kn);
        if (ret)
                goto out_destroy;
@@ -2079,8 +2083,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
                        rdtgroup_pseudo_lock_remove(rdtgrp);
                kernfs_unbreak_active_protection(kn);
-               kernfs_put(rdtgrp->kn);
-               kfree(rdtgrp);
+               rdtgroup_remove(rdtgrp);
        } else {
                kernfs_unbreak_active_protection(kn);
        }
@@ -2139,13 +2142,11 @@ static int rdt_get_tree(struct fs_context *fc)
                                          &kn_mongrp);
                if (ret < 0)
                        goto out_info;
-               kernfs_get(kn_mongrp);
 
                ret = mkdir_mondata_all(rdtgroup_default.kn,
                                        &rdtgroup_default, &kn_mondata);
                if (ret < 0)
                        goto out_mongrp;
-               kernfs_get(kn_mondata);
                rdtgroup_default.mon.mon_data_kn = kn_mondata;
        }
 
@@ -2357,7 +2358,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
                if (atomic_read(&sentry->waitcount) != 0)
                        sentry->flags = RDT_DELETED;
                else
-                       kfree(sentry);
+                       rdtgroup_remove(sentry);
        }
 }
 
@@ -2399,7 +2400,7 @@ static void rmdir_all_sub(void)
                if (atomic_read(&rdtgrp->waitcount) != 0)
                        rdtgrp->flags = RDT_DELETED;
                else
-                       kfree(rdtgrp);
+                       rdtgroup_remove(rdtgrp);
        }
        /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
        update_closid_rmid(cpu_online_mask, &rdtgroup_default);
@@ -2499,11 +2500,6 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
        if (IS_ERR(kn))
                return PTR_ERR(kn);
 
-       /*
-        * This extra ref will be put in kernfs_remove() and guarantees
-        * that kn is always accessible.
-        */
-       kernfs_get(kn);
        ret = rdtgroup_kn_set_ugid(kn);
        if (ret)
                goto out_destroy;
@@ -2838,8 +2834,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
        /*
         * kernfs_remove() will drop the reference count on "kn" which
         * will free it. But we still need it to stick around for the
-        * rdtgroup_kn_unlock(kn} call below. Take one extra reference
-        * here, which will be dropped inside rdtgroup_kn_unlock().
+        * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
+        * which will be dropped by kernfs_put() in rdtgroup_remove().
         */
        kernfs_get(kn);
 
@@ -2880,6 +2876,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
 out_idfree:
        free_rmid(rdtgrp->mon.rmid);
 out_destroy:
+       kernfs_put(rdtgrp->kn);
        kernfs_remove(rdtgrp->kn);
 out_free_rgrp:
        kfree(rdtgrp);
@@ -2892,7 +2889,7 @@ static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
 {
        kernfs_remove(rgrp->kn);
        free_rmid(rgrp->mon.rmid);
-       kfree(rgrp);
+       rdtgroup_remove(rgrp);
 }
 
 /*
@@ -3049,11 +3046,6 @@ static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
        WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
        list_del(&rdtgrp->mon.crdtgrp_list);
 
-       /*
-        * one extra hold on this, will drop when we kfree(rdtgrp)
-        * in rdtgroup_kn_unlock()
-        */
-       kernfs_get(kn);
        kernfs_remove(rdtgrp->kn);
 
        return 0;
@@ -3065,11 +3057,6 @@ static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
        rdtgrp->flags = RDT_DELETED;
        list_del(&rdtgrp->rdtgroup_list);
 
-       /*
-        * one extra hold on this, will drop when we kfree(rdtgrp)
-        * in rdtgroup_kn_unlock()
-        */
-       kernfs_get(kn);
        kernfs_remove(rdtgrp->kn);
        return 0;
 }
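
The resctrl hunks above replace scattered kernfs_get()/kfree() pairs with a single rdtgroup_remove() that drops the one extra kernfs reference taken at mkdir time and then frees the group. Below is a minimal standalone sketch of that lifetime rule, using toy types and a plain integer refcount instead of the kernfs API; it only illustrates the pattern and is not the resctrl code.

#include <stdio.h>
#include <stdlib.h>

struct node {			/* stand-in for the kernfs_node */
	int refs;
	const char *name;
};

struct group {			/* stand-in for the resource-group object */
	struct node *kn;
};

static struct node *node_get(struct node *n) { n->refs++; return n; }

static void node_put(struct node *n)
{
	if (--n->refs == 0) {
		printf("node %s freed\n", n->name);
		free(n);
	}
}

static void group_remove(struct group *g)	/* mirrors rdtgroup_remove() */
{
	node_put(g->kn);	/* drop the creation-time extra reference */
	free(g);
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));
	struct group *g = malloc(sizeof(*g));

	n->refs = 1;
	n->name = "grp";
	g->kn = node_get(n);	/* extra reference taken at creation */

	node_put(n);		/* directory torn down: core drops its ref */
	group_remove(g);	/* last reference and the group go away here */
	return 0;
}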
index ba4593a..145a7ac 100644 (file)
@@ -685,7 +685,7 @@ void arch_cpu_idle(void)
  */
 void __cpuidle default_idle(void)
 {
-       safe_halt();
+       raw_safe_halt();
 }
 #if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
 EXPORT_SYMBOL(default_idle);
@@ -736,6 +736,8 @@ void stop_this_cpu(void *dummy)
 /*
  * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
  * states (local apic timer and TSC stop).
+ *
+ * XXX this function is completely buggered vs RCU and tracing.
  */
 static void amd_e400_idle(void)
 {
@@ -757,9 +759,9 @@ static void amd_e400_idle(void)
         * The switch back from broadcast mode needs to be called with
         * interrupts disabled.
         */
-       local_irq_disable();
+       raw_local_irq_disable();
        tick_broadcast_exit();
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /*
@@ -801,9 +803,9 @@ static __cpuidle void mwait_idle(void)
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
-                       local_irq_enable();
+                       raw_local_irq_enable();
        } else {
-               local_irq_enable();
+               raw_local_irq_enable();
        }
        __current_clr_polling();
 }
index c0cd1b9..5762280 100644 (file)
@@ -145,6 +145,7 @@ obj-$(CONFIG_OF)            += of/
 obj-$(CONFIG_SSB)              += ssb/
 obj-$(CONFIG_BCMA)             += bcma/
 obj-$(CONFIG_VHOST_RING)       += vhost/
+obj-$(CONFIG_VHOST_IOTLB)      += vhost/
 obj-$(CONFIG_VHOST)            += vhost/
 obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_GREYBUS)          += greybus/
index 36ec1f7..d989549 100644 (file)
@@ -270,7 +270,7 @@ config EFI_DEV_PATH_PARSER
 
 config EFI_EARLYCON
        def_bool y
-       depends on SERIAL_EARLYCON && !ARM && !IA64
+       depends on EFI && SERIAL_EARLYCON && !ARM && !IA64
        select FONT_SUPPORT
        select ARCH_USE_MEMREMAP_PROT
 
index 5e5480a..6c6eec0 100644 (file)
@@ -390,10 +390,10 @@ static int __init efisubsys_init(void)
 
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
                                      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
-               efivar_ssdt_load();
                error = generic_ops_register();
                if (error)
                        goto err_put;
+               efivar_ssdt_load();
                platform_device_register_simple("efivars", 0, NULL, 0);
        }
 
index 01bace4..7ee7ffe 100644 (file)
@@ -126,26 +126,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
        struct cpuidle_state *state = &drv->states[index];
        unsigned long eax = flg2MWAIT(state->flags);
        unsigned long ecx = 1; /* break on interrupt flag */
-       bool tick;
-
-       if (!static_cpu_has(X86_FEATURE_ARAT)) {
-               /*
-                * Switch over to one-shot tick broadcast if the target C-state
-                * is deeper than C1.
-                */
-               if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
-                       tick = true;
-                       tick_broadcast_enter();
-               } else {
-                       tick = false;
-               }
-       }
 
        mwait_idle_with_hints(eax, ecx);
 
-       if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
-               tick_broadcast_exit();
-
        return index;
 }
 
@@ -1227,6 +1210,20 @@ static bool __init intel_idle_acpi_cst_extract(void)
        return false;
 }
 
+static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state)
+{
+       unsigned long eax = flg2MWAIT(state->flags);
+
+       if (boot_cpu_has(X86_FEATURE_ARAT))
+               return false;
+
+       /*
+        * Switch over to one-shot tick broadcast if the target C-state
+        * is deeper than C1.
+        */
+       return !!((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
+}
+
 static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
 {
        int cstate, limit = min_t(int, CPUIDLE_STATE_MAX, acpi_state_table.count);
@@ -1269,6 +1266,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
                if (disabled_states_mask & BIT(cstate))
                        state->flags |= CPUIDLE_FLAG_OFF;
 
+               if (intel_idle_state_needs_timer_stop(state))
+                       state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
                state->enter = intel_idle;
                state->enter_s2idle = intel_idle_s2idle;
        }
@@ -1507,6 +1507,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
                     !(cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_ALWAYS_ENABLE)))
                        drv->states[drv->state_count].flags |= CPUIDLE_FLAG_OFF;
 
+               if (intel_idle_state_needs_timer_stop(&drv->states[drv->state_count]))
+                       drv->states[drv->state_count].flags |= CPUIDLE_FLAG_TIMER_STOP;
+
                drv->state_count++;
        }
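
The intel_idle refactor above stops toggling tick broadcast inside the hot idle-enter path and instead tags the affected states once, at table-build time, with CPUIDLE_FLAG_TIMER_STOP. The standalone sketch below restates that decision as a pure function; the hint layout (the bits above the sub-state field select the C-state, zero meaning C1) and the ARAT shortcut follow the hunks above, but the constants are re-declared here as assumptions so the example compiles on its own.

#include <stdbool.h>
#include <stdio.h>

#define MWAIT_SUBSTATE_SIZE	4	/* assumed field width, as in the masks above */
#define MWAIT_CSTATE_MASK	0xf

static bool state_needs_timer_stop(unsigned long mwait_hint, bool has_arat)
{
	if (has_arat)		/* APIC timer keeps running in deep C-states */
		return false;

	/* Anything deeper than C1 stops the local APIC timer */
	return !!((mwait_hint >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK);
}

int main(void)
{
	printf("hint 0x00: %d\n", state_needs_timer_stop(0x00, false));	/* C1  -> 0 */
	printf("hint 0x20: %d\n", state_needs_timer_stop(0x20, false));	/* deep -> 1 */
	return 0;
}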
 
index 974a667..5ad519c 100644 (file)
@@ -1083,7 +1083,6 @@ static int of_count_icc_providers(struct device_node *np)
                        count++;
                count += of_count_icc_providers(child);
        }
-       of_node_put(np);
 
        return count;
 }
index 42c6c55..e8371d4 100644 (file)
@@ -182,7 +182,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8916_MASTER_SDCC_1, 8, -1, -1, MSM8916_PNOC_IN
 DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8916_MASTER_SDCC_2, 8, -1, -1, MSM8916_PNOC_INT_1);
 DEFINE_QNODE(mas_qdss_bam, MSM8916_MASTER_QDSS_BAM, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
 DEFINE_QNODE(mas_qdss_etr, MSM8916_MASTER_QDSS_ETR, 8, -1, -1, MSM8916_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, 20, -1, MSM8916_SNOC_QDSS_INT);
+DEFINE_QNODE(mas_snoc_cfg, MSM8916_MASTER_SNOC_CFG, 4, -1, -1, MSM8916_SNOC_QDSS_INT);
 DEFINE_QNODE(mas_spdm, MSM8916_MASTER_SPDM, 4, -1, -1, MSM8916_PNOC_MAS_0);
 DEFINE_QNODE(mas_tcu0, MSM8916_MASTER_TCU0, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
 DEFINE_QNODE(mas_tcu1, MSM8916_MASTER_TCU1, 8, -1, -1, MSM8916_SLAVE_EBI_CH0, MSM8916_BIMC_SNOC_MAS, MSM8916_SLAVE_AMPSS_L2);
@@ -208,14 +208,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC
 DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
 DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);
 DEFINE_QNODE(slv_apps_l2, MSM8916_SLAVE_AMPSS_L2, 8, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_apss, MSM8916_SLAVE_APSS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_audio, MSM8916_SLAVE_LPASS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_bimc_cfg, MSM8916_SLAVE_BIMC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_blsp_1, MSM8916_SLAVE_BLSP_1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_boot_rom, MSM8916_SLAVE_BOOT_ROM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_camera_cfg, MSM8916_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, 106, 0);
-DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_cats_0, MSM8916_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, MSM8916_SLAVE_OCMEM_64, 8, -1, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, MSM8916_SLAVE_CLK_CTL, 4, -1, -1, 0);
 DEFINE_QNODE(slv_crypto_0_cfg, MSM8916_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_dehr_cfg, MSM8916_SLAVE_DEHR_CFG, 4, -1, -1, 0);
@@ -239,7 +239,7 @@ DEFINE_QNODE(slv_sdcc_2, MSM8916_SLAVE_SDCC_2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_security, MSM8916_SLAVE_SECURITY, 4, -1, -1, 0);
 DEFINE_QNODE(slv_snoc_cfg, MSM8916_SLAVE_SNOC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_spdm, MSM8916_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8916_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
 DEFINE_QNODE(slv_tcsr, MSM8916_SLAVE_TCSR, 4, -1, -1, 0);
 DEFINE_QNODE(slv_tlmm, MSM8916_SLAVE_TLMM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs, MSM8916_SLAVE_USB_HS, 4, -1, -1, 0);
@@ -249,7 +249,7 @@ DEFINE_QNODE(snoc_bimc_0_slv, MSM8916_SNOC_BIMC_0_SLV, 8, -1, 24, MSM8916_SLAVE_
 DEFINE_QNODE(snoc_bimc_1_mas, MSM8916_SNOC_BIMC_1_MAS, 16, -1, -1, MSM8916_SNOC_BIMC_1_SLV);
 DEFINE_QNODE(snoc_bimc_1_slv, MSM8916_SNOC_BIMC_1_SLV, 8, -1, -1, MSM8916_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_int_0, MSM8916_SNOC_INT_0, 8, 99, 130, MSM8916_SLAVE_QDSS_STM, MSM8916_SLAVE_IMEM, MSM8916_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, 100, 131, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_1, MSM8916_SNOC_INT_1, 8, -1, -1, MSM8916_SLAVE_APSS, MSM8916_SLAVE_CATS_128, MSM8916_SLAVE_OCMEM_64);
 DEFINE_QNODE(snoc_int_bimc, MSM8916_SNOC_INT_BIMC, 8, 101, 132, MSM8916_SNOC_BIMC_0_MAS);
 DEFINE_QNODE(snoc_pcnoc_mas, MSM8916_SNOC_PNOC_MAS, 8, -1, -1, MSM8916_SNOC_PNOC_SLV);
 DEFINE_QNODE(snoc_pcnoc_slv, MSM8916_SNOC_PNOC_SLV, 8, -1, -1, MSM8916_PNOC_INT_0);
index 3a313e1..da68ce3 100644 (file)
@@ -618,6 +618,8 @@ static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
 
        do_div(rate, src_qn->buswidth);
 
+       rate = min_t(u32, rate, INT_MAX);
+
        if (src_qn->rate == rate)
                return 0;
 
@@ -635,6 +637,14 @@ static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
        return 0;
 }
 
+static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
+{
+       *avg = 0;
+       *peak = 0;
+
+       return 0;
+}
+
 static int msm8974_icc_probe(struct platform_device *pdev)
 {
        const struct msm8974_icc_desc *desc;
@@ -688,6 +698,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
        provider->aggregate = icc_std_aggregate;
        provider->xlate = of_icc_xlate_onecell;
        provider->data = data;
+       provider->get_bw = msm8974_get_bw;
 
        ret = icc_provider_add(provider);
        if (ret) {
@@ -758,6 +769,7 @@ static struct platform_driver msm8974_noc_driver = {
        .driver = {
                .name = "qnoc-msm8974",
                .of_match_table = msm8974_noc_of_match,
+               .sync_state = icc_sync_state,
        },
 };
 module_platform_driver(msm8974_noc_driver);
index d4769a5..9820709 100644 (file)
@@ -157,8 +157,8 @@ struct qcom_icc_desc {
        }
 
 DEFINE_QNODE(mas_apps_proc, QCS404_MASTER_AMPSS_M0, 8, 0, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, 6, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
-DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, 8, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_oxili, QCS404_MASTER_GRAPHICS_3D, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
+DEFINE_QNODE(mas_mdp, QCS404_MASTER_MDP_PORT0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
 DEFINE_QNODE(mas_snoc_bimc_1, QCS404_SNOC_BIMC_1_MAS, 8, 76, -1, QCS404_SLAVE_EBI_CH0);
 DEFINE_QNODE(mas_tcu_0, QCS404_MASTER_TCU_0, 8, -1, -1, QCS404_SLAVE_EBI_CH0, QCS404_BIMC_SNOC_SLV);
 DEFINE_QNODE(mas_spdm, QCS404_MASTER_SPDM, 4, -1, -1, QCS404_PNOC_INT_3);
index 0fec319..4069c21 100644 (file)
@@ -42,7 +42,6 @@
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING          (1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375      (1ULL << 1)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144      (1ULL << 2)
-#define ITS_FLAGS_SAVE_SUSPEND_STATE           (1ULL << 3)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED     (1 << 1)
@@ -4741,9 +4740,6 @@ static int its_save_disable(void)
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
 
-               if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
-                       continue;
-
                base = its->base;
                its->ctlr_save = readl_relaxed(base + GITS_CTLR);
                err = its_force_quiescent(base);
@@ -4762,9 +4758,6 @@ err:
                list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
                        void __iomem *base;
 
-                       if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
-                               continue;
-
                        base = its->base;
                        writel_relaxed(its->ctlr_save, base + GITS_CTLR);
                }
@@ -4784,9 +4777,6 @@ static void its_restore_enable(void)
                void __iomem *base;
                int i;
 
-               if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
-                       continue;
-
                base = its->base;
 
                /*
@@ -4794,7 +4784,10 @@ static void its_restore_enable(void)
                 * don't restore it since writing to CBASER or BASER<n>
                 * registers is undefined according to the GIC v3 ITS
                 * Specification.
+                *
+                * Firmware resuming with the ITS enabled is terminally broken.
                 */
+               WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
                ret = its_force_quiescent(base);
                if (ret) {
                        pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
@@ -5074,9 +5067,6 @@ static int __init its_probe_one(struct resource *res,
                ctlr |= GITS_CTLR_ImDe;
        writel_relaxed(ctlr, its->base + GITS_CTLR);
 
-       if (GITS_TYPER_HCC(typer))
-               its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
-
        err = its_init_domain(handle, its);
        if (err)
                goto out_free_tables;
index 1d02762..abd011f 100644 (file)
@@ -136,7 +136,7 @@ static int exiu_domain_translate(struct irq_domain *domain,
                if (fwspec->param_count != 2)
                        return -EINVAL;
                *hwirq = fwspec->param[0];
-               *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+               *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
        }
        return 0;
 }
index 2519a34..7ea6b43 100644 (file)
@@ -5436,6 +5436,8 @@ static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
                params.num_memories = 33;
                params.derr = true;
                params.disable_clock_gating = true;
+               extract_info_from_fw = false;
+               break;
        default:
                return;
        }
index 0420f09..63f48b0 100644 (file)
@@ -1295,12 +1295,22 @@ int c_can_power_up(struct net_device *dev)
                                time_after(time_out, jiffies))
                cpu_relax();
 
-       if (time_after(jiffies, time_out))
-               return -ETIMEDOUT;
+       if (time_after(jiffies, time_out)) {
+               ret = -ETIMEDOUT;
+               goto err_out;
+       }
 
        ret = c_can_start(dev);
-       if (!ret)
-               c_can_irq_control(priv, true);
+       if (ret)
+               goto err_out;
+
+       c_can_irq_control(priv, true);
+
+       return 0;
+
+err_out:
+       c_can_reset_ram(priv, false);
+       c_can_pm_runtime_put_sync(priv);
 
        return ret;
 }
index 1bafa61..969cedb 100644 (file)
@@ -692,8 +692,10 @@ static int kvaser_pciefd_open(struct net_device *netdev)
                return err;
 
        err = kvaser_pciefd_bus_on(can);
-       if (err)
+       if (err) {
+               close_candev(netdev);
                return err;
+       }
 
        return 0;
 }
index 483a78d..a0fecc3 100644 (file)
@@ -479,18 +479,18 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        spi->bits_per_word = 32;
        ret = spi_setup(spi);
        if (ret)
-               goto out_clk;
+               goto out_m_can_class_free_dev;
 
        priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
                                        &spi->dev, &tcan4x5x_regmap);
        if (IS_ERR(priv->regmap)) {
                ret = PTR_ERR(priv->regmap);
-               goto out_clk;
+               goto out_m_can_class_free_dev;
        }
 
        ret = tcan4x5x_power_enable(priv->power, 1);
        if (ret)
-               goto out_clk;
+               goto out_m_can_class_free_dev;
 
        ret = tcan4x5x_get_gpios(mcan_class);
        if (ret)
@@ -509,11 +509,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 
 out_power:
        tcan4x5x_power_enable(priv->power, 0);
-out_clk:
-       if (!IS_ERR(mcan_class->cclk)) {
-               clk_disable_unprepare(mcan_class->cclk);
-               clk_disable_unprepare(mcan_class->hclk);
-       }
  out_m_can_class_free_dev:
        m_can_class_free_dev(mcan_class->net);
        return ret;
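
The tcan4x5x change above drops the out_clk unwind step and sends the early error paths straight to out_m_can_class_free_dev. As a generic illustration of goto-based unwinding (release only what has actually been acquired, in reverse order), here is a small self-contained sketch with made-up setup steps; it is not the driver code.

#include <stdio.h>

static int setup_a(void) { puts("a up"); return 0; }
static void undo_a(void) { puts("a undone"); }
static int setup_b(void) { puts("b failed to come up"); return -1; }

static int probe(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;		/* nothing set up yet, nothing to undo */

	ret = setup_b();
	if (ret)
		goto err_undo_a;	/* only step a is live at this point */

	return 0;

err_undo_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	printf("probe() = %d\n", probe());
	return 0;
}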
index e603299..b6a7003 100644 (file)
@@ -473,7 +473,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
                netdev_dbg(dev, "arbitration lost interrupt\n");
                alc = priv->read_reg(priv, SJA1000_ALC);
                priv->can.can_stats.arbitration_lost++;
-               stats->tx_errors++;
                cf->can_id |= CAN_ERR_LOSTARB;
                cf->data[0] = alc & 0x1f;
        }
index 098cc96..783b632 100644 (file)
@@ -604,7 +604,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
                netdev_dbg(dev, "arbitration lost interrupt\n");
                alc = readl(priv->base + SUN4I_REG_STA_ADDR);
                priv->can.can_stats.arbitration_lost++;
-               stats->tx_errors++;
                if (likely(skb)) {
                        cf->can_id |= CAN_ERR_LOSTARB;
                        cf->data[0] = (alc >> 8) & 0x1f;
index 7fb42f3..7b79528 100644 (file)
@@ -88,6 +88,7 @@ config BNX2
 config CNIC
        tristate "QLogic CNIC support"
        depends on PCI && (IPV6 || IPV6=n)
+       depends on MMU
        select BNX2
        select UIO
        help
index e18e9ce..1cc3c51 100644 (file)
@@ -3175,6 +3175,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
                          GFP_KERNEL | __GFP_COMP);
        if (!avail) {
                CH_ALERT(adapter, "free list queue 0 initialization failed\n");
+               ret = -ENOMEM;
                goto err;
        }
        if (avail < q->fl[0].size)
index 63aacc1..95ab871 100644 (file)
@@ -1206,6 +1206,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        sk_setup_caps(newsk, dst);
        ctx = tls_get_ctx(lsk);
        newsk->sk_destruct = ctx->sk_destruct;
+       newsk->sk_prot_creator = lsk->sk_prot_creator;
        csk->sk = newsk;
        csk->passive_reap_next = oreq;
        csk->tx_chan = cxgb4_port_chan(ndev);
index 62c8290..a4fb463 100644 (file)
@@ -391,6 +391,7 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen,
        csk->wr_unacked += DIV_ROUND_UP(len, 16);
        enqueue_wr(csk, skb);
        cxgb4_ofld_send(csk->egress_dev, skb);
+       skb = NULL;
 
        chtls_set_scmd(csk);
        /* Clear quiesce for Rx key */
index 947b3d2..33c71b5 100644 (file)
@@ -2158,6 +2158,15 @@ workaround:
        skb_copy_header(new_skb, skb);
        new_skb->dev = skb->dev;
 
+       /* Copy relevant timestamp info from the old skb to the new */
+       if (priv->tx_tstamp) {
+               skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
+               skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
+               skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
+               if (skb->sk)
+                       skb_set_owner_w(new_skb, skb->sk);
+       }
+
        /* We move the headroom when we align it so we have to reset the
         * network and transport header offsets relative to the new data
         * pointer. The checksum offload relies on these offsets.
@@ -2165,7 +2174,6 @@ workaround:
        skb_set_network_header(new_skb, skb_network_offset(skb));
        skb_set_transport_header(new_skb, skb_transport_offset(skb));
 
-       /* TODO: does timestamping need the result in the old skb? */
        dev_kfree_skb(skb);
        *s = new_skb;
 
index cdd1ff9..cf55c5e 100644 (file)
@@ -852,7 +852,7 @@ static void release_napi(struct ibmvnic_adapter *adapter)
 static int ibmvnic_login(struct net_device *netdev)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       unsigned long timeout = msecs_to_jiffies(30000);
+       unsigned long timeout = msecs_to_jiffies(20000);
        int retry_count = 0;
        int retries = 10;
        bool retry;
@@ -868,10 +868,8 @@ static int ibmvnic_login(struct net_device *netdev)
                adapter->init_done_rc = 0;
                reinit_completion(&adapter->init_done);
                rc = send_login(adapter);
-               if (rc) {
-                       netdev_warn(netdev, "Unable to login\n");
+               if (rc)
                        return rc;
-               }
 
                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
@@ -958,7 +956,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
 {
        struct net_device *netdev = adapter->netdev;
-       unsigned long timeout = msecs_to_jiffies(30000);
+       unsigned long timeout = msecs_to_jiffies(20000);
        union ibmvnic_crq crq;
        bool resend;
        int rc;
@@ -1939,7 +1937,7 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
        if (reset_state == VNIC_OPEN) {
                rc = __ibmvnic_close(netdev);
                if (rc)
-                       return rc;
+                       goto out;
        }
 
        release_resources(adapter);
@@ -1957,24 +1955,25 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
        }
 
        rc = ibmvnic_reset_init(adapter, true);
-       if (rc)
-               return IBMVNIC_INIT_FAILED;
+       if (rc) {
+               rc = IBMVNIC_INIT_FAILED;
+               goto out;
+       }
 
        /* If the adapter was in PROBE state prior to the reset,
         * exit here.
         */
        if (reset_state == VNIC_PROBED)
-               return 0;
+               goto out;
 
        rc = ibmvnic_login(netdev);
        if (rc) {
-               adapter->state = reset_state;
-               return rc;
+               goto out;
        }
 
        rc = init_resources(adapter);
        if (rc)
-               return rc;
+               goto out;
 
        ibmvnic_disable_irqs(adapter);
 
@@ -1984,8 +1983,10 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
                return 0;
 
        rc = __ibmvnic_open(netdev);
-       if (rc)
-               return IBMVNIC_OPEN_FAILED;
+       if (rc) {
+               rc = IBMVNIC_OPEN_FAILED;
+               goto out;
+       }
 
        /* refresh device's multicast list */
        ibmvnic_set_multi(netdev);
@@ -1994,7 +1995,10 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);
 
-       return 0;
+out:
+       if (rc)
+               adapter->state = reset_state;
+       return rc;
 }
 
 /**
@@ -2097,7 +2101,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
                rc = ibmvnic_login(netdev);
                if (rc) {
-                       adapter->state = reset_state;
                        goto out;
                }
 
@@ -2165,6 +2168,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        rc = 0;
 
 out:
+       /* restore the adapter state if reset failed */
+       if (rc)
+               adapter->state = reset_state;
        rtnl_unlock();
 
        return rc;
@@ -2197,43 +2203,46 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
        if (rc) {
                netdev_err(adapter->netdev,
                           "Couldn't initialize crq. rc=%d\n", rc);
-               return rc;
+               goto out;
        }
 
        rc = ibmvnic_reset_init(adapter, false);
        if (rc)
-               return rc;
+               goto out;
 
        /* If the adapter was in PROBE state prior to the reset,
         * exit here.
         */
        if (reset_state == VNIC_PROBED)
-               return 0;
+               goto out;
 
        rc = ibmvnic_login(netdev);
-       if (rc) {
-               adapter->state = VNIC_PROBED;
-               return 0;
-       }
+       if (rc)
+               goto out;
 
        rc = init_resources(adapter);
        if (rc)
-               return rc;
+               goto out;
 
        ibmvnic_disable_irqs(adapter);
        adapter->state = VNIC_CLOSED;
 
        if (reset_state == VNIC_CLOSED)
-               return 0;
+               goto out;
 
        rc = __ibmvnic_open(netdev);
-       if (rc)
-               return IBMVNIC_OPEN_FAILED;
+       if (rc) {
+               rc = IBMVNIC_OPEN_FAILED;
+               goto out;
+       }
 
        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
        call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
-
-       return 0;
+out:
+       /* restore adapter state if reset failed */
+       if (rc)
+               adapter->state = reset_state;
+       return rc;
 }
 
 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
@@ -2255,17 +2264,6 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
        return rwi;
 }
 
-static void free_all_rwi(struct ibmvnic_adapter *adapter)
-{
-       struct ibmvnic_rwi *rwi;
-
-       rwi = get_next_rwi(adapter);
-       while (rwi) {
-               kfree(rwi);
-               rwi = get_next_rwi(adapter);
-       }
-}
-
 static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_rwi *rwi;
@@ -2323,20 +2321,23 @@ static void __ibmvnic_reset(struct work_struct *work)
                                rc = do_hard_reset(adapter, rwi, reset_state);
                                rtnl_unlock();
                        }
+                       if (rc) {
+                               /* give backing device time to settle down */
+                               netdev_dbg(adapter->netdev,
+                                          "[S:%d] Hard reset failed, waiting 60 secs\n",
+                                          adapter->state);
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               schedule_timeout(60 * HZ);
+                       }
                } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
                                adapter->from_passive_init)) {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
-               if (rc == IBMVNIC_OPEN_FAILED) {
-                       if (list_empty(&adapter->rwi_list))
-                               adapter->state = VNIC_CLOSED;
-                       else
-                               adapter->state = reset_state;
-                       rc = 0;
-               } else if (rc && rc != IBMVNIC_INIT_FAILED &&
-                   !adapter->force_reset_recovery)
-                       break;
+               adapter->last_reset_time = jiffies;
+
+               if (rc)
+                       netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
 
                rwi = get_next_rwi(adapter);
 
@@ -2350,11 +2351,6 @@ static void __ibmvnic_reset(struct work_struct *work)
                complete(&adapter->reset_done);
        }
 
-       if (rc) {
-               netdev_dbg(adapter->netdev, "Reset failed\n");
-               free_all_rwi(adapter);
-       }
-
        clear_bit_unlock(0, &adapter->resetting);
 }
 
@@ -2442,7 +2438,13 @@ static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
                           "Adapter is resetting, skip timeout reset\n");
                return;
        }
-
+       /* No queuing up reset until at least 5 seconds (default watchdog val)
+        * after last reset
+        */
+       if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
+               netdev_dbg(dev, "Not yet time to tx timeout.\n");
+               return;
+       }
        ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
 }
 
@@ -2491,6 +2493,12 @@ restart_poll:
 
                if (!pending_scrq(adapter, rx_scrq))
                        break;
+               /* The queue entry at the current index is peeked at above
+                * to determine that there is a valid descriptor awaiting
+                * processing. We want to be sure that the current slot
+                * holds a valid descriptor before reading its contents.
+                */
+               dma_rmb();
                next = ibmvnic_next_scrq(adapter, rx_scrq);
                rx_buff =
                    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
@@ -2954,16 +2962,28 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
 {
        int rc;
 
+       if (!scrq) {
+               netdev_dbg(adapter->netdev,
+                          "Invalid scrq reset. irq (%d) or msgs (%p).\n",
+                          scrq->irq, scrq->msgs);
+               return -EINVAL;
+       }
+
        if (scrq->irq) {
                free_irq(scrq->irq, scrq);
                irq_dispose_mapping(scrq->irq);
                scrq->irq = 0;
        }
 
-       memset(scrq->msgs, 0, 4 * PAGE_SIZE);
-       atomic_set(&scrq->used, 0);
-       scrq->cur = 0;
-       scrq->ind_buf.index = 0;
+       if (scrq->msgs) {
+               memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+               atomic_set(&scrq->used, 0);
+               scrq->cur = 0;
+               scrq->ind_buf.index = 0;
+       } else {
+               netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
+               return -EINVAL;
+       }
 
        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@ -3220,13 +3240,18 @@ restart_loop:
                int total_bytes = 0;
                int num_packets = 0;
 
+               /* The queue entry at the current index is peeked at above
+                * to determine that there is a valid descriptor awaiting
+                * processing. We want to be sure that the current slot
+                * holds a valid descriptor before reading its contents.
+                */
+               dma_rmb();
+
                next = ibmvnic_next_scrq(adapter, scrq);
                for (i = 0; i < next->tx_comp.num_comps; i++) {
-                       if (next->tx_comp.rcs[i]) {
+                       if (next->tx_comp.rcs[i])
                                dev_err(dev, "tx error %x\n",
                                        next->tx_comp.rcs[i]);
-                               continue;
-                       }
                        index = be32_to_cpu(next->tx_comp.correlators[i]);
                        if (index & IBMVNIC_TSO_POOL_MASK) {
                                tx_pool = &adapter->tso_pool[pool];
@@ -3618,6 +3643,11 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
        }
        spin_unlock_irqrestore(&scrq->lock, flags);
 
+       /* Ensure that the entire buffer descriptor has been
+        * loaded before reading its contents
+        */
+       dma_rmb();
+
        return entry;
 }
 
@@ -3807,15 +3837,16 @@ static int send_login(struct ibmvnic_adapter *adapter)
        struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
        struct ibmvnic_login_buffer *login_buffer;
        struct device *dev = &adapter->vdev->dev;
+       struct vnic_login_client_data *vlcd;
        dma_addr_t rsp_buffer_token;
        dma_addr_t buffer_token;
        size_t rsp_buffer_size;
        union ibmvnic_crq crq;
+       int client_data_len;
        size_t buffer_size;
        __be64 *tx_list_p;
        __be64 *rx_list_p;
-       int client_data_len;
-       struct vnic_login_client_data *vlcd;
+       int rc;
        int i;
 
        if (!adapter->tx_scrq || !adapter->rx_scrq) {
@@ -3919,16 +3950,25 @@ static int send_login(struct ibmvnic_adapter *adapter)
        crq.login.cmd = LOGIN;
        crq.login.ioba = cpu_to_be32(buffer_token);
        crq.login.len = cpu_to_be32(buffer_size);
-       ibmvnic_send_crq(adapter, &crq);
+
+       adapter->login_pending = true;
+       rc = ibmvnic_send_crq(adapter, &crq);
+       if (rc) {
+               adapter->login_pending = false;
+               netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
+               goto buf_rsp_map_failed;
+       }
 
        return 0;
 
 buf_rsp_map_failed:
        kfree(login_rsp_buffer);
+       adapter->login_rsp_buf = NULL;
 buf_rsp_alloc_failed:
        dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
 buf_map_failed:
        kfree(login_buffer);
+       adapter->login_buf = NULL;
 buf_alloc_failed:
        return -1;
 }
@@ -4471,6 +4511,15 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
        u64 *size_array;
        int i;
 
+       /* CHECK: Test/set of login_pending does not need to be atomic
+        * because only ibmvnic_tasklet tests/clears this.
+        */
+       if (!adapter->login_pending) {
+               netdev_warn(netdev, "Ignoring unexpected login response\n");
+               return 0;
+       }
+       adapter->login_pending = false;
+
        dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
                         DMA_TO_DEVICE);
        dma_unmap_single(dev, adapter->login_rsp_buf_token,
@@ -4500,7 +4549,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
             adapter->req_rx_add_queues !=
             be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
                dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
-               ibmvnic_remove(adapter->vdev);
+               ibmvnic_reset(adapter, VNIC_RESET_FATAL);
                return -EIO;
        }
        size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
@@ -4842,6 +4891,11 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                case IBMVNIC_CRQ_INIT:
                        dev_info(dev, "Partner initialized\n");
                        adapter->from_passive_init = true;
+                       /* Discard any stale login responses from prev reset.
+                        * CHECK: should we clear even on INIT_COMPLETE?
+                        */
+                       adapter->login_pending = false;
+
                        if (!completion_done(&adapter->init_done)) {
                                complete(&adapter->init_done);
                                adapter->init_done_rc = -EIO;
@@ -5179,7 +5233,7 @@ map_failed:
 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
 {
        struct device *dev = &adapter->vdev->dev;
-       unsigned long timeout = msecs_to_jiffies(30000);
+       unsigned long timeout = msecs_to_jiffies(20000);
        u64 old_num_rx_queues, old_num_tx_queues;
        int rc;
 
@@ -5274,6 +5328,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        dev_set_drvdata(&dev->dev, netdev);
        adapter->vdev = dev;
        adapter->netdev = netdev;
+       adapter->login_pending = false;
 
        ether_addr_copy(adapter->mac_addr, mac_addr_p);
        ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -5337,7 +5392,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        adapter->state = VNIC_PROBED;
 
        adapter->wait_for_reset = false;
-
+       adapter->last_reset_time = jiffies;
        return 0;
 
 ibmvnic_register_fail:
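
Several ibmvnic hunks above insert dma_rmb() between the check that a queue entry holds a valid descriptor and the reads of that entry's contents. The C11 sketch below shows the same ordering requirement on ordinary memory, with an acquire fence standing in for dma_rmb(); the types and names are invented for the example and are not the driver's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint64_t payload;
	atomic_int valid;	/* producer sets this last */
};

static void produce(struct desc *d, uint64_t value)
{
	d->payload = value;
	/* Publish: the release store orders the payload write before it. */
	atomic_store_explicit(&d->valid, 1, memory_order_release);
}

static int consume(struct desc *d, uint64_t *out)
{
	if (!atomic_load_explicit(&d->valid, memory_order_relaxed))
		return 0;	/* nothing pending */

	/* Same role as the dma_rmb() above: order the valid check before the
	 * payload read, so a "valid" slot is never paired with stale contents. */
	atomic_thread_fence(memory_order_acquire);

	*out = d->payload;
	atomic_store_explicit(&d->valid, 0, memory_order_release);
	return 1;
}

int main(void)
{
	struct desc d = { .payload = 0, .valid = 0 };
	uint64_t v;

	produce(&d, 42);
	if (consume(&d, &v))
		printf("consumed %llu\n", (unsigned long long)v);
	return 0;
}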
index 9d0c32a..c09c3f6 100644 (file)
@@ -1087,6 +1087,9 @@ struct ibmvnic_adapter {
        struct delayed_work ibmvnic_delayed_reset;
        unsigned long resetting;
        bool napi_enabled, from_passive_init;
+       bool login_pending;
+       /* last device reset time */
+       unsigned long last_reset_time;
 
        bool failover_pending;
        bool force_reset_recovery;
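
The new last_reset_time field above pairs with the tx-timeout change in ibmvnic.c, which refuses to queue another reset within one watchdog interval of the previous one. That check uses the wraparound-safe jiffies comparison; the sketch below re-derives the comparison with a signed subtraction so it stands alone, and the values are illustrative only.

#include <stdio.h>

typedef unsigned long jiffies_t;

/* True if a is before b, even if the counter wrapped in between. */
static int time_before_sketch(jiffies_t a, jiffies_t b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	jiffies_t now = 1000, last_reset = 900, watchdog = 500;

	if (time_before_sketch(now, last_reset + watchdog))
		puts("too soon after the last reset: skip queuing another");
	else
		puts("ok to queue a reset");
	return 0;
}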
index 68d2e52..adf337d 100644 (file)
@@ -4434,6 +4434,7 @@ static int mvpp2_open(struct net_device *dev)
        if (!valid) {
                netdev_err(port->dev,
                           "invalid configuration: no dt or link IRQ");
+               err = -ENOENT;
                goto err_free_irq;
        }
 
index 97f1594..e51f60b 100644 (file)
@@ -44,6 +44,7 @@ static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock
                         outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
 {
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
@@ -63,6 +64,7 @@ static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock
                            outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
               0xff, 16);
 }
+#endif
 
 void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
 {
index 6dd3ea3..d97203c 100644 (file)
@@ -161,7 +161,9 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 }
 
 static inline void
-mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+                           struct mlx5e_accel_tx_state *accel,
+                           struct mlx5_wqe_eth_seg *eseg)
 {
        if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
@@ -173,6 +175,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                        sq->stats->csum_partial++;
                }
+#ifdef CONFIG_MLX5_EN_TLS
+       } else if (unlikely(accel && accel->tls.tls_tisn)) {
+               eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+               sq->stats->csum_partial++;
+#endif
        } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
                ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
 
@@ -607,12 +614,13 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 }
 
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
-                                  struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+                                  struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
+                                  struct mlx5_wqe_eth_seg *eseg)
 {
        if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
                return false;
 
-       mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+       mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
 
        return true;
 }
@@ -639,7 +647,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
                if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
                        struct mlx5_wqe_eth_seg eseg = {};
 
-                       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
+                       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
                                return NETDEV_TX_OK;
 
                        mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -656,7 +664,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        /* May update the WQE, but may not post other WQEs. */
        mlx5e_accel_tx_finish(sq, wqe, &accel,
                              (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
-       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
+       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
                return NETDEV_TX_OK;
 
        mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
@@ -675,7 +683,7 @@ void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit
        mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
        pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-       mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
+       mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
        mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
 }
 
@@ -944,7 +952,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
        mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
 
-       mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+       mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
 
        eseg->mss = attr.mss;
 
index 539baea..eb956ce 100644 (file)
@@ -422,6 +422,24 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
                      npages, ec_function, func_id);
 }
 
+static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
+                                    u32 npages)
+{
+       u32 pages_set = 0;
+       unsigned int n;
+
+       for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
+               MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
+                                fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
+               pages_set++;
+
+               if (!--npages)
+                       break;
+       }
+
+       return pages_set;
+}
+
 static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             u32 *in, int in_size, u32 *out, int out_size)
 {
@@ -448,8 +466,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                fwp = rb_entry(p, struct fw_page, rb_node);
                p = rb_next(p);
 
-               MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
-               i++;
+               i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
        }
 
        MLX5_SET(manage_pages_out, out, output_num_entries, i);
index ebc8790..ba65ec4 100644 (file)
@@ -92,6 +92,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
        caps->eswitch_manager   = MLX5_CAP_GEN(mdev, eswitch_manager);
        caps->gvmi              = MLX5_CAP_GEN(mdev, vhca_id);
        caps->flex_protocols    = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+       caps->sw_format_ver     = MLX5_CAP_GEN(mdev, steering_format_version);
 
        if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
                caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
index 890767a..aa2c2d6 100644 (file)
@@ -223,6 +223,11 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
        if (ret)
                return ret;
 
+       if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
+               mlx5dr_err(dmn, "SW steering is not supported on this device\n");
+               return -EOPNOTSUPP;
+       }
+
        ret = dr_domain_query_fdb_caps(mdev, dmn);
        if (ret)
                return ret;
index 81453fe..51880df 100644 (file)
@@ -626,6 +626,7 @@ struct mlx5dr_cmd_caps {
        u8 max_ft_level;
        u16 roce_min_src_udp;
        u8 num_esw_ports;
+       u8 sw_format_ver;
        bool eswitch_manager;
        bool rx_sw_owner;
        bool tx_sw_owner;
index be66601..040a15a 100644 (file)
@@ -1078,16 +1078,20 @@ static int pasemi_mac_open(struct net_device *dev)
 
        mac->tx = pasemi_mac_setup_tx_resources(dev);
 
-       if (!mac->tx)
+       if (!mac->tx) {
+               ret = -ENOMEM;
                goto out_tx_ring;
+       }
 
        /* We might already have allocated rings in case mtu was changed
         * before interface was brought up.
         */
        if (dev->mtu > 1500 && !mac->num_cs) {
                pasemi_mac_setup_csrings(mac);
-               if (!mac->num_cs)
+               if (!mac->num_cs) {
+                       ret = -ENOMEM;
                        goto out_tx_ring;
+               }
        }
 
        /* Zero out rmon counters */
index 5523f06..627c333 100644 (file)
@@ -258,11 +258,21 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                skb_dst_set(skb, &tun_dst->dst);
 
        /* Ignore packet loops (and multicast echo) */
-       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) {
-               geneve->dev->stats.rx_errors++;
-               goto drop;
-       }
+       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+               goto rx_error;
 
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
+               if (pskb_may_pull(skb, sizeof(struct iphdr)))
+                       goto rx_error;
+               break;
+       case htons(ETH_P_IPV6):
+               if (pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+                       goto rx_error;
+               break;
+       default:
+               goto rx_error;
+       }
        oiph = skb_network_header(skb);
        skb_reset_network_header(skb);
 
@@ -299,6 +309,8 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
                dev_sw_netstats_rx_add(geneve->dev, len);
 
        return;
+rx_error:
+       geneve->dev->stats.rx_errors++;
 drop:
        /* Consume bad packet */
        kfree_skb(skb);
index 236fcc5..e1e44d6 100644 (file)
@@ -3799,6 +3799,9 @@ static void vxlan_config_apply(struct net_device *dev,
                dev->gso_max_segs = lowerdev->gso_max_segs;
 
                needed_headroom = lowerdev->hard_header_len;
+               needed_headroom += lowerdev->needed_headroom;
+
+               dev->needed_tailroom = lowerdev->needed_tailroom;
 
                max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
                                           VXLAN_HEADROOM);
@@ -3878,8 +3881,10 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 
        if (dst->remote_ifindex) {
                remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
-               if (!remote_dev)
+               if (!remote_dev) {
+                       err = -ENODEV;
                        goto errout;
+               }
 
                err = netdev_upper_dev_link(remote_dev, dev, extack);
                if (err)
index ca4967b..580b07a 100644 (file)
@@ -491,8 +491,8 @@ struct iwl_cfg {
 #define IWL_CFG_RF_ID_HR               0x7
 #define IWL_CFG_RF_ID_HR1              0x4
 
-#define IWL_CFG_NO_160                 0x0
-#define IWL_CFG_160                    0x1
+#define IWL_CFG_NO_160                 0x1
+#define IWL_CFG_160                    0x0
 
 #define IWL_CFG_CORES_BT               0x0
 #define IWL_CFG_CORES_BT_GNSS          0x5
index 129021f..7b5ece3 100644 (file)
@@ -536,9 +536,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
        {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
        {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
+       {IWL_PCI_DEVICE(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0)},
        {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
        {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
        {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
+       {IWL_PCI_DEVICE(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0)},
+       {IWL_PCI_DEVICE(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0)},
+       {IWL_PCI_DEVICE(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0)},
+       {IWL_PCI_DEVICE(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0)},
+       {IWL_PCI_DEVICE(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0)},
        {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
        {IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)},
        {IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)},
index 7d3f0a2..f1ae9ff 100644 (file)
@@ -1020,8 +1020,6 @@ void mt76u_stop_tx(struct mt76_dev *dev)
 {
        int ret;
 
-       mt76_worker_disable(&dev->tx_worker);
-
        ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
                                 HZ / 5);
        if (!ret) {
@@ -1040,6 +1038,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
                                usb_kill_urb(q->entry[j].urb);
                }
 
+               mt76_worker_disable(&dev->tx_worker);
+
                /* On device removal we might queue skb's, but mt76u_tx_kick()
                 * will fail to submit urb, cleanup those skb's manually.
                 */
@@ -1048,18 +1048,19 @@ void mt76u_stop_tx(struct mt76_dev *dev)
                        if (!q)
                                continue;
 
-                       entry = q->entry[q->tail];
-                       q->entry[q->tail].done = false;
-
-                       mt76_queue_tx_complete(dev, q, &entry);
+                       while (q->queued > 0) {
+                               entry = q->entry[q->tail];
+                               q->entry[q->tail].done = false;
+                               mt76_queue_tx_complete(dev, q, &entry);
+                       }
                }
+
+               mt76_worker_enable(&dev->tx_worker);
        }
 
        cancel_work_sync(&dev->usb.stat_work);
        clear_bit(MT76_READING_STATS, &dev->phy.state);
 
-       mt76_worker_enable(&dev->tx_worker);
-
        mt76_tx_status_check(dev, NULL, true);
 }
 EXPORT_SYMBOL_GPL(mt76u_stop_tx);
index 3852c4f..efbba9c 100644 (file)
@@ -147,6 +147,8 @@ static int rtw_debugfs_copy_from_user(char tmp[], int size,
 {
        int tmp_len;
 
+       memset(tmp, 0, size);
+
        if (count < num)
                return -EFAULT;
 
index 456dc4a..e63457e 100644 (file)
@@ -270,11 +270,6 @@ static void usb_init_common_7211b0(struct brcm_usb_init_params *params)
        reg |= params->mode << USB_PHY_UTMI_CTL_1_PHY_MODE_SHIFT;
        brcm_usb_writel(reg, usb_phy + USB_PHY_UTMI_CTL_1);
 
-       /* Fix the incorrect default */
-       reg = brcm_usb_readl(ctrl + USB_CTRL_SETUP);
-       reg &= ~USB_CTRL_SETUP_tca_drv_sel_MASK;
-       brcm_usb_writel(reg, ctrl + USB_CTRL_SETUP);
-
        usb_init_common(params);
 
        /*
index 58ec695..62c2476 100644 (file)
@@ -4,7 +4,7 @@
 #
 config PHY_INTEL_KEEMBAY_EMMC
        tristate "Intel Keem Bay EMMC PHY driver"
-       depends on (OF && ARM64) || COMPILE_TEST
+       depends on ARCH_KEEMBAY || COMPILE_TEST
        depends on HAS_IOMEM
        select GENERIC_PHY
        select REGMAP_MMIO
index 50c5e93..c8126bd 100644 (file)
@@ -12,7 +12,7 @@ config PHY_MTK_TPHY
          it supports multiple usb2.0, usb3.0 ports, PCIe and
          SATA, and meanwhile supports two version T-PHY which have
          different banks layout, the T-PHY with shared banks between
-         multi-ports is first version, otherwise is second veriosn,
+         multi-ports is first version, otherwise is second version,
          so you can easily distinguish them by banks layout.
 
 config PHY_MTK_UFS
index 089db0d..442522b 100644 (file)
@@ -364,7 +364,8 @@ static int cpcap_usb_init_irq(struct platform_device *pdev,
 
        error = devm_request_threaded_irq(ddata->dev, irq, NULL,
                                          cpcap_phy_irq_thread,
-                                         IRQF_SHARED,
+                                         IRQF_SHARED |
+                                         IRQF_ONESHOT,
                                          name, ddata);
        if (error) {
                dev_err(ddata->dev, "could not get irq %s: %i\n",
index 928db51..7f6fcb8 100644 (file)
@@ -87,7 +87,7 @@ config PHY_QCOM_USB_HSIC
 
 config PHY_QCOM_USB_HS_28NM
        tristate "Qualcomm 28nm High-Speed PHY"
-       depends on ARCH_QCOM || COMPILE_TEST
+       depends on OF && (ARCH_QCOM || COMPILE_TEST)
        depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
        select GENERIC_PHY
        help
@@ -98,7 +98,7 @@ config PHY_QCOM_USB_HS_28NM
 
 config PHY_QCOM_USB_SS
        tristate "Qualcomm USB Super-Speed PHY driver"
-       depends on ARCH_QCOM || COMPILE_TEST
+       depends on OF && (ARCH_QCOM || COMPILE_TEST)
        depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
        select GENERIC_PHY
        help
index 5d33ad4..0cda168 100644 (file)
@@ -3926,7 +3926,7 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
        struct phy_provider *phy_provider;
        void __iomem *serdes;
        void __iomem *usb_serdes;
-       void __iomem *dp_serdes;
+       void __iomem *dp_serdes = NULL;
        const struct qmp_phy_combo_cfg *combo_cfg = NULL;
        const struct qmp_phy_cfg *cfg = NULL;
        const struct qmp_phy_cfg *usb_cfg = NULL;
index de4a46f..ad88d74 100644 (file)
@@ -1242,6 +1242,7 @@ power_down:
 reset:
        reset_control_assert(padctl->rst);
 remove:
+       platform_set_drvdata(pdev, NULL);
        soc->ops->remove(padctl);
        return err;
 }
index 5046b6b..b4c651f 100644 (file)
@@ -84,12 +84,14 @@ struct sl28cpld_pwm {
        struct regmap *regmap;
        u32 offset;
 };
+#define sl28cpld_pwm_from_chip(_chip) \
+       container_of(_chip, struct sl28cpld_pwm, pwm_chip)
 
 static void sl28cpld_pwm_get_state(struct pwm_chip *chip,
                                   struct pwm_device *pwm,
                                   struct pwm_state *state)
 {
-       struct sl28cpld_pwm *priv = dev_get_drvdata(chip->dev);
+       struct sl28cpld_pwm *priv = sl28cpld_pwm_from_chip(chip);
        unsigned int reg;
        int prescaler;
 
@@ -118,7 +120,7 @@ static void sl28cpld_pwm_get_state(struct pwm_chip *chip,
 static int sl28cpld_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                              const struct pwm_state *state)
 {
-       struct sl28cpld_pwm *priv = dev_get_drvdata(chip->dev);
+       struct sl28cpld_pwm *priv = sl28cpld_pwm_from_chip(chip);
        unsigned int cycle, prescaler;
        bool write_duty_cycle_first;
        int ret;
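
The pwm-sl28cpld hunks above stop fetching the private data through chip->dev and instead recover it from the embedded pwm_chip member. A standalone sketch of that container_of pattern follows; the macro is re-declared locally and the struct names are stand-ins, not the driver's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pwm_chip { int npwm; };		/* stand-in for the core's type */

struct my_pwm {				/* stand-in for the driver private data */
	unsigned int offset;
	struct pwm_chip chip;		/* embedded member handed to the core */
};

static struct my_pwm *my_pwm_from_chip(struct pwm_chip *chip)
{
	return container_of(chip, struct my_pwm, chip);
}

int main(void)
{
	struct my_pwm priv = { .offset = 0x1b, .chip = { .npwm = 2 } };

	/* The core only sees &priv.chip; the driver maps it back to priv. */
	printf("offset = 0x%x\n", my_pwm_from_chip(&priv.chip)->offset);
	return 0;
}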
index 66c1e67..365f30f 100644 (file)
@@ -1114,7 +1114,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
        struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
        struct cdns3_request *priv_req;
        struct cdns3_trb *trb;
-       struct cdns3_trb *link_trb;
+       struct cdns3_trb *link_trb = NULL;
        dma_addr_t trb_dma;
        u32 togle_pcs = 1;
        int sg_iter = 0;
@@ -1193,10 +1193,20 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
 
        /* set incorrect Cycle Bit for first trb*/
        control = priv_ep->pcs ? 0 : TRB_CYCLE;
+       trb->length = 0;
+       if (priv_dev->dev_ver >= DEV_VER_V2) {
+               u16 td_size;
+
+               td_size = DIV_ROUND_UP(request->length,
+                                      priv_ep->endpoint.maxpacket);
+               if (priv_dev->gadget.speed == USB_SPEED_SUPER)
+                       trb->length = TRB_TDL_SS_SIZE(td_size);
+               else
+                       control |= TRB_TDL_HS_SIZE(td_size);
+       }
 
        do {
                u32 length;
-               u16 td_size = 0;
 
                /* fill TRB */
                control |= TRB_TYPE(TRB_NORMAL);
@@ -1208,20 +1218,12 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                        length = request->length;
                }
 
-               if (likely(priv_dev->dev_ver >= DEV_VER_V2))
-                       td_size = DIV_ROUND_UP(length,
-                                              priv_ep->endpoint.maxpacket);
-               else if (priv_ep->flags & EP_TDLCHK_EN)
+               if (priv_ep->flags & EP_TDLCHK_EN)
                        total_tdl += DIV_ROUND_UP(length,
                                               priv_ep->endpoint.maxpacket);
 
-               trb->length = cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
+               trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) |
                                        TRB_LEN(length));
-               if (priv_dev->gadget.speed == USB_SPEED_SUPER)
-                       trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
-               else
-                       control |= TRB_TDL_HS_SIZE(td_size);
-
                pcs = priv_ep->pcs ? TRB_CYCLE : 0;
 
                /*
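
The cdns3 change above computes the TD size (TDL) once from the whole request length before the TRB loop instead of per TRB. The field is simply a ceiling division of the request length by the endpoint's max packet size; a throwaway sketch of that arithmetic, where the 1024-byte max-packet value is only an example:

#include <stdio.h>

/* Ceiling division, as the kernel's DIV_ROUND_UP() does. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int maxpacket = 1024;	/* example value for a bulk endpoint */
	unsigned int lengths[] = { 0, 1, 1024, 1025, 3000 };

	for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++)
		printf("request length %u -> td_size %u\n",
		       lengths[i], DIV_ROUND_UP(lengths[i], maxpacket));
	return 0;
}
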
index e96a858..5332363 100644 (file)
@@ -482,11 +482,11 @@ static void snoop_urb(struct usb_device *udev,
 
        if (userurb) {          /* Async */
                if (when == SUBMIT)
-                       dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
+                       dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
                                        "length %u\n",
                                        userurb, ep, t, d, length);
                else
-                       dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
+                       dev_info(&udev->dev, "userurb %px, ep%d %s-%s, "
                                        "actual_length %u status %d\n",
                                        userurb, ep, t, d, length,
                                        timeout_or_status);
@@ -1997,7 +1997,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
        if (as) {
                int retval;
 
-               snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+               snoop(&ps->dev->dev, "reap %px\n", as->userurb);
                retval = processcompl(as, (void __user * __user *)arg);
                free_async(as);
                return retval;
@@ -2014,7 +2014,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
 
        as = async_getcompleted(ps);
        if (as) {
-               snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+               snoop(&ps->dev->dev, "reap %px\n", as->userurb);
                retval = processcompl(as, (void __user * __user *)arg);
                free_async(as);
        } else {
@@ -2142,7 +2142,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
        if (as) {
                int retval;
 
-               snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+               snoop(&ps->dev->dev, "reap %px\n", as->userurb);
                retval = processcompl_compat(as, (void __user * __user *)arg);
                free_async(as);
                return retval;
@@ -2159,7 +2159,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
 
        as = async_getcompleted(ps);
        if (as) {
-               snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
+               snoop(&ps->dev->dev, "reap %px\n", as->userurb);
                retval = processcompl_compat(as, (void __user * __user *)arg);
                free_async(as);
        } else {
@@ -2624,7 +2624,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
 #endif
 
        case USBDEVFS_DISCARDURB:
-               snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
+               snoop(&dev->dev, "%s: DISCARDURB %px\n", __func__, p);
                ret = proc_unlinkurb(ps, p);
                break;
 
index a1e3a03..fad31cc 100644 (file)
@@ -348,6 +348,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Guillemot Webcam Hercules Dualpix Exchange*/
        { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Guillemot Hercules DJ Console audio card (BZ 208357) */
+       { USB_DEVICE(0x06f8, 0xb000), .driver_info =
+                       USB_QUIRK_ENDPOINT_IGNORE },
+
        /* Midiman M-Audio Keystation 88es */
        { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -421,6 +425,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1532, 0x0116), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
+       { USB_DEVICE(0x17ef, 0xa012), .driver_info =
+                       USB_QUIRK_DISCONNECT_SUSPEND },
+
        /* BUILDWIN Photo Frame */
        { USB_DEVICE(0x1908, 0x1315), .driver_info =
                        USB_QUIRK_HONOR_BNUMINTERFACES },
@@ -521,6 +529,8 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
  * Matched for devices with USB_QUIRK_ENDPOINT_IGNORE.
  */
 static const struct usb_device_id usb_endpoint_ignore[] = {
+       { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x01 },
+       { USB_DEVICE_INTERFACE_NUMBER(0x06f8, 0xb000, 5), .driver_info = 0x81 },
        { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
        { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
        { }
index 85cb157..19d9794 100644 (file)
@@ -1315,7 +1315,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
        midi->id = kstrdup(opts->id, GFP_KERNEL);
        if (opts->id && !midi->id) {
                status = -ENOMEM;
-               goto setup_fail;
+               goto midi_free;
        }
        midi->in_ports = opts->in_ports;
        midi->out_ports = opts->out_ports;
@@ -1327,7 +1327,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
 
        status = kfifo_alloc(&midi->in_req_fifo, midi->qlen, GFP_KERNEL);
        if (status)
-               goto setup_fail;
+               goto midi_free;
 
        spin_lock_init(&midi->transmit_lock);
 
@@ -1343,9 +1343,13 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
 
        return &midi->func;
 
+midi_free:
+       if (midi)
+               kfree(midi->id);
+       kfree(midi);
 setup_fail:
        mutex_unlock(&opts->lock);
-       kfree(midi);
+
        return ERR_PTR(status);
 }
 
index 1b430b3..71e7d10 100644 (file)
@@ -2039,6 +2039,9 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
        return 0;
 
 Enomem:
+       kfree(CHIP);
+       CHIP = NULL;
+
        return -ENOMEM;
 }
 
index 6c5908a..e7f1208 100644 (file)
@@ -88,6 +88,7 @@ config TYPEC_STUSB160X
 config TYPEC_QCOM_PMIC
        tristate "Qualcomm PMIC USB Type-C driver"
        depends on ARCH_QCOM || COMPILE_TEST
+       depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
        help
          Driver for supporting role switch over the Qualcomm PMIC.  This will
          handle the USB Type-C role and orientation detection reported by the
index 2a618f0..d21750b 100644 (file)
@@ -562,7 +562,7 @@ static int stusb160x_get_fw_caps(struct stusb160x *chip,
         * Supported power operation mode can be configured through device tree
         * else it is read from chip registers in stusb160x_get_caps.
         */
-       ret = fwnode_property_read_string(fwnode, "power-opmode", &cap_str);
+       ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &cap_str);
        if (!ret) {
                ret = typec_find_pwr_opmode(cap_str);
                /* Power delivery not yet supported */
index 358f604..6caf539 100644 (file)
@@ -32,6 +32,7 @@ config IFCVF
 
 config MLX5_VDPA
        bool
+       select VHOST_IOTLB
        help
          Support library for Mellanox VDPA drivers. Provides code that is
          common for all types of VDPA drivers. The following drivers are planned:
index f22fce5..6ff8a50 100644 (file)
@@ -220,6 +220,7 @@ struct vhost_scsi_tmf {
        struct list_head queue_entry;
 
        struct se_cmd se_cmd;
+       u8 scsi_resp;
        struct vhost_scsi_inflight *inflight;
        struct iovec resp_iov;
        int in_iovs;
@@ -426,6 +427,7 @@ static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
        struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
                                                  se_cmd);
 
+       tmf->scsi_resp = se_cmd->se_tmr_req->response;
        transport_generic_free_cmd(&tmf->se_cmd, 0);
 }
 
@@ -1183,7 +1185,7 @@ static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
                                                  vwork);
        int resp_code;
 
-       if (tmf->se_cmd.se_tmr_req->response == TMR_FUNCTION_COMPLETE)
+       if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
                resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
        else
                resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;
index 2754f30..29ed417 100644 (file)
@@ -348,7 +348,9 @@ static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
                .last = v->range.last,
        };
 
-       return copy_to_user(argp, &range, sizeof(range));
+       if (copy_to_user(argp, &range, sizeof(range)))
+               return -EFAULT;
+       return 0;
 }
 
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
@@ -577,6 +579,8 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
 
        if (r)
                vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+       else
+               atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
 
        return r;
 }
@@ -608,8 +612,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
        unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
        unsigned int gup_flags = FOLL_LONGTERM;
        unsigned long npages, cur_base, map_pfn, last_pfn = 0;
-       unsigned long locked, lock_limit, pinned, i;
+       unsigned long lock_limit, sz2pin, nchunks, i;
        u64 iova = msg->iova;
+       long pinned;
        int ret = 0;
 
        if (msg->iova < v->range.first ||
@@ -620,6 +625,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                                    msg->iova + msg->size - 1))
                return -EEXIST;
 
+       /* Limit the use of memory for bookkeeping */
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;
@@ -628,52 +634,75 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                gup_flags |= FOLL_WRITE;
 
        npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
-       if (!npages)
-               return -EINVAL;
+       if (!npages) {
+               ret = -EINVAL;
+               goto free;
+       }
 
        mmap_read_lock(dev->mm);
 
-       locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-       if (locked > lock_limit) {
+       if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
                ret = -ENOMEM;
-               goto out;
+               goto unlock;
        }
 
        cur_base = msg->uaddr & PAGE_MASK;
        iova &= PAGE_MASK;
+       nchunks = 0;
 
        while (npages) {
-               pinned = min_t(unsigned long, npages, list_size);
-               ret = pin_user_pages(cur_base, pinned,
-                                    gup_flags, page_list, NULL);
-               if (ret != pinned)
+               sz2pin = min_t(unsigned long, npages, list_size);
+               pinned = pin_user_pages(cur_base, sz2pin,
+                                       gup_flags, page_list, NULL);
+               if (sz2pin != pinned) {
+                       if (pinned < 0) {
+                               ret = pinned;
+                       } else {
+                               unpin_user_pages(page_list, pinned);
+                               ret = -ENOMEM;
+                       }
                        goto out;
+               }
+               nchunks++;
 
                if (!last_pfn)
                        map_pfn = page_to_pfn(page_list[0]);
 
-               for (i = 0; i < ret; i++) {
+               for (i = 0; i < pinned; i++) {
                        unsigned long this_pfn = page_to_pfn(page_list[i]);
                        u64 csize;
 
                        if (last_pfn && (this_pfn != last_pfn + 1)) {
                                /* Pin a contiguous chunk of memory */
                                csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
-                               if (vhost_vdpa_map(v, iova, csize,
-                                                  map_pfn << PAGE_SHIFT,
-                                                  msg->perm))
+                               ret = vhost_vdpa_map(v, iova, csize,
+                                                    map_pfn << PAGE_SHIFT,
+                                                    msg->perm);
+                               if (ret) {
+                                       /*
+                                        * Unpin the pages that are left unmapped
+                                        * from this point on in the current
+                                        * page_list. The remaining outstanding
+                                        * ones which may stride across several
+                                        * chunks will be covered in the common
+                                        * error path subsequently.
+                                        */
+                                       unpin_user_pages(&page_list[i],
+                                                        pinned - i);
                                        goto out;
+                               }
+
                                map_pfn = this_pfn;
                                iova += csize;
+                               nchunks = 0;
                        }
 
                        last_pfn = this_pfn;
                }
 
-               cur_base += ret << PAGE_SHIFT;
-               npages -= ret;
+               cur_base += pinned << PAGE_SHIFT;
+               npages -= pinned;
        }
 
        /* Pin the rest chunk */
@@ -681,10 +710,27 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                             map_pfn << PAGE_SHIFT, msg->perm);
 out:
        if (ret) {
+               if (nchunks) {
+                       unsigned long pfn;
+
+                       /*
+                        * Unpin the outstanding pages which are yet to be
+                        * mapped but haven't due to vdpa_map() or
+                        * pin_user_pages() failure.
+                        *
+                        * Mapped pages are accounted in vdpa_map(), hence
+                        * the corresponding unpinning will be handled by
+                        * vdpa_unmap().
+                        */
+                       WARN_ON(!last_pfn);
+                       for (pfn = map_pfn; pfn <= last_pfn; pfn++)
+                               unpin_user_page(pfn_to_page(pfn));
+               }
                vhost_vdpa_unmap(v, msg->iova, msg->size);
-               atomic64_sub(npages, &dev->mm->pinned_vm);
        }
+unlock:
        mmap_read_unlock(dev->mm);
+free:
        free_page((unsigned long)page_list);
        return ret;
 }
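
One of the vhost-vdpa fixes above replaces a bare "return copy_to_user(...)" with an explicit -EFAULT conversion: copy_to_user() returns the number of bytes it could not copy, not an errno, so returning it directly hands a positive, meaningless value back to the ioctl caller. A small userspace sketch of that convention; fake_copy_to_user() and struct range are stand-ins, not kernel APIs:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct range { unsigned long first, last; };

/* Userspace stand-in: like copy_to_user(), returns the number of bytes
 * that could NOT be copied (0 on success), never a negative errno. */
static unsigned long fake_copy_to_user(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

/* Buggy shape: passes the "bytes not copied" count straight through,
 * so a short copy would surface as a positive return, not an error. */
static long get_range_buggy(struct range *dst, const struct range *src)
{
	return fake_copy_to_user(dst, src, sizeof(*src));
}

/* Fixed shape, mirroring vhost_vdpa_get_iova_range(): any short copy
 * becomes -EFAULT, success becomes 0. */
static long get_range_fixed(struct range *dst, const struct range *src)
{
	if (fake_copy_to_user(dst, src, sizeof(*src)))
		return -EFAULT;
	return 0;
}

int main(void)
{
	struct range src = { .first = 0, .last = 4095 }, dst;

	printf("buggy: %ld, fixed: %ld (first=%lu, last=%lu)\n",
	       get_range_buggy(&dst, &src), get_range_fixed(&dst, &src),
	       dst.first, dst.last);
	return 0;
}
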
index 8bd8b40..b7403ba 100644 (file)
@@ -730,7 +730,7 @@ EXPORT_SYMBOL(vringh_iov_pull_user);
 /**
  * vringh_iov_push_user - copy bytes into vring_iov.
  * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
  *
  * Returns the bytes copied <= len or a negative errno.
@@ -976,7 +976,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
 /**
  * vringh_iov_push_kern - copy bytes into vring_iov.
  * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
  *
  * Returns the bytes copied <= len or a negative errno.
@@ -1333,7 +1333,7 @@ EXPORT_SYMBOL(vringh_iov_pull_iotlb);
  * vringh_iov_push_iotlb - copy bytes into vring_iov.
  * @vrh: the vring.
  * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
  *
  * Returns the bytes copied <= len or a negative errno.
index b177fd3..be57689 100644 (file)
@@ -655,6 +655,8 @@ const struct file_operations v9fs_cached_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = v9fs_file_mmap,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync,
 };
 
@@ -667,6 +669,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = v9fs_file_mmap,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync_dotl,
 };
 
@@ -678,6 +682,8 @@ const struct file_operations v9fs_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = generic_file_readonly_mmap,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync,
 };
 
@@ -690,6 +696,8 @@ const struct file_operations v9fs_file_operations_dotl = {
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = generic_file_readonly_mmap,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync_dotl,
 };
 
@@ -701,6 +709,8 @@ const struct file_operations v9fs_mmap_file_operations = {
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
        .mmap = v9fs_mmap_file_mmap,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync,
 };
 
@@ -713,5 +723,7 @@ const struct file_operations v9fs_mmap_file_operations_dotl = {
        .lock = v9fs_file_lock_dotl,
        .flock = v9fs_file_flock_dotl,
        .mmap = v9fs_mmap_file_mmap,
+       .splice_read = generic_file_splice_read,
+       .splice_write = iter_file_splice_write,
        .fsync = v9fs_file_fsync_dotl,
 };
index c38156f..28c1459 100644 (file)
@@ -876,6 +876,8 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
        list_del_init(&server->tcp_ses_list);
        spin_unlock(&cifs_tcp_ses_lock);
 
+       cancel_delayed_work_sync(&server->echo);
+
        spin_lock(&GlobalMid_Lock);
        server->tcpStatus = CifsExiting;
        spin_unlock(&GlobalMid_Lock);
index e27e255..36b2ece 100644 (file)
@@ -339,8 +339,8 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                return -EAGAIN;
 
        if (signal_pending(current)) {
-               cifs_dbg(FYI, "signal is pending before sending any data\n");
-               return -EINTR;
+               cifs_dbg(FYI, "signal pending before send request\n");
+               return -ERESTARTSYS;
        }
 
        /* cork the socket */
index 96c0c86..0297ad9 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/efi.h>
 #include <linux/fs.h>
 #include <linux/ctype.h>
+#include <linux/kmemleak.h>
 #include <linux/slab.h>
 #include <linux/uuid.h>
 
@@ -103,6 +104,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
        var->var.VariableName[i] = '\0';
 
        inode->i_private = var;
+       kmemleak_ignore(var);
 
        err = efivar_entry_add(var, &efivarfs_list);
        if (err)
index f943fd0..15880a6 100644 (file)
@@ -21,7 +21,6 @@ LIST_HEAD(efivarfs_list);
 static void efivarfs_evict_inode(struct inode *inode)
 {
        clear_inode(inode);
-       kfree(inode->i_private);
 }
 
 static const struct super_operations efivarfs_ops = {
index d98a2e5..35a6fd1 100644 (file)
@@ -1035,6 +1035,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        gl->gl_node.next = NULL;
        gl->gl_flags = 0;
        gl->gl_name = name;
+       lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
        gl->gl_lockref.count = 1;
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
index 67f2921..3faa421 100644 (file)
@@ -245,7 +245,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
                              const char *fs_id_buf)
 {
-       struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+       struct gfs2_rgrpd *rgd = gl->gl_object;
 
        if (rgd)
                gfs2_rgrp_dump(seq, rgd, fs_id_buf);
@@ -582,7 +582,8 @@ static int freeze_go_sync(struct gfs2_glock *gl)
         * Once thawed, the work func acquires the freeze glock in
         * SH and everybody goes back to thawed.
         */
-       if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp)) {
+       if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
+           !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
@@ -781,6 +782,7 @@ const struct gfs2_glock_operations gfs2_iopen_glops = {
        .go_callback = iopen_go_callback,
        .go_demote_ok = iopen_go_demote_ok,
        .go_flags = GLOF_LRU | GLOF_NONDISK,
+       .go_subclass = 1,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {
index d770730..f8858d9 100644 (file)
@@ -247,6 +247,7 @@ struct gfs2_glock_operations {
                        const char *fs_id_buf);
        void (*go_callback)(struct gfs2_glock *gl, bool remote);
        void (*go_free)(struct gfs2_glock *gl);
+       const int go_subclass;
        const int go_type;
        const unsigned long go_flags;
 #define GLOF_ASPACE 1 /* address space attached */
index 077ccb1..65ae4fc 100644 (file)
@@ -150,6 +150,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
                if (unlikely(error))
                        goto fail;
+               if (blktype != GFS2_BLKST_UNLINKED)
+                       gfs2_cancel_delete_work(io_gl);
 
                if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
                        /*
@@ -180,8 +182,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
                        goto fail;
-               if (blktype != GFS2_BLKST_UNLINKED)
-                       gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
                glock_set_object(ip->i_iopen_gh.gh_gl, ip);
                gfs2_glock_put(io_gl);
                io_gl = NULL;
@@ -725,13 +725,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        flush_delayed_work(&ip->i_gl->gl_work);
        glock_set_object(ip->i_gl, ip);
 
-       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
        if (error)
                goto fail_free_inode;
+       gfs2_cancel_delete_work(io_gl);
+       glock_set_object(io_gl, ip);
+
+       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
+       if (error)
+               goto fail_gunlock2;
 
        error = gfs2_trans_begin(sdp, blocks, 0);
        if (error)
-               goto fail_free_inode;
+               goto fail_gunlock2;
 
        if (blocks > 1) {
                ip->i_eattr = ip->i_no_addr + 1;
@@ -740,18 +746,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        init_dinode(dip, ip, symname);
        gfs2_trans_end(sdp);
 
-       error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
-       if (error)
-               goto fail_free_inode;
-
        BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
 
        error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (error)
                goto fail_gunlock2;
 
-       gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
-       glock_set_object(ip->i_iopen_gh.gh_gl, ip);
        gfs2_set_iop(inode);
        insert_inode_hash(inode);
 
@@ -803,6 +803,7 @@ fail_gunlock3:
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_gunlock2:
        clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+       glock_clear_object(io_gl, ip);
        gfs2_glock_put(io_gl);
 fail_free_inode:
        if (ip->i_gl) {
@@ -2116,6 +2117,25 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset)
        return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
 }
 
+static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
+                           int flags)
+{
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_glock *gl = ip->i_gl;
+       struct gfs2_holder *gh;
+       int error;
+
+       gh = gfs2_glock_is_locked_by_me(gl);
+       if (gh && !gfs2_glock_is_held_excl(gl)) {
+               gfs2_glock_dq(gh);
+               gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
+               error = gfs2_glock_nq(gh);
+               if (error)
+                       return error;
+       }
+       return generic_update_time(inode, time, flags);
+}
+
 const struct inode_operations gfs2_file_iops = {
        .permission = gfs2_permission,
        .setattr = gfs2_setattr,
@@ -2124,6 +2144,7 @@ const struct inode_operations gfs2_file_iops = {
        .fiemap = gfs2_fiemap,
        .get_acl = gfs2_get_acl,
        .set_acl = gfs2_set_acl,
+       .update_time = gfs2_update_time,
 };
 
 const struct inode_operations gfs2_dir_iops = {
@@ -2143,6 +2164,7 @@ const struct inode_operations gfs2_dir_iops = {
        .fiemap = gfs2_fiemap,
        .get_acl = gfs2_get_acl,
        .set_acl = gfs2_set_acl,
+       .update_time = gfs2_update_time,
        .atomic_open = gfs2_atomic_open,
 };
 
index f7addc6..5e8eef9 100644 (file)
@@ -985,6 +985,10 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
        if (error < 0)
                return error;
 
+       if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) {
+               fs_err(sdp, "no resource groups found in the file system.\n");
+               return -ENOENT;
+       }
        set_rgrp_preferences(sdp);
 
        sdp->sd_rindex_uptodate = 1;
index 9903088..2696eb0 100644 (file)
@@ -12,6 +12,9 @@
 
 #define BOOTCONFIG_MAGIC       "#BOOTCONFIG\n"
 #define BOOTCONFIG_MAGIC_LEN   12
+#define BOOTCONFIG_ALIGN_SHIFT 2
+#define BOOTCONFIG_ALIGN       (1 << BOOTCONFIG_ALIGN_SHIFT)
+#define BOOTCONFIG_ALIGN_MASK  (BOOTCONFIG_ALIGN - 1)
 
 /* XBC tree node */
 struct xbc_node {
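
The new BOOTCONFIG_ALIGN* constants above describe a 4-byte alignment (1 << 2) with the usual power-of-two mask. A throwaway sketch of how such a mask is typically applied; the round-up helper below is illustrative only and is not part of the patch:

#include <stdio.h>

/* Same constants as the bootconfig.h hunk above. */
#define BOOTCONFIG_ALIGN_SHIFT	2
#define BOOTCONFIG_ALIGN	(1 << BOOTCONFIG_ALIGN_SHIFT)
#define BOOTCONFIG_ALIGN_MASK	(BOOTCONFIG_ALIGN - 1)

/* Round a size up to the next multiple of BOOTCONFIG_ALIGN. */
static unsigned long bootconfig_round_up(unsigned long size)
{
	return (size + BOOTCONFIG_ALIGN_MASK) & ~(unsigned long)BOOTCONFIG_ALIGN_MASK;
}

int main(void)
{
	for (unsigned long size = 0; size <= 9; size++)
		printf("%lu -> %lu\n", size, bootconfig_round_up(size));
	return 0;
}
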
index 82fa5e9..0d6e287 100644 (file)
@@ -1248,6 +1248,11 @@ enum mlx5_fc_bulk_alloc_bitmask {
 
 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
 
+enum {
+       MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
+       MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_0[0x1f];
        u8         vhca_resource_manager[0x1];
@@ -1558,7 +1563,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
        u8         general_obj_types[0x40];
 
-       u8         reserved_at_440[0x20];
+       u8         reserved_at_440[0x4];
+       u8         steering_format_version[0x4];
+       u8         create_qp_start_hint[0x18];
 
        u8         reserved_at_460[0x3];
        u8         log_max_uctx[0x5];
index 2e077e2..7f85c23 100644 (file)
@@ -2838,9 +2838,21 @@ u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
                     struct net_device *sb_dev);
 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
                       struct net_device *sb_dev);
+
 int dev_queue_xmit(struct sk_buff *skb);
 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+
+static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+{
+       int ret;
+
+       ret = __dev_direct_xmit(skb, queue_id);
+       if (!dev_xmit_complete(ret))
+               kfree_skb(skb);
+       return ret;
+}
+
 int register_netdevice(struct net_device *dev);
 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
 void unregister_netdevice_many(struct list_head *head);
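
The netdevice.h hunk above turns dev_direct_xmit() into an inline wrapper around __dev_direct_xmit(): the low-level function only reports a status, while the wrapper frees the skb whenever the transmit did not complete, so callers cannot leak it. A userspace analogue of that ownership split; struct buf, low_level_xmit() and direct_xmit() are invented names used only to show the shape:

#include <stdio.h>
#include <stdlib.h>

struct buf { char *data; };

enum { XMIT_DONE = 0, XMIT_DROP = 1 };

/* Reports status only; never frees the buffer (like __dev_direct_xmit()). */
static int low_level_xmit(struct buf *b, int simulate_drop)
{
	(void)b;
	return simulate_drop ? XMIT_DROP : XMIT_DONE;
}

/* Consumes the buffer on failure (like the new inline dev_direct_xmit()). */
static int direct_xmit(struct buf *b, int simulate_drop)
{
	int ret = low_level_xmit(b, simulate_drop);

	if (ret != XMIT_DONE) {		/* analogous to !dev_xmit_complete(ret) */
		free(b->data);
		free(b);
	}
	return ret;
}

static struct buf *buf_alloc(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (b)
		b->data = malloc(64);
	return b;
}

int main(void)
{
	struct buf *ok = buf_alloc();
	struct buf *dropped = buf_alloc();

	if (!ok || !dropped)
		return 1;

	printf("ok: %d\n", direct_xmit(ok, 0));		/* caller still owns 'ok' */
	printf("dropped: %d\n", direct_xmit(dropped, 1));	/* freed by direct_xmit() */

	free(ok->data);
	free(ok);
	return 0;
}
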
index e1eaf17..563457f 100644 (file)
@@ -107,7 +107,7 @@ static inline int IP_ECN_set_ect1(struct iphdr *iph)
        if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
                return 0;
 
-       check += (__force u16)htons(0x100);
+       check += (__force u16)htons(0x1);
 
        iph->check = (__force __sum16)(check + (check>=0xFFFF));
        iph->tos ^= INET_ECN_MASK;
index ea7d1d7..1d34fe1 100644 (file)
@@ -37,6 +37,7 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
 
 struct nft_flow_key {
        struct flow_dissector_key_basic                 basic;
+       struct flow_dissector_key_control               control;
        union {
                struct flow_dissector_key_ipv4_addrs    ipv4;
                struct flow_dissector_key_ipv6_addrs    ipv6;
@@ -62,6 +63,9 @@ struct nft_flow_rule {
 
 #define NFT_OFFLOAD_F_ACTION   (1 << 0)
 
+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+                                enum flow_dissector_key_id addr_type);
+
 struct nft_rule;
 struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
 void nft_flow_rule_destroy(struct nft_flow_rule *flow);
@@ -74,6 +78,9 @@ int nft_flow_rule_offload_commit(struct net *net);
                offsetof(struct nft_flow_key, __base.__field);          \
        (__reg)->len            = __len;                                \
        (__reg)->key            = __key;                                \
+
+#define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg)  \
+       NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg)         \
        memset(&(__reg)->mask, 0xff, (__reg)->len);
 
 int nft_chain_offload_priority(struct nft_base_chain *basechain);
index 1a9559c..4f4e93b 100644 (file)
@@ -31,6 +31,7 @@ struct xdp_umem {
        struct page **pgs;
        int id;
        struct list_head xsk_dma_list;
+       struct work_struct work;
 };
 
 struct xsk_map {
index 82cc58f..1500a0f 100644 (file)
@@ -171,9 +171,12 @@ struct statx {
  * be of use to ordinary userspace programs such as GUIs or ls rather than
  * specialised tools.
  *
- * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS
+ * Note that the flags marked [I] correspond to the FS_IOC_SETFLAGS flags
  * semantically.  Where possible, the numerical value is picked to correspond
- * also.
+ * also.  Note that the DAX attribute indicates that the file is in the CPU
+ * direct access state.  It does not correspond to the per-inode flag that
+ * some filesystems support.
+ *
  */
 #define STATX_ATTR_COMPRESSED          0x00000004 /* [I] File is compressed by the fs */
 #define STATX_ATTR_IMMUTABLE           0x00000010 /* [I] File is marked immutable */
@@ -183,7 +186,7 @@ struct statx {
 #define STATX_ATTR_AUTOMOUNT           0x00001000 /* Dir: Automount trigger */
 #define STATX_ATTR_MOUNT_ROOT          0x00002000 /* Root of a mount */
 #define STATX_ATTR_VERITY              0x00100000 /* [I] Verity protected file */
-#define STATX_ATTR_DAX                 0x00002000 /* [I] File is DAX */
+#define STATX_ATTR_DAX                 0x00200000 /* File is currently in DAX state */
 
 
 #endif /* _UAPI_LINUX_STAT_H */
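
The stat.h hunk above moves STATX_ATTR_DAX to its own bit (0x00200000) and clarifies that it reports the file's current CPU direct access state. A minimal userspace check of that bit could look like the sketch below; it assumes glibc 2.28 or later for the statx() wrapper and defines the constant locally in case the installed headers predate it:

/* Build: cc -o daxcheck daxcheck.c */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

#ifndef STATX_ATTR_DAX
#define STATX_ATTR_DAX 0x00200000	/* value from the hunk above */
#endif

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS, &stx)) {
		perror("statx");
		return 1;
	}

	if (!(stx.stx_attributes_mask & STATX_ATTR_DAX))
		printf("%s: filesystem does not report the DAX attribute\n", path);
	else if (stx.stx_attributes & STATX_ATTR_DAX)
		printf("%s: file is currently in the CPU direct access (DAX) state\n", path);
	else
		printf("%s: file is not in the DAX state\n", path);
	return 0;
}

Checking stx_attributes_mask first distinguishes "the bit is clear" from "the kernel or filesystem never fills this bit in".
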
index 20baced..32b2a8a 100644 (file)
@@ -288,8 +288,8 @@ static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
 
 found:
        hdr = (u32 *)(data - 8);
-       size = hdr[0];
-       csum = hdr[1];
+       size = le32_to_cpu(hdr[0]);
+       csum = le32_to_cpu(hdr[1]);
 
        data = ((void *)hdr) - size;
        if ((unsigned long)data < initrd_start) {
index 24d0ee2..c6932b8 100644 (file)
@@ -78,7 +78,7 @@ void __weak arch_cpu_idle_dead(void) { }
 void __weak arch_cpu_idle(void)
 {
        cpu_idle_force_poll = 1;
-       local_irq_enable();
+       raw_local_irq_enable();
 }
 
 /**
@@ -94,9 +94,35 @@ void __cpuidle default_idle_call(void)
 
                trace_cpu_idle(1, smp_processor_id());
                stop_critical_timings();
+
+               /*
+                * arch_cpu_idle() is supposed to enable IRQs, however
+                * we can't do that because of RCU and tracing.
+                *
+                * Trace IRQs enable here, then switch off RCU, and have
+                * arch_cpu_idle() use raw_local_irq_enable(). Note that
+                * rcu_idle_enter() relies on lockdep IRQ state, so switch that
+                * last -- this is very similar to the entry code.
+                */
+               trace_hardirqs_on_prepare();
+               lockdep_hardirqs_on_prepare(_THIS_IP_);
                rcu_idle_enter();
+               lockdep_hardirqs_on(_THIS_IP_);
+
                arch_cpu_idle();
+
+               /*
+                * OK, so IRQs are enabled here, but RCU needs them disabled to
+                * turn itself back on.. funny thing is that disabling IRQs
+                * will cause tracing, which needs RCU. Jump through hoops to
+                * make it 'work'.
+                */
+               raw_local_irq_disable();
+               lockdep_hardirqs_off(_THIS_IP_);
                rcu_idle_exit();
+               lockdep_hardirqs_on(_THIS_IP_);
+               raw_local_irq_enable();
+
                start_critical_timings();
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        }
index a4020c0..e1bf522 100644 (file)
@@ -202,7 +202,7 @@ config DYNAMIC_FTRACE_WITH_REGS
 
 config DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        def_bool y
-       depends on DYNAMIC_FTRACE
+       depends on DYNAMIC_FTRACE_WITH_REGS
        depends on HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
 
 config FUNCTION_PROFILER
index 8185f72..9c1bba8 100644 (file)
@@ -1629,6 +1629,8 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 static struct ftrace_ops *
 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
 static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
+static struct ftrace_ops *
 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
 
 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
@@ -1778,7 +1780,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                         * to it.
                         */
                        if (ftrace_rec_count(rec) == 1 &&
-                           ftrace_find_tramp_ops_any(rec))
+                           ftrace_find_tramp_ops_any_other(rec, ops))
                                rec->flags |= FTRACE_FL_TRAMP;
                        else
                                rec->flags &= ~FTRACE_FL_TRAMP;
@@ -2244,6 +2246,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
        return NULL;
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
+{
+       struct ftrace_ops *op;
+       unsigned long ip = rec->ip;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+               if (op == op_exclude || !op->trampoline)
+                       continue;
+
+               if (hash_contains_ip(ip, op->func_hash))
+                       return op;
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
 static struct ftrace_ops *
 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
                           struct ftrace_ops *op)
index dc83b3f..a6268e0 100644 (file)
@@ -3234,14 +3234,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
        /* See if we shot pass the end of this buffer page */
        if (unlikely(write > BUF_PAGE_SIZE)) {
-               if (tail != w) {
-                       /* before and after may now different, fix it up*/
-                       b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
-                       a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
-                       if (a_ok && b_ok && info->before != info->after)
-                               (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
-                                                     info->before, info->after);
-               }
+               /* before and after may now different, fix it up*/
+               b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
+               a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
+               if (a_ok && b_ok && info->before != info->after)
+                       (void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
+                                             info->before, info->after);
                return rb_move_tail(cpu_buffer, tail, info);
        }
 
@@ -3287,11 +3285,11 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                ts = rb_time_stamp(cpu_buffer->buffer);
                barrier();
  /*E*/         if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
-                   info->after < ts) {
+                   info->after < ts &&
+                   rb_time_cmpxchg(&cpu_buffer->write_stamp,
+                                   info->after, ts)) {
                        /* Nothing came after this event between C and E */
                        info->delta = ts - info->after;
-                       (void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
-                                             info->after, info->ts);
                        info->ts = ts;
                } else {
                        /*
index 410cfeb..7d53c5b 100644 (file)
@@ -3534,7 +3534,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 }
 
 #define STATIC_TEMP_BUF_SIZE   128
-static char static_temp_buf[STATIC_TEMP_BUF_SIZE];
+static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
 
 /* Find the next real entry, without updating the iterator itself */
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
index c9ad5c6..d071fc2 100644 (file)
@@ -368,7 +368,7 @@ static int start_kthread(struct trace_array *tr)
        struct task_struct *kthread;
        int next_cpu;
 
-       if (WARN_ON(hwlat_kthread))
+       if (hwlat_kthread)
                return 0;
 
        /* Just pick the first CPU on first iteration */
index 8533d2f..ba13e92 100644 (file)
@@ -7,6 +7,7 @@
 
 static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
+       unsigned long args[6] = { };
        struct pt_regs *regs;
 
        if (!try_get_task_stack(target)) {
@@ -27,8 +28,14 @@ static int collect_syscall(struct task_struct *target, struct syscall_info *info
 
        info->data.nr = syscall_get_nr(target, regs);
        if (info->data.nr != -1L)
-               syscall_get_arguments(target, regs,
-                                     (unsigned long *)&info->data.args[0]);
+               syscall_get_arguments(target, regs, args);
+
+       info->data.args[0] = args[0];
+       info->data.args[1] = args[1];
+       info->data.args[2] = args[2];
+       info->data.args[3] = args[3];
+       info->data.args[4] = args[4];
+       info->data.args[5] = args[5];
 
        put_task_stack(target);
        return 0;
index 9a47ef8..1f1f5b0 100644 (file)
@@ -391,6 +391,7 @@ out:
 
 /**
  * batadv_frag_create() - create a fragment from skb
+ * @net_dev: outgoing device for fragment
  * @skb: skb to create fragment from
  * @frag_head: header to use in new fragment
  * @fragment_size: size of new fragment
@@ -401,22 +402,25 @@ out:
  *
  * Return: the new fragment, NULL on error.
  */
-static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
+static struct sk_buff *batadv_frag_create(struct net_device *net_dev,
+                                         struct sk_buff *skb,
                                          struct batadv_frag_packet *frag_head,
                                          unsigned int fragment_size)
 {
+       unsigned int ll_reserved = LL_RESERVED_SPACE(net_dev);
+       unsigned int tailroom = net_dev->needed_tailroom;
        struct sk_buff *skb_fragment;
        unsigned int header_size = sizeof(*frag_head);
        unsigned int mtu = fragment_size + header_size;
 
-       skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
+       skb_fragment = dev_alloc_skb(ll_reserved + mtu + tailroom);
        if (!skb_fragment)
                goto err;
 
        skb_fragment->priority = skb->priority;
 
        /* Eat the last mtu-bytes of the skb */
-       skb_reserve(skb_fragment, header_size + ETH_HLEN);
+       skb_reserve(skb_fragment, ll_reserved + header_size);
        skb_split(skb, skb_fragment, skb->len - fragment_size);
 
        /* Add the header */
@@ -439,11 +443,12 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
                            struct batadv_neigh_node *neigh_node)
 {
+       struct net_device *net_dev = neigh_node->if_incoming->net_dev;
        struct batadv_priv *bat_priv;
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_frag_packet frag_header;
        struct sk_buff *skb_fragment;
-       unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
+       unsigned int mtu = net_dev->mtu;
        unsigned int header_size = sizeof(frag_header);
        unsigned int max_fragment_size, num_fragments;
        int ret;
@@ -503,7 +508,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                        goto put_primary_if;
                }
 
-               skb_fragment = batadv_frag_create(skb, &frag_header,
+               skb_fragment = batadv_frag_create(net_dev, skb, &frag_header,
                                                  max_fragment_size);
                if (!skb_fragment) {
                        ret = -ENOMEM;
@@ -522,13 +527,14 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                frag_header.no++;
        }
 
-       /* Make room for the fragment header. */
-       if (batadv_skb_head_push(skb, header_size) < 0 ||
-           pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
-               ret = -ENOMEM;
+       /* make sure that there is at least enough head for the fragmentation
+        * and ethernet headers
+        */
+       ret = skb_cow_head(skb, ETH_HLEN + header_size);
+       if (ret < 0)
                goto put_primary_if;
-       }
 
+       skb_push(skb, header_size);
        memcpy(skb->data, &frag_header, header_size);
 
        /* Send the last fragment */
index dad9964..3390459 100644 (file)
@@ -554,6 +554,9 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
        needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
        needed_headroom += batadv_max_header_len();
 
+       /* fragmentation headers don't strip the unicast/... header */
+       needed_headroom += sizeof(struct batadv_frag_packet);
+
        soft_iface->needed_headroom = needed_headroom;
        soft_iface->needed_tailroom = lower_tailroom;
 }
index 04c3f9a..8edfb98 100644 (file)
@@ -735,6 +735,11 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
        mtu_reserved = nf_bridge_mtu_reduction(skb);
        mtu = skb->dev->mtu;
 
+       if (nf_bridge->pkt_otherhost) {
+               skb->pkt_type = PACKET_OTHERHOST;
+               nf_bridge->pkt_otherhost = false;
+       }
+
        if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
                mtu = nf_bridge->frag_max_size;
 
@@ -835,8 +840,6 @@ static unsigned int br_nf_post_routing(void *priv,
        else
                return NF_ACCEPT;
 
-       /* We assume any code from br_dev_queue_push_xmit onwards doesn't care
-        * about the value of skb->pkt_type. */
        if (skb->pkt_type == PACKET_OTHERHOST) {
                skb->pkt_type = PACKET_HOST;
                nf_bridge->pkt_otherhost = true;
index b5130fd..e3f998d 100644 (file)
@@ -4173,7 +4173,7 @@ int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
 }
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
-int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
+int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 {
        struct net_device *dev = skb->dev;
        struct sk_buff *orig_skb = skb;
@@ -4203,17 +4203,13 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
        dev_xmit_recursion_dec();
 
        local_bh_enable();
-
-       if (!dev_xmit_complete(ret))
-               kfree_skb(skb);
-
        return ret;
 drop:
        atomic_long_inc(&dev->tx_dropped);
        kfree_skb_list(skb);
        return NET_XMIT_DROP;
 }
-EXPORT_SYMBOL(dev_direct_xmit);
+EXPORT_SYMBOL(__dev_direct_xmit);
 
 /*************************************************************************
  *                     Receiver routines
index 90d3423..bfa5c99 100644 (file)
@@ -5794,6 +5794,9 @@ int skb_mpls_dec_ttl(struct sk_buff *skb)
        if (unlikely(!eth_p_mpls(skb->protocol)))
                return -EINVAL;
 
+       if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+               return -ENOMEM;
+
        lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
        ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
        if (!--ttl)
index c962f0d..e26652f 100644 (file)
@@ -3225,7 +3225,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 
        fl4.daddr = dst;
        fl4.saddr = src;
-       fl4.flowi4_tos = rtm->rtm_tos;
+       fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
        fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
        fl4.flowi4_mark = mark;
        fl4.flowi4_uid = uid;
@@ -3249,8 +3249,9 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
                fl4.flowi4_iif = iif; /* for rt_fill_info */
                skb->dev        = dev;
                skb->mark       = mark;
-               err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
-                                        dev, &res);
+               err = ip_route_input_rcu(skb, dst, src,
+                                        rtm->rtm_tos & IPTOS_RT_MASK, dev,
+                                        &res);
 
                rt = skb_rtable(skb);
                if (err == 0 && rt->dst.error)
index 8cf6599..c3bc89b 100644 (file)
@@ -1133,8 +1133,13 @@ static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
                        return;
 
                if (rt->dst.dev) {
-                       dev->needed_headroom = rt->dst.dev->hard_header_len +
-                                              t_hlen;
+                       unsigned short dst_len = rt->dst.dev->hard_header_len +
+                                                t_hlen;
+
+                       if (t->dev->header_ops)
+                               dev->hard_header_len = dst_len;
+                       else
+                               dev->needed_headroom = dst_len;
 
                        if (set_mtu) {
                                dev->mtu = rt->dst.dev->mtu - t_hlen;
@@ -1159,7 +1164,12 @@ static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
        tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
 
        t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
-       tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
+       if (tunnel->dev->header_ops)
+               tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+       else
+               tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
+
        return t_hlen;
 }
 
index c7eaa37..89009c8 100644 (file)
@@ -271,8 +271,7 @@ flag_nested(const struct nlattr *nla)
 
 static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
        [IPSET_ATTR_IPADDR_IPV4]        = { .type = NLA_U32 },
-       [IPSET_ATTR_IPADDR_IPV6]        = { .type = NLA_BINARY,
-                                           .len = sizeof(struct in6_addr) },
+       [IPSET_ATTR_IPADDR_IPV6]        = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
 };
 
 int
index e279ded..d45dbcb 100644 (file)
@@ -4167,12 +4167,18 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 
        spin_lock_init(&ipvs->tot_stats.lock);
 
-       proc_create_net("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_seq_ops,
-                       sizeof(struct ip_vs_iter));
-       proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
-                       ip_vs_stats_show, NULL);
-       proc_create_net_single("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
-                       ip_vs_stats_percpu_show, NULL);
+#ifdef CONFIG_PROC_FS
+       if (!proc_create_net("ip_vs", 0, ipvs->net->proc_net,
+                            &ip_vs_info_seq_ops, sizeof(struct ip_vs_iter)))
+               goto err_vs;
+       if (!proc_create_net_single("ip_vs_stats", 0, ipvs->net->proc_net,
+                                   ip_vs_stats_show, NULL))
+               goto err_stats;
+       if (!proc_create_net_single("ip_vs_stats_percpu", 0,
+                                   ipvs->net->proc_net,
+                                   ip_vs_stats_percpu_show, NULL))
+               goto err_percpu;
+#endif
 
        if (ip_vs_control_net_init_sysctl(ipvs))
                goto err;
@@ -4180,6 +4186,17 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
        return 0;
 
 err:
+#ifdef CONFIG_PROC_FS
+       remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
+
+err_percpu:
+       remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
+
+err_stats:
+       remove_proc_entry("ip_vs", ipvs->net->proc_net);
+
+err_vs:
+#endif
        free_percpu(ipvs->tot_stats.cpustats);
        return -ENOMEM;
 }
@@ -4188,9 +4205,11 @@ void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
 {
        ip_vs_trash_cleanup(ipvs);
        ip_vs_control_net_cleanup_sysctl(ipvs);
+#ifdef CONFIG_PROC_FS
        remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
        remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
        remove_proc_entry("ip_vs", ipvs->net->proc_net);
+#endif
        free_percpu(ipvs->tot_stats.cpustats);
 }
 
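
The ipvs change above adds the classic goto-based unwind ladder: each proc entry that fails to register jumps to a label that removes only what was already created, in reverse order. A toy, self-contained illustration of that shape, where three malloc() steps stand in for the three proc entries; nothing below is ipvs code:

#include <stdio.h>
#include <stdlib.h>

static int setup_three(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto err_a;
	b = malloc(16);
	if (!b)
		goto err_b;
	c = malloc(16);
	if (!c)
		goto err_c;

	/* ... the resources would normally be kept and used here ... */
	free(c);
	free(b);
	free(a);
	return 0;

err_c:			/* step 3 failed: undo step 2, then step 1 */
	free(b);
err_b:			/* step 2 failed: undo step 1 */
	free(a);
err_a:			/* step 1 failed: nothing to undo */
	return -1;
}

int main(void)
{
	printf("setup_three() -> %d\n", setup_three());
	return 0;
}
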
index 65aa98f..a11bc8d 100644 (file)
@@ -620,7 +620,8 @@ static __printf(2, 3) int nft_request_module(struct net *net, const char *fmt,
 static void lockdep_nfnl_nft_mutex_not_held(void)
 {
 #ifdef CONFIG_PROVE_LOCKING
-       WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
+       if (debug_locks)
+               WARN_ON_ONCE(lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
 #endif
 }
 
index 9f62572..9ae1427 100644 (file)
@@ -28,6 +28,23 @@ static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
        return flow;
 }
 
+void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
+                                enum flow_dissector_key_id addr_type)
+{
+       struct nft_flow_match *match = &flow->match;
+       struct nft_flow_key *mask = &match->mask;
+       struct nft_flow_key *key = &match->key;
+
+       if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL))
+               return;
+
+       key->control.addr_type = addr_type;
+       mask->control.addr_type = 0xffff;
+       match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
+       match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] =
+               offsetof(struct nft_flow_key, control);
+}
+
 struct nft_flow_rule *nft_flow_rule_create(struct net *net,
                                           const struct nft_rule *rule)
 {
index bc079d6..00e563a 100644 (file)
@@ -123,11 +123,11 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
        u8 *mask = (u8 *)&flow->match.mask;
        u8 *key = (u8 *)&flow->match.key;
 
-       if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
+       if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
                return -EOPNOTSUPP;
 
-       memcpy(key + reg->offset, &priv->data, priv->len);
-       memcpy(mask + reg->offset, &reg->mask, priv->len);
+       memcpy(key + reg->offset, &priv->data, reg->len);
+       memcpy(mask + reg->offset, &reg->mask, reg->len);
 
        flow->match.dissector.used_keys |= BIT(reg->key);
        flow->match.dissector.offset[reg->key] = reg->base_offset;
@@ -137,7 +137,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
            nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
                return -EOPNOTSUPP;
 
-       nft_offload_update_dependency(ctx, &priv->data, priv->len);
+       nft_offload_update_dependency(ctx, &priv->data, reg->len);
 
        return 0;
 }
index b37bd02..bf4b3ad 100644 (file)
@@ -724,22 +724,22 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
 
        switch (priv->key) {
        case NFT_META_PROTOCOL:
-               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
-                                 sizeof(__u16), reg);
+               NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, n_proto,
+                                       sizeof(__u16), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case NFT_META_L4PROTO:
-               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
-                                 sizeof(__u8), reg);
+               NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
+                                       sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
                break;
        case NFT_META_IIF:
-               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
-                                 ingress_ifindex, sizeof(__u32), reg);
+               NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
+                                       ingress_ifindex, sizeof(__u32), reg);
                break;
        case NFT_META_IIFTYPE:
-               NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_META, meta,
-                                 ingress_iftype, sizeof(__u16), reg);
+               NFT_OFFLOAD_MATCH_EXACT(FLOW_DISSECTOR_KEY_META, meta,
+                                       ingress_iftype, sizeof(__u16), reg);
                break;
        default:
                return -EOPNOTSUPP;
index dcd3c7b..47d4e0e 100644 (file)
@@ -165,6 +165,34 @@ nla_put_failure:
        return -1;
 }
 
+static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
+                                    u32 priv_len, u32 field_len)
+{
+       unsigned int remainder, delta, k;
+       struct nft_data mask = {};
+       __be32 remainder_mask;
+
+       if (priv_len == field_len) {
+               memset(&reg->mask, 0xff, priv_len);
+               return true;
+       } else if (priv_len > field_len) {
+               return false;
+       }
+
+       memset(&mask, 0xff, field_len);
+       remainder = priv_len % sizeof(u32);
+       if (remainder) {
+               k = priv_len / sizeof(u32);
+               delta = field_len - priv_len;
+               remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
+               mask.data[k] = (__force u32)remainder_mask;
+       }
+
+       memcpy(&reg->mask, &mask, field_len);
+
+       return true;
+}
+
 static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                                  struct nft_flow_rule *flow,
                                  const struct nft_payload *priv)
@@ -173,21 +201,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
-               if (priv->len != ETH_ALEN)
+               if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
-               if (priv->len != ETH_ALEN)
+               if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_proto):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
@@ -195,14 +223,14 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
                                  vlan_tci, sizeof(__be16), reg);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
@@ -210,7 +238,7 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
@@ -218,7 +246,7 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
                break;
        case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
                                                        sizeof(struct vlan_hdr):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
@@ -239,21 +267,25 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
-               if (priv->len != sizeof(struct in_addr))
+               if (!nft_payload_offload_mask(reg, priv->len,
+                                             sizeof(struct in_addr)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
+               nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                break;
        case offsetof(struct iphdr, daddr):
-               if (priv->len != sizeof(struct in_addr))
+               if (!nft_payload_offload_mask(reg, priv->len,
+                                             sizeof(struct in_addr)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
+               nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                break;
        case offsetof(struct iphdr, protocol):
-               if (priv->len != sizeof(__u8))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
@@ -275,21 +307,25 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
-               if (priv->len != sizeof(struct in6_addr))
+               if (!nft_payload_offload_mask(reg, priv->len,
+                                             sizeof(struct in6_addr)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
+               nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                break;
        case offsetof(struct ipv6hdr, daddr):
-               if (priv->len != sizeof(struct in6_addr))
+               if (!nft_payload_offload_mask(reg, priv->len,
+                                             sizeof(struct in6_addr)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
+               nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
-               if (priv->len != sizeof(__u8))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
@@ -331,14 +367,14 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
@@ -359,14 +395,14 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct udphdr, source):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
-               if (priv->len != sizeof(__be16))
+               if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
                        return -EOPNOTSUPP;
 
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
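
The nft_payload.c hunks above (together with the nft_cmp.c change earlier) stop requiring an exact field-length match and instead build a mask whose leading priv->len bytes are set, so prefix matches (for example an IPv4 /24) can be offloaded. A minimal standalone sketch of that mask arithmetic, assuming a 4-byte field and a 3-byte prefix (illustrative user-space code, not the kernel function):

/* Illustrative user-space sketch of the prefix-mask arithmetic; not the
 * kernel nft_payload_offload_mask() itself. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIELD_LEN 4	/* e.g. sizeof(struct in_addr) */
#define PRIV_LEN  3	/* match only the first 3 bytes (a /24) */

int main(void)
{
	uint32_t mask[FIELD_LEN / 4];
	unsigned int remainder = PRIV_LEN % 4;
	unsigned int delta = FIELD_LEN - PRIV_LEN;

	memset(mask, 0xff, sizeof(mask));
	if (remainder)
		/* clear the low delta * 8 bits of the last used word, in
		 * network byte order, so only PRIV_LEN leading bytes match */
		mask[PRIV_LEN / 4] = htonl(~((1u << (delta * 8)) - 1));

	for (size_t i = 0; i < sizeof(mask); i++)
		printf("%02x", ((unsigned char *)mask)[i]);
	putchar('\n');	/* prints ffffff00 */
	return 0;
}

Run as-is this prints ffffff00: the top three bytes of the field are compared and the last byte is wildcarded.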
index 5829a02..c3a6648 100644 (file)
@@ -199,6 +199,9 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
        __be32 lse;
        int err;
 
+       if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
+               return -ENOMEM;
+
        stack = mpls_hdr(skb);
        lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
        err = skb_mpls_update_lse(skb, lse);
index 5c7456e..d1486ea 100644 (file)
@@ -105,6 +105,9 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
                        goto drop;
                break;
        case TCA_MPLS_ACT_MODIFY:
+               if (!pskb_may_pull(skb,
+                                  skb_network_offset(skb) + MPLS_HLEN))
+                       goto drop;
                new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
                if (skb_mpls_update_lse(skb, new_lse))
                        goto drop;
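
Both MPLS hunks add a pskb_may_pull() guard so the 4-byte label stack entry is guaranteed to sit in the skb's linear area before mpls_hdr() dereferences it. A hedged user-space analogue of the same length-check-before-parse pattern, using a flat buffer instead of an skb (names are illustrative):

/* Illustrative bounds check before reading a 4-byte MPLS LSE from a flat
 * buffer; the kernel performs the equivalent check with pskb_may_pull(). */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MPLS_HLEN 4

static int read_mpls_lse(const uint8_t *pkt, size_t pkt_len,
			 size_t network_offset, uint32_t *lse)
{
	if (pkt_len < network_offset + MPLS_HLEN)
		return -1;	/* header not fully present: refuse to parse */

	uint32_t raw;

	memcpy(&raw, pkt + network_offset, MPLS_HLEN);
	*lse = ntohl(raw);
	return 0;
}

int main(void)
{
	uint8_t pkt[] = { 0x00, 0x01, 0x41, 0x40 };	/* label 20, S=1, TTL 64 */
	uint32_t lse;

	if (read_mpls_lse(pkt, sizeof(pkt), 0, &lse) == 0)
		printf("label=%u ttl=%u\n", lse >> 12, lse & 0xff);
	return 0;
}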
index 032d8fc..c4b87d2 100644 (file)
@@ -2207,6 +2207,8 @@ void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
                        else if (prop == TIPC_NLA_PROP_MTU)
                                tipc_link_set_mtu(e->link, b->mtu);
                }
+               /* Update MTU for node link entry */
+               e->mtu = tipc_link_mss(e->link);
                tipc_node_write_unlock(n);
                tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
        }
index 9232cdb..d41fffb 100644 (file)
@@ -675,7 +675,8 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        int len, i, rc = 0;
 
        if (addr_len != sizeof(struct sockaddr_x25) ||
-           addr->sx25_family != AF_X25) {
+           addr->sx25_family != AF_X25 ||
+           strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN) {
                rc = -EINVAL;
                goto out;
        }
@@ -769,7 +770,8 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
 
        rc = -EINVAL;
        if (addr_len != sizeof(struct sockaddr_x25) ||
-           addr->sx25_family != AF_X25)
+           addr->sx25_family != AF_X25 ||
+           strnlen(addr->sx25_addr.x25_addr, X25_ADDR_LEN) == X25_ADDR_LEN)
                goto out;
 
        rc = -ENETUNREACH;
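
The x25_bind()/x25_connect() hunks reject addresses with no NUL terminator inside X25_ADDR_LEN bytes: strnlen() returning X25_ADDR_LEN means the string is unterminated and would later be read past the end of the buffer. A small sketch of that validation on a local struct (X25_ADDR_LEN taken as 16, matching the uapi header):

/* Sketch of the NUL-termination check added above, on a local struct
 * (the kernel checks addr->sx25_addr.x25_addr in struct sockaddr_x25). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define X25_ADDR_LEN 16		/* value from <linux/x25.h> */

struct x25_address {
	char x25_addr[X25_ADDR_LEN];
};

static bool x25_addr_valid(const struct x25_address *a)
{
	/* strnlen() == X25_ADDR_LEN means there is no NUL inside the buffer */
	return strnlen(a->x25_addr, X25_ADDR_LEN) < X25_ADDR_LEN;
}

int main(void)
{
	struct x25_address ok = { "2041234567" };
	struct x25_address bad;

	memset(bad.x25_addr, '1', sizeof(bad.x25_addr));	/* unterminated */
	printf("ok=%d bad=%d\n", x25_addr_valid(&ok), x25_addr_valid(&bad));
	return 0;
}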
index 56d052b..56a28a6 100644 (file)
@@ -66,18 +66,31 @@ static void xdp_umem_release(struct xdp_umem *umem)
        kfree(umem);
 }
 
+static void xdp_umem_release_deferred(struct work_struct *work)
+{
+       struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
+
+       xdp_umem_release(umem);
+}
+
 void xdp_get_umem(struct xdp_umem *umem)
 {
        refcount_inc(&umem->users);
 }
 
-void xdp_put_umem(struct xdp_umem *umem)
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup)
 {
        if (!umem)
                return;
 
-       if (refcount_dec_and_test(&umem->users))
-               xdp_umem_release(umem);
+       if (refcount_dec_and_test(&umem->users)) {
+               if (defer_cleanup) {
+                       INIT_WORK(&umem->work, xdp_umem_release_deferred);
+                       schedule_work(&umem->work);
+               } else {
+                       xdp_umem_release(umem);
+               }
+       }
 }
 
 static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
index 181fdda..aa9fe27 100644 (file)
@@ -9,7 +9,7 @@
 #include <net/xdp_sock_drv.h>
 
 void xdp_get_umem(struct xdp_umem *umem);
-void xdp_put_umem(struct xdp_umem *umem);
+void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
 struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);
 
 #endif /* XDP_UMEM_H_ */
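
xdp_put_umem() gains a defer_cleanup flag: when the last reference drops, the umem is either released immediately or handed to a workqueue so teardown can finish outside the caller's context. A much-simplified, single-threaded analogue of that "free now or defer" pattern (the kernel uses refcount_t and schedule_work(); this sketch just parks deferred objects on a list):

/* Minimal analogue of "free now or defer to a later cleanup pass";
 * the kernel variant uses refcount_t and schedule_work() instead. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct umem {
	int users;		/* refcount_t in the kernel */
	struct umem *next;	/* deferred-release list linkage */
	char payload[64];
};

static struct umem *deferred;	/* stands in for the workqueue */

static void umem_release(struct umem *u)
{
	printf("releasing %p\n", (void *)u);
	free(u);
}

static void umem_put(struct umem *u, bool defer_cleanup)
{
	if (--u->users)
		return;
	if (defer_cleanup) {
		u->next = deferred;	/* release later, not in this context */
		deferred = u;
	} else {
		umem_release(u);
	}
}

static void run_deferred(void)	/* the "work function" */
{
	while (deferred) {
		struct umem *u = deferred;

		deferred = u->next;
		umem_release(u);
	}
}

int main(void)
{
	struct umem *u = calloc(1, sizeof(*u));

	u->users = 1;
	umem_put(u, true);	/* last reference, but cleanup is deferred */
	run_deferred();
	return 0;
}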
index cfbec39..b7b039b 100644 (file)
@@ -411,11 +411,7 @@ static int xsk_generic_xmit(struct sock *sk)
                skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
                skb->destructor = xsk_destruct_skb;
 
-               /* Hinder dev_direct_xmit from freeing the packet and
-                * therefore completing it in the destructor
-                */
-               refcount_inc(&skb->users);
-               err = dev_direct_xmit(skb, xs->queue_id);
+               err = __dev_direct_xmit(skb, xs->queue_id);
                if  (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
                        skb->destructor = sock_wfree;
@@ -429,12 +425,10 @@ static int xsk_generic_xmit(struct sock *sk)
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP) {
                        /* SKB completed but not sent */
-                       kfree_skb(skb);
                        err = -EBUSY;
                        goto out;
                }
 
-               consume_skb(skb);
                sent_frame = true;
        }
 
@@ -1147,7 +1141,7 @@ static void xsk_destruct(struct sock *sk)
                return;
 
        if (!xp_put_pool(xs->pool))
-               xdp_put_umem(xs->umem);
+               xdp_put_umem(xs->umem, !xs->pool);
 
        sk_refcnt_debug_dec(sk);
 }
index 8a3bf4e..9287edd 100644 (file)
@@ -185,8 +185,10 @@ err_unreg_xsk:
 err_unreg_pool:
        if (!force_zc)
                err = 0; /* fallback to copy mode */
-       if (err)
+       if (err) {
                xsk_clear_pool_at_qid(netdev, queue_id);
+               dev_put(netdev);
+       }
        return err;
 }
 
@@ -242,7 +244,7 @@ static void xp_release_deferred(struct work_struct *work)
                pool->cq = NULL;
        }
 
-       xdp_put_umem(pool->umem);
+       xdp_put_umem(pool->umem, false);
        xp_destroy(pool);
 }
 
index c13a5bc..5b9a099 100644 (file)
@@ -21,6 +21,7 @@ static unsigned long my_ip = (unsigned long)schedule;
 asm (
 "      .pushsection    .text, \"ax\", @progbits\n"
 "      .type           my_tramp1, @function\n"
+"      .globl          my_tramp1\n"
 "   my_tramp1:"
 "      pushq %rbp\n"
 "      movq %rsp, %rbp\n"
@@ -29,6 +30,7 @@ asm (
 "      .size           my_tramp1, .-my_tramp1\n"
 "      ret\n"
 "      .type           my_tramp2, @function\n"
+"      .globl          my_tramp2\n"
 "   my_tramp2:"
 "      pushq %rbp\n"
 "      movq %rsp, %rbp\n"
index d5c5022..3f0079c 100644 (file)
@@ -16,6 +16,7 @@ extern void my_tramp(void *);
 asm (
 "      .pushsection    .text, \"ax\", @progbits\n"
 "      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
 "   my_tramp:"
 "      pushq %rbp\n"
 "      movq %rsp, %rbp\n"
index 63ca06d..a2729d1 100644 (file)
@@ -14,6 +14,7 @@ extern void my_tramp(void *);
 asm (
 "      .pushsection    .text, \"ax\", @progbits\n"
 "      .type           my_tramp, @function\n"
+"      .globl          my_tramp\n"
 "   my_tramp:"
 "      pushq %rbp\n"
 "      movq %rsp, %rbp\n"
index 1b11f89..91a502b 100755 (executable)
@@ -45,6 +45,8 @@ create_package() {
        chmod -R go-w "$pdir"
        # in case we are in a restrictive umask environment like 0077
        chmod -R a+rX "$pdir"
+       # in case we build in a setuid/setgid directory
+       chmod -R ug-s "$pdir"
 
        # Create the package
        dpkg-gencontrol -p$pname -P"$pdir"
index bbb1748..8060cc8 100644 (file)
@@ -1364,16 +1364,20 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
                struct nid_path *path;
                hda_nid_t pin = pins[i];
 
-               path = snd_hda_get_path_from_idx(codec, path_idx[i]);
-               if (path) {
-                       badness += assign_out_path_ctls(codec, path);
-                       continue;
+               if (!spec->obey_preferred_dacs) {
+                       path = snd_hda_get_path_from_idx(codec, path_idx[i]);
+                       if (path) {
+                               badness += assign_out_path_ctls(codec, path);
+                               continue;
+                       }
                }
 
                dacs[i] = get_preferred_dac(codec, pin);
                if (dacs[i]) {
                        if (is_dac_already_used(codec, dacs[i]))
                                badness += bad->shared_primary;
+               } else if (spec->obey_preferred_dacs) {
+                       badness += BAD_NO_PRIMARY_DAC;
                }
 
                if (!dacs[i])
index a43f0bb..0886bc8 100644 (file)
@@ -237,6 +237,7 @@ struct hda_gen_spec {
        unsigned int power_down_unused:1; /* power down unused widgets */
        unsigned int dac_min_mute:1; /* minimal = mute for DACs */
        unsigned int suppress_vmaster:1; /* don't create vmaster kctls */
+       unsigned int obey_preferred_dacs:1; /* obey preferred_dacs assignment */
 
        /* other internal flags */
        unsigned int no_analog:1; /* digital I/O only */
index 739dbaf..8616c56 100644 (file)
@@ -119,6 +119,7 @@ struct alc_spec {
        unsigned int no_shutup_pins:1;
        unsigned int ultra_low_power:1;
        unsigned int has_hs_key:1;
+       unsigned int no_internal_mic_pin:1;
 
        /* for PLL fix */
        hda_nid_t pll_nid;
@@ -445,6 +446,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
                        alc_update_coef_idx(codec, 0x7, 1<<5, 0);
                break;
        case 0x10ec0892:
+       case 0x10ec0897:
                alc_update_coef_idx(codec, 0x7, 1<<5, 0);
                break;
        case 0x10ec0899:
@@ -4523,6 +4525,7 @@ static const struct coef_fw alc225_pre_hsmode[] = {
 
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
+       struct alc_spec *spec = codec->spec;
        static const struct coef_fw coef0255[] = {
                WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
                WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
@@ -4597,6 +4600,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
                {}
        };
 
+       if (spec->no_internal_mic_pin) {
+               alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+               return;
+       }
+
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
                alc_process_coef_fw(codec, coef0255);
@@ -5163,6 +5171,11 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                {}
        };
 
+       if (spec->no_internal_mic_pin) {
+               alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+               return;
+       }
+
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
                alc_process_coef_fw(codec, coef0255);
@@ -6014,6 +6027,21 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
        codec->power_save_node = 0;
 }
 
+/* avoid DAC 0x06 for bass speaker 0x17; it has no volume control */
+static void alc289_fixup_asus_ga401(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       static const hda_nid_t preferred_pairs[] = {
+               0x14, 0x02, 0x17, 0x02, 0x21, 0x03, 0
+       };
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.preferred_dacs = preferred_pairs;
+               spec->gen.obey_preferred_dacs = 1;
+       }
+}
+
 /* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
 static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
@@ -6121,6 +6149,23 @@ static void alc274_fixup_hp_headset_mic(struct hda_codec *codec,
        }
 }
 
+static void alc_fixup_no_int_mic(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               /* Mic RING SLEEVE swap for combo jack */
+               alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+               spec->no_internal_mic_pin = true;
+               break;
+       case HDA_FIXUP_ACT_INIT:
+               alc_combo_jack_hp_jd_restart(codec);
+               break;
+       }
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -6320,6 +6365,7 @@ enum {
        ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
        ALC287_FIXUP_HP_GPIO_LED,
        ALC256_FIXUP_HP_HEADSET_MIC,
+       ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7569,11 +7615,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
        [ALC289_FIXUP_ASUS_GA401] = {
-               .type = HDA_FIXUP_PINS,
-               .v.pins = (const struct hda_pintbl[]) {
-                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
-                       { }
-               },
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc289_fixup_asus_ga401,
+               .chained = true,
+               .chain_id = ALC289_FIXUP_ASUS_GA502,
        },
        [ALC289_FIXUP_ASUS_GA502] = {
                .type = HDA_FIXUP_PINS,
@@ -7697,7 +7742,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { }
                },
                .chained = true,
-               .chain_id = ALC289_FIXUP_ASUS_GA401
+               .chain_id = ALC289_FIXUP_ASUS_GA502
        },
        [ALC274_FIXUP_HP_MIC] = {
                .type = HDA_FIXUP_VERBS,
@@ -7738,6 +7783,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc274_fixup_hp_headset_mic,
        },
+       [ALC236_FIXUP_DELL_AIO_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_no_int_mic,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7815,6 +7866,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7881,6 +7934,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
        SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
+       SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
@@ -8353,6 +8407,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x19, 0x02a11020},
                {0x1a, 0x02a11030},
                {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
+               {0x21, 0x02211010}),
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x103c, "HP", ALC256_FIXUP_HP_HEADSET_MIC,
                {0x14, 0x90170110},
                {0x19, 0x02a11020},
@@ -8585,6 +8641,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x13, 0x90a60140}),
+       SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_HPE,
+               {0x17, 0x90170110},
+               {0x21, 0x04211020}),
        SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
@@ -10171,6 +10230,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0888, "ALC888", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0889, "ALC889", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0892, "ALC892", patch_alc662),
+       HDA_CODEC_ENTRY(0x10ec0897, "ALC897", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0899, "ALC898", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0900, "ALC1150", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
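
The patch_realtek.c changes are mostly new rows in the SND_PCI_QUIRK()/pin-quirk tables plus fixup callbacks chained through chain_id. A hypothetical sketch of the subsystem-ID lookup such tables implement (struct and function names are invented for illustration, not the HDA API; the IDs are the Dell/HP entries added above):

/* Hypothetical sketch of a vendor/device quirk-table lookup like the
 * SND_PCI_QUIRK() entries above (names are illustrative only). */
#include <stdint.h>
#include <stdio.h>

struct quirk {
	uint16_t subvendor;
	uint16_t subdevice;
	const char *name;
	int fixup_id;
};

enum { FIXUP_NONE, FIXUP_DELL_AIO_HEADSET_MIC, FIXUP_HP_MUTE_LED };

static const struct quirk quirk_table[] = {
	{ 0x1028, 0x0a2e, "Dell",    FIXUP_DELL_AIO_HEADSET_MIC },
	{ 0x1028, 0x0a30, "Dell",    FIXUP_DELL_AIO_HEADSET_MIC },
	{ 0x103c, 0x827f, "HP x360", FIXUP_HP_MUTE_LED },
	{ 0 }
};

static int lookup_fixup(uint16_t subvendor, uint16_t subdevice)
{
	for (const struct quirk *q = quirk_table; q->subvendor; q++)
		if (q->subvendor == subvendor && q->subdevice == subdevice)
			return q->fixup_id;
	return FIXUP_NONE;
}

int main(void)
{
	printf("fixup=%d\n", lookup_fixup(0x1028, 0x0a2e));
	return 0;
}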
index a9acce7..d987817 100644 (file)
@@ -43,6 +43,7 @@ static const struct reg_sequence patch_list[] = {
        {RT5682_DAC_ADC_DIG_VOL1, 0xa020},
        {RT5682_I2C_CTRL, 0x000f},
        {RT5682_PLL2_INTERNAL, 0x8266},
+       {RT5682_SAR_IL_CMD_3, 0x8365},
 };
 
 void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
index bcf18bf..e61d004 100644 (file)
@@ -1937,6 +1937,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                        mem = wm_adsp_find_region(dsp, type);
                        if (!mem) {
                                adsp_err(dsp, "No region of type: %x\n", type);
+                               ret = -EINVAL;
                                goto out_fw;
                        }
 
index 9dadf65..f790514 100644 (file)
@@ -520,10 +520,10 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                .driver_data = (void *)(BYT_RT5640_IN1_MAP |
                                        BYT_RT5640_MCLK_EN),
        },
-       {       /* HP Pavilion x2 10-n000nd */
+       {       /* HP Pavilion x2 10-k0XX, 10-n0XX */
                .matches = {
-                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
                },
                .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
                                        BYT_RT5640_JD_SRC_JD2_IN4N |
@@ -532,6 +532,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_SSP0_AIF1 |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* HP Pavilion x2 10-p0XX */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
+               },
+               .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
+                                       BYT_RT5640_JD_SRC_JD1_IN4P |
+                                       BYT_RT5640_OVCD_TH_1500UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* HP Stream 7 */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
index 9d17c87..426235a 100644 (file)
@@ -263,28 +263,6 @@ static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *dai)
-{
-       struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
-       struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
-       unsigned int id = dai->driver->id;
-       int ret;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               ret = regmap_fields_write(i2sctl->spken, id,
-                                        LPAIF_I2SCTL_SPKEN_ENABLE);
-       } else {
-               ret = regmap_fields_write(i2sctl->micen, id,
-                                        LPAIF_I2SCTL_MICEN_ENABLE);
-       }
-
-       if (ret)
-               dev_err(dai->dev, "error writing to i2sctl enable: %d\n", ret);
-
-       return ret;
-}
-
 static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
                int cmd, struct snd_soc_dai *dai)
 {
@@ -292,6 +270,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
        unsigned int id = dai->driver->id;
        int ret = -EINVAL;
+       unsigned int val = 0;
+
+       ret = regmap_read(drvdata->lpaif_map,
+                               LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), &val);
+       if (ret) {
+               dev_err(dai->dev, "error reading from i2sctl reg: %d\n", ret);
+               return ret;
+       }
+       if (val == LPAIF_I2SCTL_RESET_STATE) {
+               dev_err(dai->dev, "error in i2sctl register state\n");
+               return -ENOTRECOVERABLE;
+       }
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -308,11 +298,14 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
                        dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
                                ret);
 
-               ret = clk_enable(drvdata->mi2s_bit_clk[id]);
-               if (ret) {
-                       dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
-                       clk_disable(drvdata->mi2s_osr_clk[id]);
-                       return ret;
+               if (drvdata->bit_clk_state[id] == LPAIF_BIT_CLK_DISABLE) {
+                       ret = clk_enable(drvdata->mi2s_bit_clk[id]);
+                       if (ret) {
+                               dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
+                               clk_disable(drvdata->mi2s_osr_clk[id]);
+                               return ret;
+                       }
+                       drvdata->bit_clk_state[id] = LPAIF_BIT_CLK_ENABLE;
                }
 
                break;
@@ -329,7 +322,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
                if (ret)
                        dev_err(dai->dev, "error writing to i2sctl reg: %d\n",
                                ret);
-               clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+               if (drvdata->bit_clk_state[id] == LPAIF_BIT_CLK_ENABLE) {
+                       clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
+                       drvdata->bit_clk_state[id] = LPAIF_BIT_CLK_DISABLE;
+               }
                break;
        }
 
@@ -341,7 +337,6 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
        .startup        = lpass_cpu_daiops_startup,
        .shutdown       = lpass_cpu_daiops_shutdown,
        .hw_params      = lpass_cpu_daiops_hw_params,
-       .prepare        = lpass_cpu_daiops_prepare,
        .trigger        = lpass_cpu_daiops_trigger,
 };
 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
@@ -459,16 +454,20 @@ static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
        struct lpass_variant *v = drvdata->variant;
        int i;
 
+       for (i = 0; i < v->i2s_ports; ++i)
+               if (reg == LPAIF_I2SCTL_REG(v, i))
+                       return true;
        for (i = 0; i < v->irq_ports; ++i)
                if (reg == LPAIF_IRQSTAT_REG(v, i))
                        return true;
 
        for (i = 0; i < v->rdma_channels; ++i)
-               if (reg == LPAIF_RDMACURR_REG(v, i))
+               if (reg == LPAIF_RDMACURR_REG(v, i) || reg == LPAIF_RDMACTL_REG(v, i))
                        return true;
 
        for (i = 0; i < v->wrdma_channels; ++i)
-               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
+               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start) ||
+                       reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
                        return true;
 
        return false;
@@ -861,6 +860,7 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
                                PTR_ERR(drvdata->mi2s_bit_clk[dai_id]));
                        return PTR_ERR(drvdata->mi2s_bit_clk[dai_id]);
                }
+               drvdata->bit_clk_state[dai_id] = LPAIF_BIT_CLK_DISABLE;
        }
 
        /* Allocation for i2sctl regmap fields */
index 08f3fe5..4055428 100644 (file)
 #define LPAIF_I2SCTL_BITWIDTH_24       1
 #define LPAIF_I2SCTL_BITWIDTH_32       2
 
+#define LPAIF_BIT_CLK_DISABLE          0
+#define LPAIF_BIT_CLK_ENABLE           1
+
+#define LPAIF_I2SCTL_RESET_STATE       0x003C0004
+#define LPAIF_DMACTL_RESET_STATE       0x00200000
+
+
 /* LPAIF IRQ */
 #define LPAIF_IRQ_REG_ADDR(v, addr, port) \
        (v->irq_reg_base + (addr) + v->irq_reg_stride * (port))
index 7a3fdf8..80b09de 100644 (file)
@@ -110,6 +110,7 @@ static int lpass_platform_pcmops_open(struct snd_soc_component *component,
        struct regmap *map;
        unsigned int dai_id = cpu_dai->driver->id;
 
+       component->id = dai_id;
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -451,19 +452,34 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
        unsigned int reg_irqclr = 0, val_irqclr = 0;
        unsigned int  reg_irqen = 0, val_irqen = 0, val_mask = 0;
        unsigned int dai_id = cpu_dai->driver->id;
+       unsigned int dma_ctrl_reg = 0;
 
        ch = pcm_data->dma_ch;
        if (dir ==  SNDRV_PCM_STREAM_PLAYBACK) {
                id = pcm_data->dma_ch;
-               if (dai_id == LPASS_DP_RX)
+               if (dai_id == LPASS_DP_RX) {
                        dmactl = drvdata->hdmi_rd_dmactl;
-               else
+                       map = drvdata->hdmiif_map;
+               } else {
                        dmactl = drvdata->rd_dmactl;
+                       map = drvdata->lpaif_map;
+               }
        } else {
                dmactl = drvdata->wr_dmactl;
                id = pcm_data->dma_ch - v->wrdma_channel_start;
+               map = drvdata->lpaif_map;
+       }
+       ret = regmap_read(map, LPAIF_DMACTL_REG(v, ch, dir, dai_id), &dma_ctrl_reg);
+       if (ret) {
+               dev_err(soc_runtime->dev, "error reading from rdmactl reg: %d\n", ret);
+               return ret;
        }
 
+       if (dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE ||
+               dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE + 1) {
+               dev_err(soc_runtime->dev, "error in rdmactl register state\n");
+               return -ENOTRECOVERABLE;
+       }
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
index b4830f3..bccd1a0 100644 (file)
@@ -68,6 +68,7 @@ struct lpass_data {
        unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
        unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
        int hdmi_port_enable;
+       int bit_clk_state[LPASS_MAX_MI2S_PORTS];
 
        /* low-power audio interface (LPAIF) registers */
        void __iomem *lpaif;
index 92b1a6d..bd63a9c 100644 (file)
@@ -607,7 +607,7 @@ static int snd_us16x08_eq_put(struct snd_kcontrol *kcontrol,
 static int snd_us16x08_meter_info(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_info *uinfo)
 {
-       uinfo->count = 1;
+       uinfo->count = 34;
        uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
        uinfo->value.integer.max = 0x7FFF;
        uinfo->value.integer.min = 0;
index eb92027..7362bef 100644 (file)
@@ -10,6 +10,7 @@
 #include <unistd.h>
 #include <string.h>
 #include <errno.h>
+#include <endian.h>
 
 #include <linux/kernel.h>
 #include <linux/bootconfig.h>
@@ -147,6 +148,12 @@ static int load_xbc_file(const char *path, char **buf)
        return ret;
 }
 
+static int pr_errno(const char *msg, int err)
+{
+       pr_err("%s: %d\n", msg, err);
+       return err;
+}
+
 static int load_xbc_from_initrd(int fd, char **buf)
 {
        struct stat stat;
@@ -162,26 +169,26 @@ static int load_xbc_from_initrd(int fd, char **buf)
        if (stat.st_size < 8 + BOOTCONFIG_MAGIC_LEN)
                return 0;
 
-       if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0) {
-               pr_err("Failed to lseek: %d\n", -errno);
-               return -errno;
-       }
+       if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0)
+               return pr_errno("Failed to lseek for magic", -errno);
+
        if (read(fd, magic, BOOTCONFIG_MAGIC_LEN) < 0)
-               return -errno;
+               return pr_errno("Failed to read", -errno);
+
        /* Check the bootconfig magic bytes */
        if (memcmp(magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN) != 0)
                return 0;
 
-       if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0) {
-               pr_err("Failed to lseek: %d\n", -errno);
-               return -errno;
-       }
+       if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0)
+               return pr_errno("Failed to lseek for size", -errno);
 
        if (read(fd, &size, sizeof(u32)) < 0)
-               return -errno;
+               return pr_errno("Failed to read size", -errno);
+       size = le32toh(size);
 
        if (read(fd, &csum, sizeof(u32)) < 0)
-               return -errno;
+               return pr_errno("Failed to read checksum", -errno);
+       csum = le32toh(csum);
 
        /* Wrong size error  */
        if (stat.st_size < size + 8 + BOOTCONFIG_MAGIC_LEN) {
@@ -190,10 +197,8 @@ static int load_xbc_from_initrd(int fd, char **buf)
        }
 
        if (lseek(fd, stat.st_size - (size + 8 + BOOTCONFIG_MAGIC_LEN),
-                 SEEK_SET) < 0) {
-               pr_err("Failed to lseek: %d\n", -errno);
-               return -errno;
-       }
+                 SEEK_SET) < 0)
+               return pr_errno("Failed to lseek", -errno);
 
        ret = load_xbc_fd(fd, buf, size);
        if (ret < 0)
@@ -262,14 +267,16 @@ static int show_xbc(const char *path, bool list)
 
        ret = stat(path, &st);
        if (ret < 0) {
-               pr_err("Failed to stat %s: %d\n", path, -errno);
-               return -errno;
+               ret = -errno;
+               pr_err("Failed to stat %s: %d\n", path, ret);
+               return ret;
        }
 
        fd = open(path, O_RDONLY);
        if (fd < 0) {
-               pr_err("Failed to open initrd %s: %d\n", path, fd);
-               return -errno;
+               ret = -errno;
+               pr_err("Failed to open initrd %s: %d\n", path, ret);
+               return ret;
        }
 
        ret = load_xbc_from_initrd(fd, &buf);
@@ -307,8 +314,9 @@ static int delete_xbc(const char *path)
 
        fd = open(path, O_RDWR);
        if (fd < 0) {
-               pr_err("Failed to open initrd %s: %d\n", path, fd);
-               return -errno;
+               ret = -errno;
+               pr_err("Failed to open initrd %s: %d\n", path, ret);
+               return ret;
        }
 
        size = load_xbc_from_initrd(fd, &buf);
@@ -332,11 +340,13 @@ static int delete_xbc(const char *path)
 
 static int apply_xbc(const char *path, const char *xbc_path)
 {
+       char *buf, *data, *p;
+       size_t total_size;
+       struct stat stat;
+       const char *msg;
        u32 size, csum;
-       char *buf, *data;
+       int pos, pad;
        int ret, fd;
-       const char *msg;
-       int pos;
 
        ret = load_xbc_file(xbc_path, &buf);
        if (ret < 0) {
@@ -346,13 +356,12 @@ static int apply_xbc(const char *path, const char *xbc_path)
        size = strlen(buf) + 1;
        csum = checksum((unsigned char *)buf, size);
 
-       /* Prepare xbc_path data */
-       data = malloc(size + 8);
+       /* Backup the bootconfig data */
+       data = calloc(size + BOOTCONFIG_ALIGN +
+                     sizeof(u32) + sizeof(u32) + BOOTCONFIG_MAGIC_LEN, 1);
        if (!data)
                return -ENOMEM;
-       strcpy(data, buf);
-       *(u32 *)(data + size) = size;
-       *(u32 *)(data + size + 4) = csum;
+       memcpy(data, buf, size);
 
        /* Check the data format */
        ret = xbc_init(buf, &msg, &pos);
@@ -383,28 +392,61 @@ static int apply_xbc(const char *path, const char *xbc_path)
        /* Apply new one */
        fd = open(path, O_RDWR | O_APPEND);
        if (fd < 0) {
-               pr_err("Failed to open %s: %d\n", path, fd);
+               ret = -errno;
+               pr_err("Failed to open %s: %d\n", path, ret);
                free(data);
-               return fd;
+               return ret;
        }
        /* TODO: Ensure the @path is initramfs/initrd image */
-       ret = write(fd, data, size + 8);
-       if (ret < 0) {
-               pr_err("Failed to apply a boot config: %d\n", ret);
+       if (fstat(fd, &stat) < 0) {
+               pr_err("Failed to get the size of %s\n", path);
                goto out;
        }
-       /* Write a magic word of the bootconfig */
-       ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
-       if (ret < 0) {
-               pr_err("Failed to apply a boot config magic: %d\n", ret);
-               goto out;
-       }
-       ret = 0;
+
+       /* To align up the total size to BOOTCONFIG_ALIGN, get padding size */
+       total_size = stat.st_size + size + sizeof(u32) * 2 + BOOTCONFIG_MAGIC_LEN;
+       pad = ((total_size + BOOTCONFIG_ALIGN - 1) & (~BOOTCONFIG_ALIGN_MASK)) - total_size;
+       size += pad;
+
+       /* Add a footer */
+       p = data + size;
+       *(u32 *)p = htole32(size);
+       p += sizeof(u32);
+
+       *(u32 *)p = htole32(csum);
+       p += sizeof(u32);
+
+       memcpy(p, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
+       p += BOOTCONFIG_MAGIC_LEN;
+
+       total_size = p - data;
+
+       ret = write(fd, data, total_size);
+       if (ret < total_size) {
+               if (ret < 0)
+                       ret = -errno;
+               pr_err("Failed to apply a boot config: %d\n", ret);
+               if (ret >= 0)
+                       goto out_rollback;
+       } else
+               ret = 0;
+
 out:
        close(fd);
        free(data);
 
        return ret;
+
+out_rollback:
+       /* Map the partial write to -ENOSPC */
+       if (ret >= 0)
+               ret = -ENOSPC;
+       if (ftruncate(fd, stat.st_size) < 0) {
+               ret = -errno;
+               pr_err("Failed to rollback the write error: %d\n", ret);
+               pr_err("The initrd %s may be corrupted. Recommend to rebuild.\n", path);
+       }
+       goto out;
 }
 
 static int usage(void)
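
apply_xbc() now appends the config, pads it so the initrd size plus the footer lands on a BOOTCONFIG_ALIGN boundary, and writes a little-endian [size][checksum][magic] footer that the load path reads back through le32toh(). A sketch of the padding arithmetic under the same constants (ALIGN = 4, 12-byte magic); the helper below is illustrative, not the tool's code:

/* Sketch of the footer layout and alignment arithmetic used above. */
#include <stdint.h>
#include <stdio.h>

#define BOOTCONFIG_ALIGN	4
#define BOOTCONFIG_MAGIC_LEN	12	/* "#BOOTCONFIG\n" */

/* bytes appended to the initrd: config data + pad + le32 size + le32 csum
 * + magic, chosen so that the resulting file size is a multiple of ALIGN */
static size_t appended_size(size_t initrd_size, size_t config_size)
{
	size_t total = initrd_size + config_size +
		       2 * sizeof(uint32_t) + BOOTCONFIG_MAGIC_LEN;
	size_t pad = (BOOTCONFIG_ALIGN - total % BOOTCONFIG_ALIGN) %
		     BOOTCONFIG_ALIGN;

	return config_size + pad + 2 * sizeof(uint32_t) + BOOTCONFIG_MAGIC_LEN;
}

int main(void)
{
	size_t initrd = 1024, config = 9;	/* arbitrary example sizes */

	printf("append %zu bytes -> new size %zu (multiple of %d)\n",
	       appended_size(initrd, config),
	       initrd + appended_size(initrd, config), BOOTCONFIG_ALIGN);
	return 0;
}

The test-script hunk below checks the same property from the shell: it rounds the expected size up to the next multiple of ALIGN before comparing.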
index d295e40..baed891 100755 (executable)
@@ -9,6 +9,7 @@ else
   TESTDIR=.
 fi
 BOOTCONF=${TESTDIR}/bootconfig
+ALIGN=4
 
 INITRD=`mktemp ${TESTDIR}/initrd-XXXX`
 TEMPCONF=`mktemp ${TESTDIR}/temp-XXXX.bconf`
@@ -59,7 +60,10 @@ echo "Show command test"
 xpass $BOOTCONF $INITRD
 
 echo "File size check"
-xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9 + 12)
+total_size=$(expr $bconf_size + $initrd_size + 9 + 12 + $ALIGN - 1 )
+total_size=$(expr $total_size / $ALIGN)
+total_size=$(expr $total_size \* $ALIGN)
+xpass test $new_size -eq $total_size
 
 echo "Apply command repeat test"
 xpass $BOOTCONF -a $TEMPCONF $INITRD
index ed5e971..c873a79 100644 (file)
@@ -696,6 +696,7 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
                obj_node = calloc(1, sizeof(*obj_node));
                if (!obj_node) {
                        p_err("failed to allocate memory: %s", strerror(errno));
+                       err = -ENOMEM;
                        goto err_free;
                }
 
index 584e2e1..cefc715 100644 (file)
@@ -1222,8 +1222,10 @@ static int __cmd_diff(void)
                if (compute == COMPUTE_STREAM) {
                        d->evlist_streams = evlist__create_streams(
                                                d->session->evlist, 5);
-                       if (!d->evlist_streams)
+                       if (!d->evlist_streams) {
+                               ret = -ENOMEM;
                                goto out_delete;
+                       }
                }
        }
 
index aa89801..7b2d471 100644 (file)
@@ -356,9 +356,25 @@ bool die_is_signed_type(Dwarf_Die *tp_die)
 bool die_is_func_def(Dwarf_Die *dw_die)
 {
        Dwarf_Attribute attr;
+       Dwarf_Addr addr = 0;
+
+       if (dwarf_tag(dw_die) != DW_TAG_subprogram)
+               return false;
+
+       if (dwarf_attr(dw_die, DW_AT_declaration, &attr))
+               return false;
 
-       return (dwarf_tag(dw_die) == DW_TAG_subprogram &&
-               dwarf_attr(dw_die, DW_AT_declaration, &attr) == NULL);
+       /*
+        * DW_AT_declaration can be lost from function declaration
+        * by gcc's bug #97060.
+        * So we need to check this subprogram DIE has DW_AT_inline
+        * or an entry address.
+        */
+       if (!dwarf_attr(dw_die, DW_AT_inline, &attr) &&
+           die_entrypc(dw_die, &addr) < 0)
+               return false;
+
+       return true;
 }
 
 /**
@@ -373,6 +389,7 @@ bool die_is_func_def(Dwarf_Die *dw_die)
 int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
 {
        Dwarf_Addr base, end;
+       Dwarf_Attribute attr;
 
        if (!addr)
                return -EINVAL;
@@ -380,6 +397,13 @@ int die_entrypc(Dwarf_Die *dw_die, Dwarf_Addr *addr)
        if (dwarf_entrypc(dw_die, addr) == 0)
                return 0;
 
+       /*
+        *  Since the dwarf_ranges() will return 0 if there is no
+        * DW_AT_ranges attribute, we should check it first.
+        */
+       if (!dwarf_attr(dw_die, DW_AT_ranges, &attr))
+               return -ENOENT;
+
        return dwarf_ranges(dw_die, 0, &base, addr, &end) < 0 ? -ENOENT : 0;
 }
 
index d9b385f..10a4c4c 100644 (file)
@@ -15,6 +15,9 @@
 static inline size_t hash_bits(size_t h, int bits)
 {
        /* shuffle bits and return requested number of upper bits */
+       if (bits == 0)
+               return 0;
+
 #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
        /* LP64 case */
        return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
@@ -174,17 +177,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value);
  * @key: key to iterate entries for
  */
 #define hashmap__for_each_key_entry(map, cur, _key)                        \
-       for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
-                                            map->cap_bits);                \
-                    map->buckets ? map->buckets[bkt] : NULL; });           \
+       for (cur = map->buckets                                             \
+                    ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+                    : NULL;                                                \
             cur;                                                           \
             cur = cur->next)                                               \
                if (map->equal_fn(cur->key, (_key), map->ctx))
 
 #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key)              \
-       for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
-                                            map->cap_bits);                \
-                    cur = map->buckets ? map->buckets[bkt] : NULL; });     \
+       for (cur = map->buckets                                             \
+                    ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+                    : NULL;                                                \
             cur && ({ tmp = cur->next; true; });                           \
             cur = tmp)                                                     \
                if (map->equal_fn(cur->key, (_key), map->ctx))
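
The bits == 0 early return in hash_bits() matters because the expression shifts by (word width - bits); with bits == 0 that is a shift by the full width, which C leaves undefined, and the rewritten iteration macros now call hash_bits() unconditionally even on an empty map. A standalone sketch of the guarded helper, assuming 64-bit size_t:

/* Standalone copy of the guarded bit-mixing helper; shifting a 64-bit
 * value by 64 (bits == 0 without the guard) would be undefined behaviour. */
#include <stddef.h>
#include <stdio.h>

static inline size_t hash_bits(size_t h, int bits)
{
	if (bits == 0)
		return 0;	/* empty hashmap: everything maps to bucket 0 */
	/* 64-bit Fibonacci hashing: keep the top 'bits' bits */
	return (h * 11400714819323198485llu) >> (64 - bits);
}

int main(void)
{
	printf("%zu %zu\n", hash_bits(42, 0), hash_bits(42, 8));
	return 0;
}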
index 2c40610..76dd349 100644 (file)
@@ -1885,8 +1885,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
        if (lr->file && strtailcmp(lr->file, dwarf_decl_file(sp_die)))
                return DWARF_CB_OK;
 
-       if (die_is_func_def(sp_die) &&
-           die_match_name(sp_die, lr->function)) {
+       if (die_match_name(sp_die, lr->function) && die_is_func_def(sp_die)) {
                lf->fname = dwarf_decl_file(sp_die);
                dwarf_decl_line(sp_die, &lr->offset);
                pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
index 4b57c0c..a963b5b 100644 (file)
@@ -324,13 +324,10 @@ static int first_shadow_cpu(struct perf_stat_config *config,
        struct evlist *evlist = evsel->evlist;
        int i;
 
-       if (!config->aggr_get_id)
-               return 0;
-
        if (config->aggr_mode == AGGR_NONE)
                return id;
 
-       if (config->aggr_mode == AGGR_GLOBAL)
+       if (!config->aggr_get_id)
                return 0;
 
        for (i = 0; i < evsel__nr_cpus(evsel); i++) {
index 8a23391..d9c6243 100644 (file)
@@ -563,6 +563,9 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool,
        char cgrp_root[PATH_MAX];
        size_t mount_len;  /* length of mount point in the path */
 
+       if (!tool || !tool->cgroup_events)
+               return 0;
+
        if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
                pr_debug("cannot find cgroup mount point\n");
                return -1;
index c33a7aa..b71828d 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_NET_IFE_SKBPRIO=m
 CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
 CONFIG_NET_SCH_ETS=m
+CONFIG_NET_SCH_RED=m
 
 #
 ## Network testing