Merge tag 'gpio-v4.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 12 Oct 2018 10:56:25 +0000 (12:56 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 12 Oct 2018 10:56:25 +0000 (12:56 +0200)
Linus writes:
  "GPIO fix for the v4.19 series:
   - Fix up the interrupt parent for the irqdomains."

* tag 'gpio-v4.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio:
  gpio: Assign gpio_irq_chip::parents to non-stack pointer

117 files changed:
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/kernel/process.c
arch/arm/kernel/vmlinux.lds.h
arch/mips/include/asm/processor.h
arch/mips/kernel/process.c
arch/mips/kernel/setup.c
arch/mips/kernel/vdso.c
arch/mips/lib/memset.S
arch/s390/include/asm/sclp.h
arch/s390/kernel/early_printk.c
arch/s390/kernel/swsusp.S
arch/sparc/kernel/auxio_64.c
arch/sparc/kernel/kgdb_32.c
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/power.c
arch/sparc/kernel/prom_32.c
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/viohs.c
arch/sparc/vdso/Makefile
arch/x86/kernel/cpu/intel_rdt.h
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/mm/pgtable.c
drivers/bluetooth/hci_qca.c
drivers/crypto/inside-secure/safexcel.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/hwmon/npcm750-pwm-fan.c
drivers/infiniband/hw/mlx5/mr.c
drivers/input/evdev.c
drivers/input/joystick/xpad.c
drivers/input/misc/uinput.c
drivers/input/mousedev.c
drivers/input/serio/i8042.c
drivers/md/dm-cache-target.c
drivers/md/dm-flakey.c
drivers/md/dm-integrity.c
drivers/md/dm-linear.c
drivers/md/dm.c
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/phy/sfp.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/of/unittest.c
drivers/pci/controller/pcie-cadence.c
drivers/pinctrl/pinctrl-mcp23s08.c
drivers/platform/chrome/cros_ec_proto.c
drivers/s390/char/sclp_early_core.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/s390/cio/vfio_ccw_private.h
drivers/sbus/char/openprom.c
drivers/sbus/char/oradax.c
drivers/tty/serial/qcom_geni_serial.c
drivers/video/fbdev/aty/atyfb.h
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/mach64_ct.c
fs/gfs2/bmap.c
fs/xfs/xfs_reflink.c
include/asm-generic/vmlinux.lds.h
include/linux/cgroup-defs.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/suspend.h
include/net/devlink.h
include/net/ip_fib.h
include/trace/events/rxrpc.h
include/uapi/linux/smc_diag.h
include/uapi/linux/udp.h
kernel/cgroup/cgroup.c
kernel/power/suspend.c
lib/Makefile
lib/bch.c
lib/vsprintf.c
mm/page_alloc.c
mm/percpu.c
net/core/dev.c
net/core/devlink.c
net/core/skbuff.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/ip6_fib.c
net/rds/send.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/sched/cls_u32.c
net/sched/sch_cake.c
net/tipc/link.c
net/tipc/socket.c
samples/Kconfig
scripts/Makefile.build
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/util/machine.c
tools/perf/util/setup.py
tools/testing/selftests/net/rtnetlink.sh
tools/testing/selftests/net/udpgso_bench.sh

index 48a65c3..40082e4 100644 (file)
@@ -9657,7 +9657,8 @@ MIPS/LOONGSON2 ARCHITECTURE
 M:     Jiaxun Yang <jiaxun.yang@flygoat.com>
 L:     linux-mips@linux-mips.org
 S:     Maintained
-F:     arch/mips/loongson64/*{2e/2f}*
+F:     arch/mips/loongson64/fuloong-2e/
+F:     arch/mips/loongson64/lemote-2f/
 F:     arch/mips/include/asm/mach-loongson64/
 F:     drivers/*/*loongson2*
 F:     drivers/*/*/*loongson2*
index 9b2df07..e8b599b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -483,13 +483,15 @@ endif
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN  := $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX   := --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN  := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
 CLANG_GCC_TC   := --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
 KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
 KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
 endif
index b4441b0..a045f30 100644 (file)
@@ -149,7 +149,7 @@ config ARC_CPU_770
          Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
          This core has a bunch of cool new features:
          -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
-                   Shared Address Spaces (for sharing TLB entires in MMU)
+                   Shared Address Spaces (for sharing TLB entries in MMU)
          -Caches: New Prog Model, Region Flush
          -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
index 99cce77..644815c 100644 (file)
@@ -6,33 +6,11 @@
 # published by the Free Software Foundation.
 #
 
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
-
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
-    $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
-    $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
+cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=hs38
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
@@ -79,7 +57,7 @@ cflags-$(disable_small_data)          += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
 
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
 KBUILD_CFLAGS_MODULE   += -mlong-calls -mno-millicode
index 4674541..8ce6e72 100644 (file)
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
                task_thread_info(current)->thr_ptr;
        }
 
+
+       /*
+        * setup usermode thread pointer #1:
+        * when child is picked by scheduler, __switch_to() uses @c_callee to
+        * populate usermode callee regs: this works (despite being in a kernel
+        * function) since special return path for child @ret_from_fork()
+        * ensures those regs are not clobbered all the way to RTIE to usermode
+        */
+       c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /*
+        * setup usermode thread pointer #2:
+        * however for this special use of r25 in kernel, __switch_to() sets
+        * r25 for kernel needs and only in the final return path is usermode
+        * r25 setup, from pt_regs->user_r25. So set that up as well
+        */
+       c_regs->user_r25 = c_callee->r25;
+#endif
+
        return 0;
 }
 
index ae5fdff..8247bc1 100644 (file)
@@ -49,6 +49,8 @@
 #define ARM_DISCARD                                                    \
                *(.ARM.exidx.exit.text)                                 \
                *(.ARM.extab.exit.text)                                 \
+               *(.ARM.exidx.text.exit)                                 \
+               *(.ARM.extab.text.exit)                                 \
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))             \
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))             \
                ARM_EXIT_DISCARD(EXIT_TEXT)                             \
index b2fa629..49d6046 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
+#include <linux/sizes.h>
 #include <linux/threads.h>
 
 #include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
 
 #endif
 
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP      ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE    (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP              mips_stack_top()
 
 /*
  * This decides where the kernel will search for a free chunk of vm
index 8fc6989..d4f7fd4 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/nmi.h>
 #include <linux/cpu.h>
 
+#include <asm/abi.h>
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
@@ -39,6 +40,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/irq.h>
+#include <asm/mips-cps.h>
 #include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
        return pc;
 }
 
+unsigned long mips_stack_top(void)
+{
+       unsigned long top = TASK_SIZE & PAGE_MASK;
+
+       /* One page for branch delay slot "emulation" */
+       top -= PAGE_SIZE;
+
+       /* Space for the VDSO, data page & GIC user page */
+       top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+       top -= PAGE_SIZE;
+       top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+       /* Space for cache colour alignment */
+       if (cpu_has_dc_aliases)
+               top -= shm_align_mask + 1;
+
+       /* Space to randomize the VDSO base */
+       if (current->flags & PF_RANDOMIZE)
+               top -= VDSO_RANDOMIZE_SIZE;
+
+       return top;
+}
+
 /*
  * Don't forget that the stack pointer must be aligned on a 8 bytes
  * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
index c71d1eb..8aaaa42 100644 (file)
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
        struct memblock_region *reg;
        extern void plat_mem_setup(void);
 
+       /*
+        * Initialize boot_command_line to an innocuous but non-empty string in
+        * order to prevent early_init_dt_scan_chosen() from copying
+        * CONFIG_CMDLINE into it without our knowledge. We handle
+        * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+        * content because repeating arguments can be problematic.
+        */
+       strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+       /* call board setup routine */
+       plat_mem_setup();
+
+       /*
+        * Make sure all kernel memory is in the maps.  The "UP" and
+        * "DOWN" are opposite for initdata since if it crosses over
+        * into another memory section you don't want that to be
+        * freed when the initdata is freed.
+        */
+       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+                        BOOT_MEM_RAM);
+       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+                        BOOT_MEM_INIT_RAM);
+
+       pr_info("Determined physical RAM map:\n");
+       print_memory_map();
+
 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
        }
 #endif
 #endif
-
-       /* call board setup routine */
-       plat_mem_setup();
-
-       /*
-        * Make sure all kernel memory is in the maps.  The "UP" and
-        * "DOWN" are opposite for initdata since if it crosses over
-        * into another memory section you don't want that to be
-        * freed when the initdata is freed.
-        */
-       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
-                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
-                        BOOT_MEM_RAM);
-       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
-                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
-                        BOOT_MEM_INIT_RAM);
-
-       pr_info("Determined physical RAM map:\n");
-       print_memory_map();
-
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 
        *cmdline_p = command_line;
index 8f845f6..48a9c6b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
        }
 }
 
+static unsigned long vdso_base(void)
+{
+       unsigned long base;
+
+       /* Skip the delay slot emulation page */
+       base = STACK_TOP + PAGE_SIZE;
+
+       if (current->flags & PF_RANDOMIZE) {
+               base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+               base = PAGE_ALIGN(base);
+       }
+
+       return base;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        if (cpu_has_dc_aliases)
                size += shm_align_mask + 1;
 
-       base = get_unmapped_area(NULL, 0, size, 0, 0);
+       base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
index 3a6f34e..069acec 100644 (file)
         * unset_bytes = end_addr - current_addr + 1
         *      a2     =    t1    -      a0      + 1
         */
+       .set            reorder
        PTR_SUBU        a2, t1, a0
+       PTR_ADDIU       a2, 1
        jr              ra
-        PTR_ADDIU      a2, 1
+       .set            noreorder
 
        .endm
 
index 3cae916..e44a8d7 100644 (file)
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
 void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
 void sclp_early_detect(void);
 void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
 
 int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
index 9431784..40c1dfe 100644 (file)
@@ -10,7 +10,7 @@
 
 static void sclp_early_write(struct console *con, const char *s, unsigned int len)
 {
-       __sclp_early_printk(s, len);
+       __sclp_early_printk(s, len, 0);
 }
 
 static struct console sclp_early_console = {
index a049a7b..c1a080b 100644 (file)
@@ -198,12 +198,10 @@ pgm_check_entry:
 
        /* Suspend CPU not available -> panic */
        larl    %r15,init_thread_union
-       ahi     %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+       aghi    %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+       aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r2,.Lpanic_string
-       lghi    %r1,0
-       sam31
-       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
-       brasl   %r14,sclp_early_printk
+       brasl   %r14,sclp_early_printk_force
        larl    %r3,.Ldisabled_wait_31
        lpsw    0(%r3)
 4:
index 4e8f56c..cc42225 100644 (file)
@@ -115,8 +115,8 @@ static int auxio_probe(struct platform_device *dev)
                auxio_devtype = AUXIO_TYPE_SBUS;
                size = 1;
        } else {
-               printk("auxio: Unknown parent bus type [%s]\n",
-                      dp->parent->name);
+               printk("auxio: Unknown parent bus type [%pOFn]\n",
+                      dp->parent);
                return -ENODEV;
        }
        auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
index 5868fc3..639c8e5 100644 (file)
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                        linux_regs->pc = addr;
                        linux_regs->npc = addr + 4;
                }
-               /* fallthru */
+               /* fall through */
 
        case 'D':
        case 'k':
index d5f7dc6..a68bbdd 100644 (file)
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                        linux_regs->tpc = addr;
                        linux_regs->tnpc = addr + 4;
                }
-               /* fallthru */
+               /* fall through */
 
        case 'D':
        case 'k':
index 92627ab..d941875 100644 (file)
@@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op)
 
        power_reg = of_ioremap(res, 0, 0x4, "power");
 
-       printk(KERN_INFO "%s: Control reg at %llx\n",
-              op->dev.of_node->name, res->start);
+       printk(KERN_INFO "%pOFn: Control reg at %llx\n",
+              op->dev.of_node, res->start);
 
        if (has_button_interrupt(irq, op->dev.of_node)) {
                if (request_irq(irq,
index b51cbb9..17c87d2 100644 (file)
@@ -68,8 +68,8 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
                return;
 
        regs = rprop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io, regs->phys_addr);
 }
 
@@ -84,8 +84,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
                return;
 
        regs = prop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io,
                regs->phys_addr);
 }
@@ -104,13 +104,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
        regs = prop->value;
        devfn = (regs->phys_hi >> 8) & 0xff;
        if (devfn & 0x07) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        devfn >> 3,
                        devfn & 0x07);
        } else {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp,
                        devfn >> 3);
        }
 }
@@ -127,8 +127,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
 
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io, regs->phys_addr);
 }
 
@@ -167,8 +167,8 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
                return;
        device = prop->value;
 
-       sprintf(tmp_buf, "%s:%d:%d@%x,%x",
-               dp->name, *vendor, *device,
+       sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x",
+               dp, *vendor, *device,
                *intr, reg0);
 }
 
@@ -201,7 +201,7 @@ char * __init build_path_component(struct device_node *dp)
        tmp_buf[0] = '\0';
        __build_path_component(dp, tmp_buf);
        if (tmp_buf[0] == '\0')
-               strcpy(tmp_buf, dp->name);
+               snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
 
        n = prom_early_alloc(strlen(tmp_buf) + 1);
        strcpy(n, tmp_buf);
index baeaeed..6220411 100644 (file)
@@ -82,8 +82,8 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = rprop->value;
        if (!of_node_is_root(dp->parent)) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        (unsigned int) (regs->phys_addr >> 32UL),
                        (unsigned int) (regs->phys_addr & 0xffffffffUL));
                return;
@@ -97,17 +97,17 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
                const char *prefix = (type == 0) ? "m" : "i";
 
                if (low_bits)
-                       sprintf(tmp_buf, "%s@%s%x,%x",
-                               dp->name, prefix,
+                       sprintf(tmp_buf, "%pOFn@%s%x,%x",
+                               dp, prefix,
                                high_bits, low_bits);
                else
-                       sprintf(tmp_buf, "%s@%s%x",
-                               dp->name,
+                       sprintf(tmp_buf, "%pOFn@%s%x",
+                               dp,
                                prefix,
                                high_bits);
        } else if (type == 12) {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name, high_bits);
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp, high_bits);
        }
 }
 
@@ -122,8 +122,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
        if (!of_node_is_root(dp->parent)) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        (unsigned int) (regs->phys_addr >> 32UL),
                        (unsigned int) (regs->phys_addr & 0xffffffffUL));
                return;
@@ -138,8 +138,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
                if (tlb_type >= cheetah)
                        mask = 0x7fffff;
 
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        *(u32 *)prop->value,
                        (unsigned int) (regs->phys_addr & mask));
        }
@@ -156,8 +156,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
                return;
 
        regs = prop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io,
                regs->phys_addr);
 }
@@ -176,13 +176,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
        regs = prop->value;
        devfn = (regs->phys_hi >> 8) & 0xff;
        if (devfn & 0x07) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        devfn >> 3,
                        devfn & 0x07);
        } else {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp,
                        devfn >> 3);
        }
 }
@@ -203,8 +203,8 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
        if (!prop)
                return;
 
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                *(u32 *) prop->value,
                (unsigned int) (regs->phys_addr & 0xffffffffUL));
 }
@@ -221,7 +221,7 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
 
-       sprintf(tmp_buf, "%s@%x", dp->name, *regs);
+       sprintf(tmp_buf, "%pOFn@%x", dp, *regs);
 }
 
 /* "name@addrhi,addrlo" */
@@ -236,8 +236,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
 
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                (unsigned int) (regs->phys_addr >> 32UL),
                (unsigned int) (regs->phys_addr & 0xffffffffUL));
 }
@@ -257,8 +257,8 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
        /* This actually isn't right... should look at the #address-cells
         * property of the i2c bus node etc. etc.
         */
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name, regs[0], regs[1]);
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp, regs[0], regs[1]);
 }
 
 /* "name@reg0[,reg1]" */
@@ -274,11 +274,11 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
        regs = prop->value;
 
        if (prop->length == sizeof(u32) || regs[1] == 1) {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name, regs[0]);
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp, regs[0]);
        } else {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name, regs[0], regs[1]);
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp, regs[0], regs[1]);
        }
 }
 
@@ -295,11 +295,11 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf
        regs = prop->value;
 
        if (regs[2] || regs[3]) {
-               sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
-                       dp->name, regs[0], regs[1], regs[2], regs[3]);
+               sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x",
+                       dp, regs[0], regs[1], regs[2], regs[3]);
        } else {
-               sprintf(tmp_buf, "%s@%08x%08x",
-                       dp->name, regs[0], regs[1]);
+               sprintf(tmp_buf, "%pOFn@%08x%08x",
+                       dp, regs[0], regs[1]);
        }
 }
 
@@ -361,7 +361,7 @@ char * __init build_path_component(struct device_node *dp)
        tmp_buf[0] = '\0';
        __build_path_component(dp, tmp_buf);
        if (tmp_buf[0] == '\0')
-               strcpy(tmp_buf, dp->name);
+               snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
 
        n = prom_early_alloc(strlen(tmp_buf) + 1);
        strcpy(n, tmp_buf);
index 635d67f..7db5aab 100644 (file)
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
                struct vio_dring_register pkt;
                char all[sizeof(struct vio_dring_register) +
                         (sizeof(struct ldc_trans_cookie) *
-                         dr->ncookies)];
+                         VIO_MAX_RING_COOKIES)];
        } u;
+       size_t bytes = sizeof(struct vio_dring_register) +
+                      (sizeof(struct ldc_trans_cookie) *
+                       dr->ncookies);
        int i;
 
-       memset(&u, 0, sizeof(u));
+       if (WARN_ON(bytes > sizeof(u)))
+               return -EINVAL;
+
+       memset(&u, 0, bytes);
        init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
        u.pkt.dring_ident = 0;
        u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
                       (unsigned long long) u.pkt.cookies[i].cookie_size);
        }
 
-       return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+       return send_ctrl(vio, &u.pkt.tag, bytes);
 }
 
 static int send_rdx(struct vio_driver_state *vio)
index dd0b5a9..dc85570 100644 (file)
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
 targets += $(vdso_img_cfiles)
 targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 
-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
                        -Wl,--no-undefined \
                        -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
                        $(DISABLE_LTO)
 
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
 
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 hostprogs-y                    += vdso2c
 
 quiet_cmd_vdso2c = VDSO2C  $@
-define cmd_vdso2c
-       $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+      cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
 
 $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
        $(call if_changed,vdso2c)
index 285eb3e..3736f6d 100644 (file)
@@ -529,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v);
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                          u32 _cbm, int closid, bool exclusive);
+                          unsigned long cbm, int closid, bool exclusive);
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
-                                 u32 cbm);
+                                 unsigned long cbm);
 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
 int rdtgroup_tasks_assigned(struct rdtgroup *r);
 int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
 int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
 bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
 int rdt_pseudo_lock_init(void);
 void rdt_pseudo_lock_release(void);
index 40f3903..f8c260d 100644 (file)
@@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
 /**
  * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
  * @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
  *
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
  * pseudo-locked region on @d.
  *
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
  * otherwise.
  */
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *cbm_b;
        unsigned int cbm_len;
+       unsigned long cbm_b;
 
        if (d->plr) {
                cbm_len = d->plr->r->cache.cbm_len;
-               cbm_b = (unsigned long *)&d->plr->cbm;
-               if (bitmap_intersects(cbm, cbm_b, cbm_len))
+               cbm_b = d->plr->cbm;
+               if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
                        return true;
        }
        return false;
index 1b8e86a..b140c68 100644 (file)
@@ -975,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
  * is false then overlaps with any resource group or hardware entities
  * will be considered.
  *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
  * Return: false if CBM does not overlap, true if it does.
  */
 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
-                          u32 _cbm, int closid, bool exclusive)
+                          unsigned long cbm, int closid, bool exclusive)
 {
-       unsigned long *cbm = (unsigned long *)&_cbm;
-       unsigned long *ctrl_b;
        enum rdtgrp_mode mode;
+       unsigned long ctrl_b;
        u32 *ctrl;
        int i;
 
        /* Check for any overlap with regions used by hardware directly */
        if (!exclusive) {
-               if (bitmap_intersects(cbm,
-                                     (unsigned long *)&r->cache.shareable_bits,
-                                     r->cache.cbm_len))
+               ctrl_b = r->cache.shareable_bits;
+               if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
                        return true;
        }
 
        /* Check for overlap with other resource groups */
        ctrl = d->ctrl_val;
        for (i = 0; i < closids_supported(); i++, ctrl++) {
-               ctrl_b = (unsigned long *)ctrl;
+               ctrl_b = *ctrl;
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
                    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
-                       if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+                       if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
                                if (exclusive) {
                                        if (mode == RDT_MODE_EXCLUSIVE)
                                                return true;
@@ -1138,15 +1139,18 @@ out:
  * computed by first dividing the total cache size by the CBM length to
  * determine how many bytes each bit in the bitmask represents. The result
  * is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used to make the
+ * bitmap functions work correctly.
  */
 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
-                                 struct rdt_domain *d, u32 cbm)
+                                 struct rdt_domain *d, unsigned long cbm)
 {
        struct cpu_cacheinfo *ci;
        unsigned int size = 0;
        int num_b, i;
 
-       num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+       num_b = bitmap_weight(&cbm, r->cache.cbm_len);
        ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
        for (i = 0; i < ci->num_leaves; i++) {
                if (ci->info_list[i].level == r->cache_level) {
@@ -2353,6 +2357,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
        u32 used_b = 0, unused_b = 0;
        u32 closid = rdtgrp->closid;
        struct rdt_resource *r;
+       unsigned long tmp_cbm;
        enum rdtgrp_mode mode;
        struct rdt_domain *d;
        int i, ret;
@@ -2390,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
                         * modify the CBM based on system availability.
                         */
                        cbm_ensure_valid(&d->new_ctrl, r);
-                       if (bitmap_weight((unsigned long *) &d->new_ctrl,
-                                         r->cache.cbm_len) <
-                                       r->cache.min_cbm_bits) {
+                       /*
+                        * Assign the u32 CBM to an unsigned long to ensure
+                        * that bitmap_weight() does not access out-of-bound
+                        * memory.
+                        */
+                       tmp_cbm = d->new_ctrl;
+                       if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+                           r->cache.min_cbm_bits) {
                                rdt_last_cmd_printf("no space on %s:%d\n",
                                                    r->name, d->id);
                                return -ENOSPC;
index 089e78c..59274e2 100644 (file)
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
 
 #define UNSHARED_PTRS_PER_PGD                          \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD                      \
+       max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
 
 
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
  * and initialize the kernel pmds here.
  */
 #define PREALLOCATED_PMDS      UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS  MAX_UNSHARED_PTRS_PER_PGD
 
 /*
  * We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
  */
 #define PREALLOCATED_USER_PMDS  (static_cpu_has(X86_FEATURE_PTI) ? \
                                        KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
 
 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 {
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
 
 /* No need to prepopulate any pagetable entries in non-PAE modes. */
 #define PREALLOCATED_PMDS      0
+#define MAX_PREALLOCATED_PMDS  0
 #define PREALLOCATED_USER_PMDS  0
+#define MAX_PREALLOCATED_USER_PMDS 0
 #endif /* CONFIG_X86_PAE */
 
 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd;
-       pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
-       pmd_t *pmds[PREALLOCATED_PMDS];
+       pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+       pmd_t *pmds[MAX_PREALLOCATED_PMDS];
 
        pgd = _pgd_alloc();
 
index e182f60..2fee658 100644 (file)
@@ -1322,7 +1322,7 @@ static int qca_init_regulators(struct qca_power *qca,
 {
        int i;
 
-       qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
+       qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
                                      sizeof(struct regulator_bulk_data),
                                      GFP_KERNEL);
        if (!qca->vreg_bulk)
index 7e71043..86c699c 100644 (file)
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
 
        safexcel_configure(priv);
 
-       priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+       priv->ring = devm_kcalloc(dev, priv->config.rings,
+                                 sizeof(*priv->ring),
                                  GFP_KERNEL);
        if (!priv->ring) {
                ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
                if (ret)
                        goto err_reg_clk;
 
-               priv->ring[i].rdr_req = devm_kzalloc(dev,
-                       sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+               priv->ring[i].rdr_req = devm_kcalloc(dev,
+                       EIP197_DEFAULT_RING_SIZE,
+                       sizeof(priv->ring[i].rdr_req),
                        GFP_KERNEL);
                if (!priv->ring[i].rdr_req) {
                        ret = -ENOMEM;
index 0b976df..92ecb9b 100644 (file)
@@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        }
 
        mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
-       mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+       mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
                                        sizeof(struct drm_plane),
                                        GFP_KERNEL);
 
index 790d39f..b557687 100644 (file)
@@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
                return 0;
        }
 
-       mp->clk_config = devm_kzalloc(&pdev->dev,
-                                     sizeof(struct dss_clk) * num_clk,
+       mp->clk_config = devm_kcalloc(&pdev->dev,
+                                     num_clk, sizeof(struct dss_clk),
                                      GFP_KERNEL);
        if (!mp->clk_config)
                return -ENOMEM;
index 5691dfa..041e7da 100644 (file)
@@ -900,9 +900,22 @@ static enum drm_connector_status
 nv50_mstc_detect(struct drm_connector *connector, bool force)
 {
        struct nv50_mstc *mstc = nv50_mstc(connector);
+       enum drm_connector_status conn_status;
+       int ret;
+
        if (!mstc->port)
                return connector_status_disconnected;
-       return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+
+       ret = pm_runtime_get_sync(connector->dev->dev);
+       if (ret < 0 && ret != -EACCES)
+               return connector_status_disconnected;
+
+       conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+                                            mstc->port);
+
+       pm_runtime_mark_last_busy(connector->dev->dev);
+       pm_runtime_put_autosuspend(connector->dev->dev);
+       return conn_status;
 }
 
 static void
index 8474d60..b998f9f 100644 (file)
@@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
        if (fan_cnt < 1)
                return -EINVAL;
 
-       fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+       fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
        if (!fan_ch)
                return -ENOMEM;
 
index 9fb1d9c..e223148 100644 (file)
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
        int shrink = 0;
        int c;
 
+       if (!mr->allocated_from_cache)
+               return;
+
        c = order2idx(dev, mr->order);
        if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
                mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                umem = NULL;
        }
 #endif
-
        clean_mr(dev, mr);
 
+       /*
+        * We should unregister the DMA address from the HCA before
+        * remove the DMA mapping.
+        */
+       mlx5_mr_cache_free(dev, mr);
        if (umem) {
                ib_umem_release(umem);
                atomic_sub(npages, &dev->mdev->priv.reg_pages);
        }
-
        if (!mr->allocated_from_cache)
                kfree(mr);
-       else
-               mlx5_mr_cache_free(dev, mr);
 }
 
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
index 370206f..f48369d 100644 (file)
@@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
 
                input_inject_event(&evdev->handle,
                                   event.type, event.code, event.value);
+               cond_resched();
        }
 
  out:
index cd620e0..d4b9db4 100644 (file)
@@ -231,6 +231,7 @@ static const struct xpad_device {
        { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
        XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
+       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
index eb14ddf..8ec483e 100644 (file)
@@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
 
                input_event(udev->dev, ev.type, ev.code, ev.value);
                bytes += input_event_size();
+               cond_resched();
        }
 
        return bytes;
index e082280..412fa71 100644 (file)
@@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
                mousedev_generate_response(client, c);
 
                spin_unlock_irq(&client->packet_lock);
+               cond_resched();
        }
 
        kill_fasync(&client->fasync, SIGIO, POLL_IN);
index b8bc715..95a78cc 100644 (file)
@@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void)
        for (i = 0; i < I8042_NUM_PORTS; i++) {
                struct serio *serio = i8042_ports[i].serio;
 
-               if (serio) {
-                       printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
-                               serio->name,
-                               (unsigned long) I8042_DATA_REG,
-                               (unsigned long) I8042_COMMAND_REG,
-                               i8042_ports[i].irq);
-                       serio_register_port(serio);
-                       device_set_wakeup_capable(&serio->dev, true);
-               }
+               if (!serio)
+                       continue;
+
+               printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
+                       serio->name,
+                       (unsigned long) I8042_DATA_REG,
+                       (unsigned long) I8042_COMMAND_REG,
+                       i8042_ports[i].irq);
+               serio_register_port(serio);
+               device_set_wakeup_capable(&serio->dev, true);
+
+               /*
+                * On platforms using suspend-to-idle, allow the keyboard to
+                * wake up the system from sleep by enabling keyboard wakeups
+                * by default.  This is consistent with keyboard wakeup
+                * behavior on many platforms using suspend-to-RAM (ACPI S3)
+                * by default.
+                */
+               if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+                       device_set_wakeup_enable(&serio->dev, true);
        }
 }
 
index e13d991..b29a832 100644 (file)
@@ -3484,14 +3484,13 @@ static int __init dm_cache_init(void)
        int r;
 
        migration_cache = KMEM_CACHE(dm_cache_migration, 0);
-       if (!migration_cache) {
-               dm_unregister_target(&cache_target);
+       if (!migration_cache)
                return -ENOMEM;
-       }
 
        r = dm_register_target(&cache_target);
        if (r) {
                DMERR("cache target registration failed: %d", r);
+               kmem_cache_destroy(migration_cache);
                return r;
        }
 
index 21d126a..32aabe2 100644 (file)
@@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
 static struct target_type flakey_target = {
        .name   = "flakey",
        .version = {1, 5, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
        .features = DM_TARGET_ZONED_HM,
+#endif
        .module = THIS_MODULE,
        .ctr    = flakey_ctr,
        .dtr    = flakey_dtr,
index 89ccb64..e1fa6ba 100644 (file)
@@ -3462,7 +3462,8 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
-               ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+               ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
+                                                ic->tag_size, GFP_KERNEL);
                if (!ic->recalc_tags) {
                        ti->error = "Cannot allocate tags for recalculating";
                        r = -ENOMEM;
index d10964d..2f7c44a 100644 (file)
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_REMAPPED;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
 static int linear_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
 {
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
 
        return DM_ENDIO_DONE;
 }
+#endif
 
 static void linear_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 static struct target_type linear_target = {
        .name   = "linear",
        .version = {1, 4, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
+       .end_io = linear_end_io,
        .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+#else
+       .features = DM_TARGET_PASSES_INTEGRITY,
+#endif
        .module = THIS_MODULE,
        .ctr    = linear_ctr,
        .dtr    = linear_dtr,
        .map    = linear_map,
-       .end_io = linear_end_io,
        .status = linear_status,
        .prepare_ioctl = linear_prepare_ioctl,
        .iterate_devices = linear_iterate_devices,
index 20f7e4e..45abb54 100644 (file)
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
 /*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the target device. The zone descriptors
- * must be remapped to match their position within the dm device.
- * A target may call dm_remap_zone_report after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
- * from the target device mapping to the dm device.
+ * The zone descriptors obtained with a zone report indicate zone positions
+ * within the target backing device, regardless of that device is a partition
+ * and regardless of the target mapping start sector on the device or partition.
+ * The zone descriptors start sector and write pointer position must be adjusted
+ * to match their relative position within the dm device.
+ * A target may call dm_remap_zone_report() after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
+ * backing device.
  */
 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
 {
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
        struct blk_zone *zone;
        unsigned int nr_rep = 0;
        unsigned int ofst;
+       sector_t part_offset;
        struct bio_vec bvec;
        struct bvec_iter iter;
        void *addr;
@@ -1178,6 +1181,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
        if (bio->bi_status)
                return;
 
+       /*
+        * bio sector was incremented by the request size on completion. Taking
+        * into account the original request sector, the target start offset on
+        * the backing device and the target mapping offset (ti->begin), the
+        * start sector of the backing device. The partition offset is always 0
+        * if the target uses a whole device.
+        */
+       part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+
        /*
         * Remap the start sector of the reported zones. For sequential zones,
         * also remap the write pointer position.
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                /* Set zones start sector */
                while (hdr->nr_zones && ofst < bvec.bv_len) {
                        zone = addr + ofst;
+                       zone->start -= part_offset;
                        if (zone->start >= start + ti->len) {
                                hdr->nr_zones = 0;
                                break;
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                                else if (zone->cond == BLK_ZONE_COND_EMPTY)
                                        zone->wp = zone->start;
                                else
-                                       zone->wp = zone->wp + ti->begin - start;
+                                       zone->wp = zone->wp + ti->begin - start - part_offset;
                        }
                        ofst += sizeof(struct blk_zone);
                        hdr->nr_zones--;
index e0066ad..fc8b48a 100644 (file)
@@ -703,7 +703,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-       unsigned int port;
        int ret;
 
        ret = bcm_sf2_sw_rst(priv);
@@ -715,14 +714,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
        if (priv->hw_params.num_gphy == 1)
                bcm_sf2_gphy_enable_set(ds, true);
 
-       for (port = 0; port < DSA_MAX_PORTS; port++) {
-               if (dsa_is_user_port(ds, port))
-                       bcm_sf2_port_setup(ds, port, NULL);
-               else if (dsa_is_cpu_port(ds, port))
-                       bcm_sf2_imp_setup(ds, port);
-       }
-
-       bcm_sf2_enable_acb(ds);
+       ds->ops->setup(ds);
 
        return 0;
 }
@@ -1173,10 +1165,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
 {
        struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
 
-       /* Disable all ports and interrupts */
        priv->wol_ports_mask = 0;
-       bcm_sf2_sw_suspend(priv->dev->ds);
        dsa_unregister_switch(priv->dev->ds);
+       /* Disable all ports and interrupts */
+       bcm_sf2_sw_suspend(priv->dev->ds);
        bcm_sf2_mdio_unregister(priv);
 
        return 0;
index 1c682b7..2b3ff0c 100644 (file)
@@ -245,11 +245,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
                ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
        ena_rx_ctx->l3_csum_err =
-               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
-               ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+               !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
        ena_rx_ctx->l4_csum_err =
-               (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
-               ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+               !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+               ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
        ena_rx_ctx->hash = cdesc->hash;
        ena_rx_ctx->frag =
                (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
index 25621a2..d906293 100644 (file)
@@ -1575,8 +1575,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
        if (rc)
                return rc;
 
-       ena_init_napi(adapter);
-
        ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
 
        ena_refill_all_rx_bufs(adapter);
@@ -1730,6 +1728,13 @@ static int ena_up(struct ena_adapter *adapter)
 
        ena_setup_io_intr(adapter);
 
+       /* napi poll functions should be initialized before running
+        * request_irq(), to handle a rare condition where there is a pending
+        * interrupt, causing the ISR to fire immediately while the poll
+        * function wasn't set yet, causing a null dereference
+        */
+       ena_init_napi(adapter);
+
        rc = ena_request_io_irq(adapter);
        if (rc)
                goto err_req_irq;
@@ -2619,7 +2624,11 @@ err_disable_msix:
        ena_free_mgmnt_irq(adapter);
        ena_disable_msix(adapter);
 err_device_destroy:
+       ena_com_abort_admin_commands(ena_dev);
+       ena_com_wait_for_abort_completion(ena_dev);
        ena_com_admin_destroy(ena_dev);
+       ena_com_mmio_reg_read_request_destroy(ena_dev);
+       ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
 err:
        clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3099,15 +3108,8 @@ err_rss_init:
 
 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
-       int release_bars;
-
-       if (ena_dev->mem_bar)
-               devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
-       if (ena_dev->reg_bar)
-               devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+       int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 
-       release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
        pci_release_selected_regions(pdev, release_bars);
 }
 
index d2d5944..6a04603 100644 (file)
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
                             NULL, NULL, NULL),
 };
 
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
-                                       union devlink_param_value init_val)
-{
-       struct mlx4_priv *priv = devlink_priv(devlink);
-       struct mlx4_dev *dev = &priv->dev;
-       int err;
-
-       err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
-       if (err)
-               mlx4_warn(dev,
-                         "devlink set parameter %u value failed (err = %d)",
-                         param_id, err);
-}
-
 static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
 {
        union devlink_param_value value;
 
        value.vbool = !!mlx4_internal_err_reset;
-       mlx4_devlink_set_init_value(devlink,
-                                   DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+                                          value);
 
        value.vu32 = 1UL << log_num_mac;
-       mlx4_devlink_set_init_value(devlink,
-                                   DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+                                          value);
 
        value.vbool = enable_64b_cqe_eqe;
-       mlx4_devlink_set_init_value(devlink,
-                                   MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+                                          value);
 
        value.vbool = enable_4k_uar;
-       mlx4_devlink_set_init_value(devlink,
-                                   MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+                                          value);
 
        value.vbool = false;
-       mlx4_devlink_set_init_value(devlink,
-                                   DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
-                                   value);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+                                          value);
 }
 
 static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
index 9a5e296..3a5e616 100644 (file)
@@ -4282,8 +4282,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
-       case RTL_GIGA_MAC_VER_34:
-       case RTL_GIGA_MAC_VER_35:
+       case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+       case RTL_GIGA_MAC_VER_38:
                RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
index 7aa5ebb..4289ccb 100644 (file)
@@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                u16 idx = dring->tail;
                struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
 
-               if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+               if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+                       /* reading the register clears the irq */
+                       netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
                        break;
+               }
 
                /* This  barrier is needed to keep us from reading
                 * any other fields out of the netsec_de until we have
index 6e13b88..fd8bb99 100644 (file)
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
 /* Give this long for the PHY to reset. */
 #define T_PHY_RESET_MS 50
 
-static DEFINE_MUTEX(sfp_mutex);
-
 struct sff_data {
        unsigned int gpios;
        bool (*module_supported)(const struct sfp_eeprom_id *id);
index 533b6fb..72a55b6 100644 (file)
@@ -1241,6 +1241,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
+       {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)},   /* Cinterion ALASxx (1 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
index 7780b07..79e59f2 100644 (file)
@@ -258,7 +258,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
        if (!buf->urb)
                return -ENOMEM;
 
-       buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+       buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
                                    gfp);
        if (!buf->urb->sg)
                return -ENOMEM;
@@ -464,8 +464,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
        int i, err, nsgs;
 
        spin_lock_init(&q->lock);
-       q->entry = devm_kzalloc(dev->dev,
-                               MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+       q->entry = devm_kcalloc(dev->dev,
+                               MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;
@@ -717,8 +717,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
                INIT_LIST_HEAD(&q->swq);
                q->hw_idx = q2hwq(i);
 
-               q->entry = devm_kzalloc(dev->dev,
-                                       MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+               q->entry = devm_kcalloc(dev->dev,
+                                       MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;
index 722537e..41b4971 100644 (file)
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
        pdev = of_find_device_by_node(np);
        unittest(pdev, "device 1 creation failed\n");
 
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+       if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq == -EPROBE_DEFER,
+                        "device deferred probe failed - %d\n", irq);
 
-       /* Test that a parsing failure does not return -EPROBE_DEFER */
-       np = of_find_node_by_path("/testcase-data/testcase-device2");
-       pdev = of_find_device_by_node(np);
-       unittest(pdev, "device 2 creation failed\n");
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+               /* Test that a parsing failure does not return -EPROBE_DEFER */
+               np = of_find_node_by_path("/testcase-data/testcase-device2");
+               pdev = of_find_device_by_node(np);
+               unittest(pdev, "device 2 creation failed\n");
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq < 0 && irq != -EPROBE_DEFER,
+                        "device parsing error failed - %d\n", irq);
+       }
 
        np = of_find_node_by_path("/testcase-data/platform-tests");
        unittest(np, "No testcase data in device tree\n");
index 86f1b00..975bcdd 100644 (file)
@@ -180,11 +180,11 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
                return 0;
        }
 
-       phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+       phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
        if (!phy)
                return -ENOMEM;
 
-       link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+       link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;
 
index 4a8a8ef..cf73a40 100644 (file)
@@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
                return err;
        }
 
+       return 0;
+}
+
+static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
+{
+       struct gpio_chip *chip = &mcp->chip;
+       int err;
+
        err =  gpiochip_irqchip_add_nested(chip,
                                           &mcp23s08_irq_chip,
                                           0,
@@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
        }
 
        if (mcp->irq && mcp->irq_controller) {
-               ret = mcp23s08_irq_setup(mcp);
+               ret = mcp23s08_irqchip_setup(mcp);
                if (ret)
                        goto fail;
        }
@@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                goto fail;
        }
 
+       if (mcp->irq)
+               ret = mcp23s08_irq_setup(mcp);
+
 fail:
        if (ret < 0)
                dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
index 398393a..b6fd483 100644 (file)
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
        ret = cros_ec_cmd_xfer(ec_dev, msg);
        if (ret > 0) {
                ec_dev->event_size = ret - 1;
-               memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+               memcpy(&ec_dev->event_data, msg->data, ret);
        }
 
        return ret;
index eceba38..2f61f55 100644 (file)
@@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
  */
-void __sclp_early_printk(const char *str, unsigned int len)
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
 {
        int have_linemode, have_vt220;
 
-       if (sclp_init_state != sclp_init_state_uninitialized)
+       if (!force && sclp_init_state != sclp_init_state_uninitialized)
                return;
        if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
                return;
@@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len)
 
 void sclp_early_printk(const char *str)
 {
-       __sclp_early_printk(str, strlen(str));
+       __sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+       __sclp_early_printk(str, strlen(str), 1);
 }
index dbe7c7a..fd77e46 100644 (file)
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
 
        for (i = 0; i < pat->pat_nr; i++, pa++)
                for (j = 0; j < pa->pa_nr; j++)
-                       if (pa->pa_iova_pfn[i] == iova_pfn)
+                       if (pa->pa_iova_pfn[j] == iova_pfn)
                                return true;
 
        return false;
index 770fa9c..f47d16b 100644 (file)
@@ -22,6 +22,7 @@
 #include "vfio_ccw_private.h"
 
 struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
 
 /*
  * Helpers
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
                cp_update_scsw(&private->cp, &irb->scsw);
                cp_free(&private->cp);
        }
-       memcpy(private->io_region.irb_area, irb, sizeof(*irb));
+       memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;
+
+       private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+                                              GFP_KERNEL | GFP_DMA);
+       if (!private->io_region) {
+               kfree(private);
+               return -ENOMEM;
+       }
+
        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
 
@@ -139,6 +148,7 @@ out_disable:
        cio_disable_subchannel(sch);
 out_free:
        dev_set_drvdata(&sch->dev, NULL);
+       kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private);
        return ret;
 }
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
 
        dev_set_drvdata(&sch->dev, NULL);
 
+       kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private);
 
        return 0;
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
        if (!vfio_ccw_work_q)
                return -ENOMEM;
 
+       vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+                                       sizeof(struct ccw_io_region), 0,
+                                       SLAB_ACCOUNT, 0,
+                                       sizeof(struct ccw_io_region), NULL);
+       if (!vfio_ccw_io_region) {
+               destroy_workqueue(vfio_ccw_work_q);
+               return -ENOMEM;
+       }
+
        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
+               kmem_cache_destroy(vfio_ccw_io_region);
                destroy_workqueue(vfio_ccw_work_q);
        }
 
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
 {
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
+       kmem_cache_destroy(vfio_ccw_io_region);
        destroy_workqueue(vfio_ccw_work_q);
 }
 module_init(vfio_ccw_sch_init);
index 797a827..f94aa01 100644 (file)
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
 {
        pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
-       private->io_region.ret_code = -EIO;
+       private->io_region->ret_code = -EIO;
 }
 
 static void fsm_io_busy(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
 {
-       private->io_region.ret_code = -EBUSY;
+       private->io_region->ret_code = -EBUSY;
 }
 
 static void fsm_disabled_irq(struct vfio_ccw_private *private,
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 {
        union orb *orb;
        union scsw *scsw = &private->scsw;
-       struct ccw_io_region *io_region = &private->io_region;
+       struct ccw_io_region *io_region = private->io_region;
        struct mdev_device *mdev = private->mdev;
        char *errstr = "request";
 
index 41eeb57..f673e10 100644 (file)
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
                return -EINVAL;
 
        private = dev_get_drvdata(mdev_parent_dev(mdev));
-       region = &private->io_region;
+       region = private->io_region;
        if (copy_to_user(buf, (void *)region + *ppos, count))
                return -EFAULT;
 
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
        if (private->state != VFIO_CCW_STATE_IDLE)
                return -EACCES;
 
-       region = &private->io_region;
+       region = private->io_region;
        if (copy_from_user((void *)region + *ppos, buf, count))
                return -EFAULT;
 
index 78a66d9..078e46f 100644 (file)
@@ -41,7 +41,7 @@ struct vfio_ccw_private {
        atomic_t                avail;
        struct mdev_device      *mdev;
        struct notifier_block   nb;
-       struct ccw_io_region    io_region;
+       struct ccw_io_region    *io_region;
 
        struct channel_program  cp;
        struct irb              irb;
index 7b31f19..050879a 100644 (file)
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
 
 static int __init openprom_init(void)
 {
-       struct device_node *dp;
        int err;
 
        err = misc_register(&openprom_dev);
        if (err)
                return err;
 
-       dp = of_find_node_by_path("/");
-       dp = dp->child;
-       while (dp) {
-               if (!strcmp(dp->name, "options"))
-                       break;
-               dp = dp->sibling;
-       }
-       options_node = dp;
-
+       options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
        if (!options_node) {
                misc_deregister(&openprom_dev);
                return -EIO;
index 524f9ea..6516bc3 100644 (file)
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
 alloc_error:
        kfree(ctx->ccb_buf);
 done:
-       if (ctx != NULL)
-               kfree(ctx);
+       kfree(ctx);
        return -ENOMEM;
 }
 
index 29ec343..1515074 100644 (file)
@@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
        geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
        geni_se_select_mode(&port->se, port->xfer_mode);
        if (!uart_console(uport)) {
-               port->rx_fifo = devm_kzalloc(uport->dev,
-                       port->rx_fifo_depth * sizeof(u32), GFP_KERNEL);
+               port->rx_fifo = devm_kcalloc(uport->dev,
+                       port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
                if (!port->rx_fifo)
                        return -ENOMEM;
        }
index 8235b28..d09bab3 100644 (file)
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
+extern const u8 aty_postdividers[8];
+
 
     /*
      *  Hardware cursor support
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
 
 extern void aty_reset_engine(const struct atyfb_par *par);
 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8   aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
index a9a8272..05111e9 100644 (file)
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
                /*
                 * PLL Reference Divider M:
                 */
-               M = pll_regs[2];
+               M = pll_regs[PLL_REF_DIV];
 
                /*
                 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
                 */
-               N = pll_regs[7 + (clock_cntl & 3)];
+               N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
 
                /*
                 * PLL Post Divider P (Dependent on CLOCK_CNTL):
                 */
-               P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+               P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+                                    ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
 
                /*
                 * PLL Divider Q:
index 74a62aa..f87cc81 100644 (file)
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
  */
 
 #define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
 
 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
 {
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
                pll->vclk_post_div += (q <  64*8);
                pll->vclk_post_div += (q <  32*8);
        }
-       pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+       pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
        //    pll->vclk_post_div <<= 6;
        pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
        pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                u8 mclk_fb_div, pll_ext_cntl;
                pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
                pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
-               pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+               pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
                mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
                if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
                        mclk_fb_div <<= 1;
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                xpost_div += (q <  64*8);
                xpost_div += (q <  32*8);
        }
-       pll->ct.xclk_post_div_real = postdividers[xpost_div];
+       pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
        pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
 
 #ifdef CONFIG_PPC
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                        mpost_div += (q <  64*8);
                        mpost_div += (q <  32*8);
                }
-               sclk_post_div_real = postdividers[mpost_div];
+               sclk_post_div_real = aty_postdividers[mpost_div];
                pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
                pll->ct.spll_cntl2 = mpost_div << 4;
 #ifdef DEBUG
index 03128ed..3c159a7 100644 (file)
@@ -975,6 +975,10 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
 {
        struct gfs2_inode *ip = GFS2_I(inode);
 
+       if (!page_has_buffers(page)) {
+               create_empty_buffers(page, inode->i_sb->s_blocksize,
+                                    (1 << BH_Dirty)|(1 << BH_Uptodate));
+       }
        gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
 }
 
index 5289e22..42ea7ba 100644 (file)
@@ -1220,35 +1220,92 @@ retry:
        return 0;
 }
 
+/* Unlock both inodes after they've been prepped for a range clone. */
+STATIC void
+xfs_reflink_remap_unlock(
+       struct file             *file_in,
+       struct file             *file_out)
+{
+       struct inode            *inode_in = file_inode(file_in);
+       struct xfs_inode        *src = XFS_I(inode_in);
+       struct inode            *inode_out = file_inode(file_out);
+       struct xfs_inode        *dest = XFS_I(inode_out);
+       bool                    same_inode = (inode_in == inode_out);
+
+       xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+       if (!same_inode)
+               xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+       inode_unlock(inode_out);
+       if (!same_inode)
+               inode_unlock_shared(inode_in);
+}
+
 /*
- * Link a range of blocks from one file to another.
+ * If we're reflinking to a point past the destination file's EOF, we must
+ * zero any speculative post-EOF preallocations that sit between the old EOF
+ * and the destination file offset.
  */
-int
-xfs_reflink_remap_range(
+static int
+xfs_reflink_zero_posteof(
+       struct xfs_inode        *ip,
+       loff_t                  pos)
+{
+       loff_t                  isize = i_size_read(VFS_I(ip));
+
+       if (pos <= isize)
+               return 0;
+
+       trace_xfs_zero_eof(ip, isize, pos - isize);
+       return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
+                       &xfs_iomap_ops);
+}
+
+/*
+ * Prepare two files for range cloning.  Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file will
+ * be truncated, and any leases on the out file will have been broken.  This
+ * function borrows heavily from xfs_file_aio_write_checks.
+ *
+ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
+ * checked that the bytes beyond EOF physically match. Hence we cannot use the
+ * EOF block in the source dedupe range because it's not a complete block match,
+ * hence can introduce a corruption into the file that has its block replaced.
+ *
+ * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
+ * "block aligned" for the purposes of cloning entire files.  However, if the
+ * source file range includes the EOF block and it lands within the existing EOF
+ * of the destination file, then we can expose stale data from beyond the source
+ * file EOF in the destination file.
+ *
+ * XFS doesn't support partial block sharing, so in both cases we have to check
+ * these cases ourselves. For dedupe, we can simply round the length to dedupe
+ * down to the previous whole block and ignore the partial EOF block. While this
+ * means we can't dedupe the last block of a file, this is an acceptable
+ * tradeoff for simplicity on implementation.
+ *
+ * For cloning, we want to share the partial EOF block if it is also the new EOF
+ * block of the destination file. If the partial EOF block lies inside the
+ * existing destination EOF, then we have to abort the clone to avoid exposing
+ * stale data in the destination file. Hence we reject these clone attempts with
+ * -EINVAL in this case.
+ */
+STATIC int
+xfs_reflink_remap_prep(
        struct file             *file_in,
        loff_t                  pos_in,
        struct file             *file_out,
        loff_t                  pos_out,
-       u64                     len,
+       u64                     *len,
        bool                    is_dedupe)
 {
        struct inode            *inode_in = file_inode(file_in);
        struct xfs_inode        *src = XFS_I(inode_in);
        struct inode            *inode_out = file_inode(file_out);
        struct xfs_inode        *dest = XFS_I(inode_out);
-       struct xfs_mount        *mp = src->i_mount;
        bool                    same_inode = (inode_in == inode_out);
-       xfs_fileoff_t           sfsbno, dfsbno;
-       xfs_filblks_t           fsblen;
-       xfs_extlen_t            cowextsize;
+       u64                     blkmask = i_blocksize(inode_in) - 1;
        ssize_t                 ret;
 
-       if (!xfs_sb_version_hasreflink(&mp->m_sb))
-               return -EOPNOTSUPP;
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
-
        /* Lock both files against IO */
        ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
        if (ret)
@@ -1270,33 +1327,115 @@ xfs_reflink_remap_range(
                goto out_unlock;
 
        ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
-                       &len, is_dedupe);
+                       len, is_dedupe);
        if (ret <= 0)
                goto out_unlock;
 
+       /*
+        * If the dedupe data matches, chop off the partial EOF block
+        * from the source file so we don't try to dedupe the partial
+        * EOF block.
+        */
+       if (is_dedupe) {
+               *len &= ~blkmask;
+       } else if (*len & blkmask) {
+               /*
+                * The user is attempting to share a partial EOF block,
+                * if it's inside the destination EOF then reject it.
+                */
+               if (pos_out + *len < i_size_read(inode_out)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+       }
+
        /* Attach dquots to dest inode before changing block map */
        ret = xfs_qm_dqattach(dest);
        if (ret)
                goto out_unlock;
 
-       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
        /*
-        * Clear out post-eof preallocations because we don't have page cache
-        * backing the delayed allocations and they'll never get freed on
-        * their own.
+        * Zero existing post-eof speculative preallocations in the destination
+        * file.
         */
-       if (xfs_can_free_eofblocks(dest, true)) {
-               ret = xfs_free_eofblocks(dest);
-               if (ret)
-                       goto out_unlock;
-       }
+       ret = xfs_reflink_zero_posteof(dest, pos_out);
+       if (ret)
+               goto out_unlock;
 
        /* Set flags and remap blocks. */
        ret = xfs_reflink_set_inode_flag(src, dest);
        if (ret)
                goto out_unlock;
 
+       /* Zap any page cache for the destination file's range. */
+       truncate_inode_pages_range(&inode_out->i_data, pos_out,
+                                  PAGE_ALIGN(pos_out + *len) - 1);
+
+       /* If we're altering the file contents... */
+       if (!is_dedupe) {
+               /*
+                * ...update the timestamps (which will grab the ilock again
+                * from xfs_fs_dirty_inode, so we have to call it before we
+                * take the ilock).
+                */
+               if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+                       ret = file_update_time(file_out);
+                       if (ret)
+                               goto out_unlock;
+               }
+
+               /*
+                * ...clear the security bits if the process is not being run
+                * by root.  This keeps people from modifying setuid and setgid
+                * binaries.
+                */
+               ret = file_remove_privs(file_out);
+               if (ret)
+                       goto out_unlock;
+       }
+
+       return 1;
+out_unlock:
+       xfs_reflink_remap_unlock(file_in, file_out);
+       return ret;
+}
+
+/*
+ * Link a range of blocks from one file to another.
+ */
+int
+xfs_reflink_remap_range(
+       struct file             *file_in,
+       loff_t                  pos_in,
+       struct file             *file_out,
+       loff_t                  pos_out,
+       u64                     len,
+       bool                    is_dedupe)
+{
+       struct inode            *inode_in = file_inode(file_in);
+       struct xfs_inode        *src = XFS_I(inode_in);
+       struct inode            *inode_out = file_inode(file_out);
+       struct xfs_inode        *dest = XFS_I(inode_out);
+       struct xfs_mount        *mp = src->i_mount;
+       xfs_fileoff_t           sfsbno, dfsbno;
+       xfs_filblks_t           fsblen;
+       xfs_extlen_t            cowextsize;
+       ssize_t                 ret;
+
+       if (!xfs_sb_version_hasreflink(&mp->m_sb))
+               return -EOPNOTSUPP;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       /* Prepare and then clone file data. */
+       ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+                       &len, is_dedupe);
+       if (ret <= 0)
+               return ret;
+
+       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
        dfsbno = XFS_B_TO_FSBT(mp, pos_out);
        sfsbno = XFS_B_TO_FSBT(mp, pos_in);
        fsblen = XFS_B_TO_FSB(mp, len);
@@ -1305,10 +1444,6 @@ xfs_reflink_remap_range(
        if (ret)
                goto out_unlock;
 
-       /* Zap any page cache for the destination file's range. */
-       truncate_inode_pages_range(&inode_out->i_data, pos_out,
-                                  PAGE_ALIGN(pos_out + len) - 1);
-
        /*
         * Carry the cowextsize hint from src to dest if we're sharing the
         * entire source file to the entire destination file, the source file
@@ -1325,12 +1460,7 @@ xfs_reflink_remap_range(
                        is_dedupe);
 
 out_unlock:
-       xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
-       if (!same_inode)
-               xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
-       inode_unlock(inode_out);
-       if (!same_inode)
-               inode_unlock_shared(inode_in);
+       xfs_reflink_remap_unlock(file_in, file_out);
        if (ret)
                trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
        return ret;
index 7b75ff6..d7701d4 100644 (file)
@@ -68,7 +68,7 @@
  */
 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
 
 #define EXIT_DATA                                                      \
        *(.exit.data .exit.data.*)                                      \
-       *(.fini_array)                                                  \
-       *(.dtors)                                                       \
+       *(.fini_array .fini_array.*)                                    \
+       *(.dtors .dtors.*)                                              \
        MEM_DISCARD(exit.data*)                                         \
        MEM_DISCARD(exit.rodata*)
 
index ff20b67..22254c1 100644 (file)
@@ -412,6 +412,7 @@ struct cgroup {
         * specific task are charged to the dom_cgrp.
         */
        struct cgroup *dom_cgrp;
+       struct cgroup *old_dom_cgrp;            /* used while enabling threaded */
 
        /* per-cpu recursive resource statistics */
        struct cgroup_rstat_cpu __percpu *rstat_cpu;
index 3f4c0b1..d4b0c79 100644 (file)
@@ -667,10 +667,6 @@ typedef struct pglist_data {
        enum zone_type kcompactd_classzone_idx;
        wait_queue_head_t kcompactd_wait;
        struct task_struct *kcompactd;
-#endif
-#ifdef CONFIG_NUMA_BALANCING
-       /* Lock serializing the migrate rate limiting window */
-       spinlock_t numabalancing_migrate_lock;
 #endif
        /*
         * This is a per-node reserve of pages that are not available
index c7861e4..d837dad 100644 (file)
@@ -2458,6 +2458,13 @@ struct netdev_notifier_info {
        struct netlink_ext_ack  *extack;
 };
 
+struct netdev_notifier_info_ext {
+       struct netdev_notifier_info info; /* must be first */
+       union {
+               u32 mtu;
+       } ext;
+};
+
 struct netdev_notifier_change_info {
        struct netdev_notifier_info info; /* must be first */
        unsigned int flags_changed;
index 5a28ac9..3f529ad 100644 (file)
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void)
        return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
 }
 
+extern bool pm_suspend_via_s2idle(void);
 extern void __init pm_states_init(void);
 extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
 extern void s2idle_wake(void);
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
 static inline void pm_set_resume_via_firmware(void) {}
 static inline bool pm_suspend_via_firmware(void) { return false; }
 static inline bool pm_resume_via_firmware(void) { return false; }
+static inline bool pm_suspend_via_s2idle(void) { return false; }
 
 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
index b9b89d6..99efc15 100644 (file)
@@ -298,7 +298,7 @@ struct devlink_resource {
 
 #define DEVLINK_RESOURCE_ID_PARENT_TOP 0
 
-#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
 enum devlink_param_type {
        DEVLINK_PARAM_TYPE_U8,
        DEVLINK_PARAM_TYPE_U16,
@@ -311,7 +311,7 @@ union devlink_param_value {
        u8 vu8;
        u16 vu16;
        u32 vu32;
-       const char *vstr;
+       char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
        bool vbool;
 };
 
@@ -553,6 +553,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
 int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
                                       union devlink_param_value init_val);
 void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+                                 const char *src);
 struct devlink_region *devlink_region_create(struct devlink *devlink,
                                             const char *region_name,
                                             u32 region_max_snapshots,
@@ -789,6 +791,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id)
 {
 }
 
+static inline void
+devlink_param_value_str_fill(union devlink_param_value *dst_val,
+                            const char *src)
+{
+}
+
 static inline struct devlink_region *
 devlink_region_create(struct devlink *devlink,
                      const char *region_name,
index 69c91d1..c9b7b13 100644 (file)
@@ -394,6 +394,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
 int fib_sync_down_addr(struct net_device *dev, __be32 local);
 int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
index 837393f..573d5b9 100644 (file)
@@ -931,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet,
            TP_fast_assign(
                    __entry->call = call_id;
                    memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+                   __entry->where = where;
                           ),
 
            TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
index ac9e8c9..8cb3a6f 100644 (file)
@@ -18,14 +18,17 @@ struct smc_diag_req {
  * on the internal clcsock, and more SMC-related socket data
  */
 struct smc_diag_msg {
-       __u8    diag_family;
-       __u8    diag_state;
-       __u8    diag_mode;
-       __u8    diag_shutdown;
+       __u8            diag_family;
+       __u8            diag_state;
+       union {
+               __u8    diag_mode;
+               __u8    diag_fallback; /* the old name of the field */
+       };
+       __u8            diag_shutdown;
        struct inet_diag_sockid id;
 
-       __u32   diag_uid;
-       __u64   diag_inode;
+       __u32           diag_uid;
+       __aligned_u64   diag_inode;
 };
 
 /* Mode of a connection */
@@ -99,11 +102,11 @@ struct smc_diag_fallback {
 };
 
 struct smcd_diag_dmbinfo {             /* SMC-D Socket internals */
-       __u32 linkid;                   /* Link identifier */
-       __u64 peer_gid;                 /* Peer GID */
-       __u64 my_gid;                   /* My GID */
-       __u64 token;                    /* Token of DMB */
-       __u64 peer_token;               /* Token of remote DMBE */
+       __u32           linkid;         /* Link identifier */
+       __aligned_u64   peer_gid;       /* Peer GID */
+       __aligned_u64   my_gid;         /* My GID */
+       __aligned_u64   token;          /* Token of DMB */
+       __aligned_u64   peer_token;     /* Token of remote DMBE */
 };
 
 #endif /* _UAPI_SMC_DIAG_H_ */
index 09d00f8..09502de 100644 (file)
@@ -40,5 +40,6 @@ struct udphdr {
 #define UDP_ENCAP_L2TPINUDP    3 /* rfc2661 */
 #define UDP_ENCAP_GTP0         4 /* GSM TS 09.60 */
 #define UDP_ENCAP_GTP1U                5 /* 3GPP TS 29.060 */
+#define UDP_ENCAP_RXRPC                6
 
 #endif /* _UAPI_LINUX_UDP_H */
index aae10ba..4a3dae2 100644 (file)
@@ -2836,11 +2836,12 @@ restart:
 }
 
 /**
- * cgroup_save_control - save control masks of a subtree
+ * cgroup_save_control - save control masks and dom_cgrp of a subtree
  * @cgrp: root of the target subtree
  *
- * Save ->subtree_control and ->subtree_ss_mask to the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
  */
 static void cgroup_save_control(struct cgroup *cgrp)
 {
@@ -2850,6 +2851,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
                dsct->old_subtree_control = dsct->subtree_control;
                dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
+               dsct->old_dom_cgrp = dsct->dom_cgrp;
        }
 }
 
@@ -2875,11 +2877,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
 }
 
 /**
- * cgroup_restore_control - restore control masks of a subtree
+ * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
  * @cgrp: root of the target subtree
  *
- * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
  */
 static void cgroup_restore_control(struct cgroup *cgrp)
 {
@@ -2889,6 +2892,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
        cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
                dsct->subtree_control = dsct->old_subtree_control;
                dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
+               dsct->dom_cgrp = dsct->old_dom_cgrp;
        }
 }
 
@@ -3196,6 +3200,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
 {
        struct cgroup *parent = cgroup_parent(cgrp);
        struct cgroup *dom_cgrp = parent->dom_cgrp;
+       struct cgroup *dsct;
+       struct cgroup_subsys_state *d_css;
        int ret;
 
        lockdep_assert_held(&cgroup_mutex);
@@ -3225,12 +3231,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
         */
        cgroup_save_control(cgrp);
 
-       cgrp->dom_cgrp = dom_cgrp;
+       cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
+               if (dsct == cgrp || cgroup_is_threaded(dsct))
+                       dsct->dom_cgrp = dom_cgrp;
+
        ret = cgroup_apply_control(cgrp);
        if (!ret)
                parent->nr_threaded_children++;
-       else
-               cgrp->dom_cgrp = cgrp;
 
        cgroup_finalize_control(cgrp, ret);
        return ret;
index 5342f6f..0bd595a 100644 (file)
@@ -63,6 +63,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
 enum s2idle_states __read_mostly s2idle_state;
 static DEFINE_RAW_SPINLOCK(s2idle_lock);
 
+bool pm_suspend_via_s2idle(void)
+{
+       return mem_sleep_current == PM_SUSPEND_TO_IDLE;
+}
+EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle);
+
 void s2idle_set_ops(const struct platform_s2idle_ops *ops)
 {
        lock_system_sleep();
index ca3f7eb..4238764 100644 (file)
@@ -119,7 +119,6 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
 obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_BCH) += bch.o
-CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
 obj-$(CONFIG_LZ4_COMPRESS) += lz4/
index 7b0f200..5db6d3a 100644 (file)
--- a/lib/bch.c
+++ b/lib/bch.c
 #define GF_T(_p)               (CONFIG_BCH_CONST_T)
 #define GF_N(_p)               ((1 << (CONFIG_BCH_CONST_M))-1)
 #define BCH_MAX_M              (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T             (CONFIG_BCH_CONST_T)
 #else
 #define GF_M(_p)               ((_p)->m)
 #define GF_T(_p)               ((_p)->t)
 #define GF_N(_p)               ((_p)->n)
-#define BCH_MAX_M              15
+#define BCH_MAX_M              15 /* 2KB */
+#define BCH_MAX_T              64 /* 64 bit correction */
 #endif
 
-#define BCH_MAX_T              (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
-
 #define BCH_ECC_WORDS(_p)      DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
 #define BCH_ECC_BYTES(_p)      DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
 
 #define BCH_ECC_MAX_WORDS      DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
-#define BCH_ECC_MAX_BYTES      DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)
 
 #ifndef dbg
 #define dbg(_fmt, args...)     do {} while (0)
@@ -202,6 +201,9 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
        const uint32_t * const tab3 = tab2 + 256*(l+1);
        const uint32_t *pdata, *p0, *p1, *p2, *p3;
 
+       if (WARN_ON(r_bytes > sizeof(r)))
+               return;
+
        if (ecc) {
                /* load ecc parity bytes into internal 32-bit buffer */
                load_ecc8(bch, bch->ecc_buf, ecc);
@@ -1285,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
                 */
                goto fail;
 
+       if (t > BCH_MAX_T)
+               /*
+                * we can support larger than 64 bits if necessary, at the
+                * cost of higher stack usage.
+                */
+               goto fail;
+
        /* sanity checks */
        if ((t < 1) || (m*t >= ((1 << m)-1)))
                /* invalid t value */
index d5b3a3f..812e59e 100644 (file)
@@ -2794,7 +2794,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
                                                copy = end - str;
                                        memcpy(str, args, copy);
                                        str += len;
-                                       args += len;
+                                       args += len + 1;
                                }
                        }
                        if (process)
index 706a738..e2ef1c1 100644 (file)
@@ -6193,15 +6193,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
        return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static void pgdat_init_numabalancing(struct pglist_data *pgdat)
-{
-       spin_lock_init(&pgdat->numabalancing_migrate_lock);
-}
-#else
-static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
-#endif
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
@@ -6226,7 +6217,6 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
 {
        pgdat_resize_init(pgdat);
 
-       pgdat_init_numabalancing(pgdat);
        pgdat_init_split_queue(pgdat);
        pgdat_init_kcompactd(pgdat);
 
index a749d4d..4b90682 100644 (file)
@@ -1212,6 +1212,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
        if (!chunk)
                return;
+       pcpu_mem_free(chunk->md_blocks);
        pcpu_mem_free(chunk->bound_map);
        pcpu_mem_free(chunk->alloc_map);
        pcpu_mem_free(chunk);
index 82114e1..9324347 100644 (file)
@@ -1752,6 +1752,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
+/**
+ *     call_netdevice_notifiers_mtu - call all network notifier blocks
+ *     @val: value passed unmodified to notifier function
+ *     @dev: net_device pointer passed unmodified to notifier function
+ *     @arg: additional u32 argument passed to the notifier function
+ *
+ *     Call all network notifier blocks.  Parameters and return value
+ *     are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+                                       struct net_device *dev, u32 arg)
+{
+       struct netdev_notifier_info_ext info = {
+               .info.dev = dev,
+               .ext.mtu = arg,
+       };
+
+       BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+       return call_netdevice_notifiers_info(val, &info.info);
+}
+
 #ifdef CONFIG_NET_INGRESS
 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
 
@@ -7574,14 +7596,16 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
        err = __dev_set_mtu(dev, new_mtu);
 
        if (!err) {
-               err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+               err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+                                                  orig_mtu);
                err = notifier_to_errno(err);
                if (err) {
                        /* setting mtu back and notifying everyone again,
                         * so that they have a chance to revert changes.
                         */
                        __dev_set_mtu(dev, orig_mtu);
-                       call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+                       call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+                                                    new_mtu);
                }
        }
        return err;
index 8c0ed22..6bc4293 100644 (file)
@@ -2995,6 +2995,8 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
                                  struct genl_info *info,
                                  union devlink_param_value *value)
 {
+       int len;
+
        if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
            !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
                return -EINVAL;
@@ -3010,10 +3012,13 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
                value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
                break;
        case DEVLINK_PARAM_TYPE_STRING:
-               if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
-                   DEVLINK_PARAM_MAX_STRING_VALUE)
+               len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
+                             nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+               if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+                   len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
                        return -EINVAL;
-               value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               strcpy(value->vstr,
+                      nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
                break;
        case DEVLINK_PARAM_TYPE_BOOL:
                value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
@@ -3100,7 +3105,10 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
                return -EOPNOTSUPP;
 
        if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
-               param_item->driverinit_value = value;
+               if (param->type == DEVLINK_PARAM_TYPE_STRING)
+                       strcpy(param_item->driverinit_value.vstr, value.vstr);
+               else
+                       param_item->driverinit_value = value;
                param_item->driverinit_value_valid = true;
        } else {
                if (!param->set)
@@ -4540,7 +4548,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
                                              DEVLINK_PARAM_CMODE_DRIVERINIT))
                return -EOPNOTSUPP;
 
-       *init_val = param_item->driverinit_value;
+       if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+               strcpy(init_val->vstr, param_item->driverinit_value.vstr);
+       else
+               *init_val = param_item->driverinit_value;
 
        return 0;
 }
@@ -4571,7 +4582,10 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
                                              DEVLINK_PARAM_CMODE_DRIVERINIT))
                return -EOPNOTSUPP;
 
-       param_item->driverinit_value = init_val;
+       if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+               strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+       else
+               param_item->driverinit_value = init_val;
        param_item->driverinit_value_valid = true;
 
        devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
@@ -4603,6 +4617,23 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
 }
 EXPORT_SYMBOL_GPL(devlink_param_value_changed);
 
+/**
+ *     devlink_param_value_str_fill - Safely fill-up the string preventing
+ *                                    from overflow of the preallocated buffer
+ *
+ *     @dst_val: destination devlink_param_value
+ *     @src: source buffer
+ */
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+                                 const char *src)
+{
+       size_t len;
+
+       len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE);
+       WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_str_fill);
+
 /**
  *     devlink_region_create - create a new address region
  *
index b2c807f..428094b 100644 (file)
@@ -4452,14 +4452,16 @@ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
  */
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
 {
-       if (unlikely(start > skb_headlen(skb)) ||
-           unlikely((int)start + off > skb_headlen(skb) - 2)) {
-               net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
-                                    start, off, skb_headlen(skb));
+       u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
+       u32 csum_start = skb_headroom(skb) + (u32)start;
+
+       if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
+               net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
+                                    start, off, skb_headroom(skb), skb_headlen(skb));
                return false;
        }
        skb->ip_summed = CHECKSUM_PARTIAL;
-       skb->csum_start = skb_headroom(skb) + start;
+       skb->csum_start = csum_start;
        skb->csum_offset = off;
        skb_set_transport_header(skb, start);
        return true;
index 2998b0e..0113993 100644 (file)
@@ -1243,7 +1243,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
 static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct netdev_notifier_changeupper_info *info;
+       struct netdev_notifier_changeupper_info *upper_info = ptr;
+       struct netdev_notifier_info_ext *info_ext = ptr;
        struct in_device *in_dev;
        struct net *net = dev_net(dev);
        unsigned int flags;
@@ -1278,16 +1279,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
                        fib_sync_up(dev, RTNH_F_LINKDOWN);
                else
                        fib_sync_down_dev(dev, event, false);
-               /* fall through */
+               rt_cache_flush(net);
+               break;
        case NETDEV_CHANGEMTU:
+               fib_sync_mtu(dev, info_ext->ext.mtu);
                rt_cache_flush(net);
                break;
        case NETDEV_CHANGEUPPER:
-               info = ptr;
+               upper_info = ptr;
                /* flush all routes if dev is linked to or unlinked from
                 * an L3 master device (e.g., VRF)
                 */
-               if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+               if (upper_info->upper_dev &&
+                   netif_is_l3_master(upper_info->upper_dev))
                        fib_disable_ip(dev, NETDEV_DOWN, true);
                break;
        }
index f3c89cc..446204c 100644 (file)
@@ -1470,6 +1470,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
        return NOTIFY_DONE;
 }
 
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ *   larger MTUs on the path. With that limit raised, we can now
+ *   discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+       struct fnhe_hash_bucket *bucket;
+       int i;
+
+       bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+       if (!bucket)
+               return;
+
+       for (i = 0; i < FNHE_HASH_SIZE; i++) {
+               struct fib_nh_exception *fnhe;
+
+               for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+                    fnhe;
+                    fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+                       if (fnhe->fnhe_mtu_locked) {
+                               if (new <= fnhe->fnhe_pmtu) {
+                                       fnhe->fnhe_pmtu = new;
+                                       fnhe->fnhe_mtu_locked = false;
+                               }
+                       } else if (new < fnhe->fnhe_pmtu ||
+                                  orig == fnhe->fnhe_pmtu) {
+                               fnhe->fnhe_pmtu = new;
+                       }
+               }
+       }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+       unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+       struct hlist_head *head = &fib_info_devhash[hash];
+       struct fib_nh *nh;
+
+       hlist_for_each_entry(nh, head, nh_hash) {
+               if (nh->nh_dev == dev)
+                       nh_update_mtu(nh, dev->mtu, orig_mtu);
+       }
+}
+
 /* Event              force Flags           Description
  * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
  * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
index b678466..8501554 100644 (file)
@@ -1001,21 +1001,22 @@ out:    kfree_skb(skb);
 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 {
        struct dst_entry *dst = &rt->dst;
+       u32 old_mtu = ipv4_mtu(dst);
        struct fib_result res;
        bool lock = false;
 
        if (ip_mtu_locked(dst))
                return;
 
-       if (ipv4_mtu(dst) < mtu)
+       if (old_mtu < mtu)
                return;
 
        if (mtu < ip_rt_min_pmtu) {
                lock = true;
-               mtu = ip_rt_min_pmtu;
+               mtu = min(old_mtu, ip_rt_min_pmtu);
        }
 
-       if (rt->rt_pmtu == mtu &&
+       if (rt->rt_pmtu == mtu && !lock &&
            time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
                return;
 
index 7d69dd6..c32a4c1 100644 (file)
@@ -1627,7 +1627,7 @@ busy_check:
        *err = error;
        return NULL;
 }
-EXPORT_SYMBOL_GPL(__skb_recv_udp);
+EXPORT_SYMBOL(__skb_recv_udp);
 
 /*
  *     This should be easy, if there is something there we
index 5516f55..cbe4617 100644 (file)
@@ -196,6 +196,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
                                *ppcpu_rt = NULL;
                        }
                }
+
+               free_percpu(f6i->rt6i_pcpu);
        }
 
        lwtstate_put(f6i->fib6_nh.nh_lwtstate);
index 57b3d5a..fe785ee 100644 (file)
@@ -1007,7 +1007,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
        return ret;
 }
 
-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
+static int rds_send_mprds_hash(struct rds_sock *rs,
+                              struct rds_connection *conn, int nonblock)
 {
        int hash;
 
@@ -1023,10 +1024,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
                 * used.  But if we are interrupted, we have to use the zero
                 * c_path in case the connection ends up being non-MP capable.
                 */
-               if (conn->c_npaths == 0)
+               if (conn->c_npaths == 0) {
+                       /* Cannot wait for the connection be made, so just use
+                        * the base c_path.
+                        */
+                       if (nonblock)
+                               return 0;
                        if (wait_event_interruptible(conn->c_hs_waitq,
                                                     conn->c_npaths != 0))
                                hash = 0;
+               }
                if (conn->c_npaths == 1)
                        hash = 0;
        }
@@ -1256,7 +1263,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        }
 
        if (conn->c_trans->t_mp_capable)
-               cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+               cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
        else
                cpath = &conn->c_path[0];
 
index ef95541..a6e6cae 100644 (file)
@@ -302,6 +302,7 @@ struct rxrpc_peer {
 
        /* calculated RTT cache */
 #define RXRPC_RTT_CACHE_SIZE 32
+       spinlock_t              rtt_input_lock; /* RTT lock for input routine */
        ktime_t                 rtt_last_req;   /* Time of last RTT request */
        u64                     rtt;            /* Current RTT estimate (in nS) */
        u64                     rtt_sum;        /* Sum of cache contents */
@@ -442,17 +443,17 @@ struct rxrpc_connection {
        spinlock_t              state_lock;     /* state-change lock */
        enum rxrpc_conn_cache_state cache_state;
        enum rxrpc_conn_proto_state state;      /* current state of connection */
-       u32                     local_abort;    /* local abort code */
-       u32                     remote_abort;   /* remote abort code */
+       u32                     abort_code;     /* Abort code of connection abort */
        int                     debug_id;       /* debug ID for printks */
        atomic_t                serial;         /* packet serial number counter */
        unsigned int            hi_serial;      /* highest serial number received */
        u32                     security_nonce; /* response re-use preventer */
-       u16                     service_id;     /* Service ID, possibly upgraded */
+       u32                     service_id;     /* Service ID, possibly upgraded */
        u8                      size_align;     /* data size alignment (for security) */
        u8                      security_size;  /* security header size */
        u8                      security_ix;    /* security type */
        u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+       short                   error;          /* Local error code */
 };
 
 static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
@@ -635,6 +636,8 @@ struct rxrpc_call {
        bool                    tx_phase;       /* T if transmission phase, F if receive phase */
        u8                      nr_jumbo_bad;   /* Number of jumbo dups/exceeds-windows */
 
+       spinlock_t              input_lock;     /* Lock for packet input to this call */
+
        /* receive-phase ACK management */
        u8                      ackr_reason;    /* reason to ACK */
        u16                     ackr_skew;      /* skew on packet being ACK'd */
@@ -720,8 +723,6 @@ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
                                           struct rxrpc_sock *,
-                                          struct rxrpc_peer *,
-                                          struct rxrpc_connection *,
                                           struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
 struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
@@ -891,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;
 
 void rxrpc_destroy_client_conn_ids(void);
-int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
-                      struct sockaddr_rxrpc *, gfp_t);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
+                      struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
+                      gfp_t);
 void rxrpc_expose_client_call(struct rxrpc_call *);
 void rxrpc_disconnect_client_call(struct rxrpc_call *);
 void rxrpc_put_client_conn(struct rxrpc_connection *);
@@ -965,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
 /*
  * input.c
  */
-void rxrpc_data_ready(struct sock *);
+int rxrpc_input_packet(struct sock *, struct sk_buff *);
 
 /*
  * insecure.c
@@ -1045,10 +1047,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
  */
 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
                                         const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
                                     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
+                            struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
index 9c7f26d..652e314 100644 (file)
@@ -287,7 +287,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
 
-                       rxrpc_new_incoming_peer(local, peer);
+                       rxrpc_new_incoming_peer(rx, local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -333,11 +333,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct rxrpc_sock *rx,
-                                          struct rxrpc_peer *peer,
-                                          struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+       struct rxrpc_connection *conn;
+       struct rxrpc_peer *peer;
        struct rxrpc_call *call;
 
        _enter("");
@@ -354,6 +354,13 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                goto out;
        }
 
+       /* The peer, connection and call may all have sprung into existence due
+        * to a duplicate packet being handled on another CPU in parallel, so
+        * we have to recheck the routing.  However, we're now holding
+        * rx->incoming_lock, so the values should remain stable.
+        */
+       conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
        call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
@@ -396,20 +403,22 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
        case RXRPC_CONN_SERVICE:
                write_lock(&call->state_lock);
-               if (rx->discard_new_call)
-                       call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-               else
-                       call->state = RXRPC_CALL_SERVER_ACCEPTING;
+               if (call->state < RXRPC_CALL_COMPLETE) {
+                       if (rx->discard_new_call)
+                               call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+                       else
+                               call->state = RXRPC_CALL_SERVER_ACCEPTING;
+               }
                write_unlock(&call->state_lock);
                break;
 
        case RXRPC_CONN_REMOTELY_ABORTED:
                rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-                                         conn->remote_abort, -ECONNABORTED);
+                                         conn->abort_code, conn->error);
                break;
        case RXRPC_CONN_LOCALLY_ABORTED:
                rxrpc_abort_call("CON", call, sp->hdr.seq,
-                                conn->local_abort, -ECONNABORTED);
+                                conn->abort_code, conn->error);
                break;
        default:
                BUG();
index 799f75b..8f1a8f8 100644 (file)
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->lock);
        spin_lock_init(&call->notify_lock);
+       spin_lock_init(&call->input_lock);
        rwlock_init(&call->state_lock);
        atomic_set(&call->usage, 1);
        call->debug_id = debug_id;
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
-       ret = rxrpc_connect_call(call, cp, srx, gfp);
+       ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error;
 
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx,
        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
-       ret = rxrpc_connect_call(call, cp, srx, gfp);
+       ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto error;
 
index 8acf74f..521189f 100644 (file)
@@ -276,7 +276,8 @@ dont_reuse:
  * If we return with a connection, the call will be on its waiting list.  It's
  * left to the caller to assign a channel and wake up the call.
  */
-static int rxrpc_get_client_conn(struct rxrpc_call *call,
+static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
+                                struct rxrpc_call *call,
                                 struct rxrpc_conn_parameters *cp,
                                 struct sockaddr_rxrpc *srx,
                                 gfp_t gfp)
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
 
        _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
 
-       cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
+       cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
        if (!cp->peer)
                goto error;
 
@@ -683,7 +684,8 @@ out:
  * find a connection for a call
  * - called in process context with IRQs enabled
  */
-int rxrpc_connect_call(struct rxrpc_call *call,
+int rxrpc_connect_call(struct rxrpc_sock *rx,
+                      struct rxrpc_call *call,
                       struct rxrpc_conn_parameters *cp,
                       struct sockaddr_rxrpc *srx,
                       gfp_t gfp)
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
        rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
        rxrpc_cull_active_client_conns(rxnet);
 
-       ret = rxrpc_get_client_conn(call, cp, srx, gfp);
+       ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
        if (ret < 0)
                goto out;
 
index 6df56ce..b6fca8e 100644 (file)
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 
        switch (chan->last_type) {
        case RXRPC_PACKET_TYPE_ABORT:
-               _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
+               _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
                break;
        case RXRPC_PACKET_TYPE_ACK:
                trace_rxrpc_tx_ack(chan->call_debug_id, serial,
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
  * pass a connection-level abort onto all calls on that connection
  */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl,
-                             u32 abort_code, int error)
+                             enum rxrpc_call_completion compl)
 {
        struct rxrpc_call *call;
        int i;
 
-       _enter("{%d},%x", conn->debug_id, abort_code);
+       _enter("{%d},%x", conn->debug_id, conn->abort_code);
 
        spin_lock(&conn->channel_lock);
 
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
                                trace_rxrpc_abort(call->debug_id,
                                                  "CON", call->cid,
                                                  call->call_id, 0,
-                                                 abort_code, error);
+                                                 conn->abort_code,
+                                                 conn->error);
                        if (rxrpc_set_call_completion(call, compl,
-                                                     abort_code, error))
+                                                     conn->abort_code,
+                                                     conn->error))
                                rxrpc_notify_socket(call);
                }
        }
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
                return 0;
        }
 
+       conn->error = error;
+       conn->abort_code = abort_code;
        conn->state = RXRPC_CONN_LOCALLY_ABORTED;
        spin_unlock_bh(&conn->state_lock);
 
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
+       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
 
        msg.msg_name    = &conn->params.peer->srx.transport;
        msg.msg_namelen = conn->params.peer->srx.transport_len;
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
        whdr._rsvd      = 0;
        whdr.serviceId  = htons(conn->service_id);
 
-       word            = htonl(conn->local_abort);
+       word            = htonl(conn->abort_code);
 
        iov[0].iov_base = &whdr;
        iov[0].iov_len  = sizeof(whdr);
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 
        serial = atomic_inc_return(&conn->serial);
        whdr.serial = htonl(serial);
-       _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
+       _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        if (ret < 0) {
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                abort_code = ntohl(wtmp);
                _proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
 
+               conn->error = -ECONNABORTED;
+               conn->abort_code = abort_code;
                conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
-                                 abort_code, -ECONNABORTED);
+               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
                return -ECONNABORTED;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
index 800f5b8..570b49d 100644 (file)
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
 /*
  * Apply a hard ACK by advancing the Tx window.
  */
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
                                   struct rxrpc_ack_summary *summary)
 {
        struct sk_buff *skb, *list = NULL;
+       bool rot_last = false;
        int ix;
        u8 annotation;
 
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
                skb->next = list;
                list = skb;
 
-               if (annotation & RXRPC_TX_ANNO_LAST)
+               if (annotation & RXRPC_TX_ANNO_LAST) {
                        set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+                       rot_last = true;
+               }
                if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
                        summary->nr_rot_new_acks++;
        }
 
        spin_unlock(&call->lock);
 
-       trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+       trace_rxrpc_transmit(call, (rot_last ?
                                    rxrpc_transmit_rotate_last :
                                    rxrpc_transmit_rotate));
        wake_up(&call->waitq);
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
                skb->next = NULL;
                rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
        }
+
+       return rot_last;
 }
 
 /*
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
                               const char *abort_why)
 {
+       unsigned int state;
 
        ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
 
        write_lock(&call->state_lock);
 
-       switch (call->state) {
+       state = call->state;
+       switch (state) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                if (reply_begun)
-                       call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+                       call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
                else
-                       call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+                       call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
                break;
 
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                __rxrpc_call_completed(call);
                rxrpc_notify_socket(call);
+               state = call->state;
                break;
 
        default:
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
        }
 
        write_unlock(&call->state_lock);
-       if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+       if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
                trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
-       } else {
+       else
                trace_rxrpc_transmit(call, rxrpc_transmit_end);
-       }
        _leave(" = ok");
        return true;
 
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
                trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
        }
 
-       if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
-               rxrpc_rotate_tx_window(call, top, &summary);
        if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
-               rxrpc_proto_abort("TXL", call, top);
-               return false;
+               if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+                       rxrpc_proto_abort("TXL", call, top);
+                       return false;
+               }
        }
        if (!rxrpc_end_tx_phase(call, true, "ETD"))
                return false;
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
                }
        }
 
+       spin_lock(&call->input_lock);
+
        /* Received data implicitly ACKs all of the request packets we sent
         * when we're acting as a client.
         */
        if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
             state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
            !rxrpc_receiving_reply(call))
-               return;
+               goto unlock;
 
        call->ackr_prev_seq = seq;
 
@@ -488,12 +497,16 @@ next_subpacket:
 
        if (flags & RXRPC_LAST_PACKET) {
                if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-                   seq != call->rx_top)
-                       return rxrpc_proto_abort("LSN", call, seq);
+                   seq != call->rx_top) {
+                       rxrpc_proto_abort("LSN", call, seq);
+                       goto unlock;
+               }
        } else {
                if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-                   after_eq(seq, call->rx_top))
-                       return rxrpc_proto_abort("LSA", call, seq);
+                   after_eq(seq, call->rx_top)) {
+                       rxrpc_proto_abort("LSA", call, seq);
+                       goto unlock;
+               }
        }
 
        trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
@@ -560,8 +573,10 @@ next_subpacket:
 skip:
        offset += len;
        if (flags & RXRPC_JUMBO_PACKET) {
-               if (skb_copy_bits(skb, offset, &flags, 1) < 0)
-                       return rxrpc_proto_abort("XJF", call, seq);
+               if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
+                       rxrpc_proto_abort("XJF", call, seq);
+                       goto unlock;
+               }
                offset += sizeof(struct rxrpc_jumbo_header);
                seq++;
                serial++;
@@ -601,6 +616,9 @@ ack:
                trace_rxrpc_notify_socket(call->debug_id, serial);
                rxrpc_notify_socket(call);
        }
+
+unlock:
+       spin_unlock(&call->input_lock);
        _leave(" [queued]");
 }
 
@@ -687,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
 
        ping_time = call->ping_time;
        smp_rmb();
-       ping_serial = call->ping_serial;
+       ping_serial = READ_ONCE(call->ping_serial);
 
        if (orig_serial == call->acks_lost_ping)
                rxrpc_input_check_for_lost_ack(call);
 
-       if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
-           before(orig_serial, ping_serial))
+       if (before(orig_serial, ping_serial) ||
+           !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
                return;
-       clear_bit(RXRPC_CALL_PINGING, &call->flags);
        if (after(orig_serial, ping_serial))
                return;
 
@@ -861,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                                  rxrpc_propose_ack_respond_to_ack);
        }
 
+       /* Discard any out-of-order or duplicate ACKs. */
+       if (before_eq(sp->hdr.serial, call->acks_latest))
+               return;
+
+       buf.info.rxMTU = 0;
        ioffset = offset + nr_acks + 3;
-       if (skb->len >= ioffset + sizeof(buf.info)) {
-               if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
-                       return rxrpc_proto_abort("XAI", call, 0);
+       if (skb->len >= ioffset + sizeof(buf.info) &&
+           skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+               return rxrpc_proto_abort("XAI", call, 0);
+
+       spin_lock(&call->input_lock);
+
+       /* Discard any out-of-order or duplicate ACKs. */
+       if (before_eq(sp->hdr.serial, call->acks_latest))
+               goto out;
+       call->acks_latest_ts = skb->tstamp;
+       call->acks_latest = sp->hdr.serial;
+
+       /* Parse rwind and mtu sizes if provided. */
+       if (buf.info.rxMTU)
                rxrpc_input_ackinfo(call, skb, &buf.info);
-       }
 
-       if (first_soft_ack == 0)
-               return rxrpc_proto_abort("AK0", call, 0);
+       if (first_soft_ack == 0) {
+               rxrpc_proto_abort("AK0", call, 0);
+               goto out;
+       }
 
        /* Ignore ACKs unless we are or have just been transmitting. */
        switch (READ_ONCE(call->state)) {
@@ -879,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                break;
        default:
-               return;
-       }
-
-       /* Discard any out-of-order or duplicate ACKs. */
-       if (before_eq(sp->hdr.serial, call->acks_latest)) {
-               _debug("discard ACK %d <= %d",
-                      sp->hdr.serial, call->acks_latest);
-               return;
+               goto out;
        }
-       call->acks_latest_ts = skb->tstamp;
-       call->acks_latest = sp->hdr.serial;
 
        if (before(hard_ack, call->tx_hard_ack) ||
-           after(hard_ack, call->tx_top))
-               return rxrpc_proto_abort("AKW", call, 0);
-       if (nr_acks > call->tx_top - hard_ack)
-               return rxrpc_proto_abort("AKN", call, 0);
+           after(hard_ack, call->tx_top)) {
+               rxrpc_proto_abort("AKW", call, 0);
+               goto out;
+       }
+       if (nr_acks > call->tx_top - hard_ack) {
+               rxrpc_proto_abort("AKN", call, 0);
+               goto out;
+       }
 
-       if (after(hard_ack, call->tx_hard_ack))
-               rxrpc_rotate_tx_window(call, hard_ack, &summary);
+       if (after(hard_ack, call->tx_hard_ack)) {
+               if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+                       rxrpc_end_tx_phase(call, false, "ETA");
+                       goto out;
+               }
+       }
 
        if (nr_acks > 0) {
-               if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
-                       return rxrpc_proto_abort("XSA", call, 0);
+               if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
+                       rxrpc_proto_abort("XSA", call, 0);
+                       goto out;
+               }
                rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
                                      &summary);
        }
 
-       if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
-               rxrpc_end_tx_phase(call, false, "ETA");
-               return;
-       }
-
        if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
            RXRPC_TX_ANNO_LAST &&
            summary.nr_acks == call->tx_top - hard_ack &&
@@ -920,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
                                  false, true,
                                  rxrpc_propose_ack_ping_for_lost_reply);
 
-       return rxrpc_congestion_management(call, skb, &summary, acked_serial);
+       rxrpc_congestion_management(call, skb, &summary, acked_serial);
+out:
+       spin_unlock(&call->input_lock);
 }
 
 /*
@@ -933,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
 
        _proto("Rx ACKALL %%%u", sp->hdr.serial);
 
-       rxrpc_rotate_tx_window(call, call->tx_top, &summary);
-       if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+       spin_lock(&call->input_lock);
+
+       if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
                rxrpc_end_tx_phase(call, false, "ETL");
+
+       spin_unlock(&call->input_lock);
 }
 
 /*
@@ -1018,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 }
 
 /*
- * Handle a new call on a channel implicitly completing the preceding call on
- * that channel.
+ * Handle a new service call on a channel implicitly completing the preceding
+ * call on that channel.  This does not apply to client conns.
  *
  * TODO: If callNumber > call_id + 1, renegotiate security.
  */
-static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
+static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
+                                         struct rxrpc_connection *conn,
                                          struct rxrpc_call *call)
 {
        switch (READ_ONCE(call->state)) {
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                rxrpc_call_completed(call);
-               break;
+               /* Fall through */
        case RXRPC_CALL_COMPLETE:
                break;
        default:
@@ -1037,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
                        set_bit(RXRPC_CALL_EV_ABORT, &call->events);
                        rxrpc_queue_call(call);
                }
+               trace_rxrpc_improper_term(call);
                break;
        }
 
-       trace_rxrpc_improper_term(call);
+       spin_lock(&rx->incoming_lock);
        __rxrpc_disconnect_call(conn, call);
+       spin_unlock(&rx->incoming_lock);
        rxrpc_notify_socket(call);
 }
 
@@ -1120,8 +1158,10 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
  * The socket is locked by the caller and this prevents the socket from being
  * shut down and the local endpoint from going away, thus sk_user_data will not
  * be cleared until this function returns.
+ *
+ * Called with the RCU read lock held from the IP layer via UDP.
  */
-void rxrpc_data_ready(struct sock *udp_sk)
+int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
@@ -1130,38 +1170,17 @@ void rxrpc_data_ready(struct sock *udp_sk)
        struct rxrpc_local *local = udp_sk->sk_user_data;
        struct rxrpc_peer *peer = NULL;
        struct rxrpc_sock *rx = NULL;
-       struct sk_buff *skb;
        unsigned int channel;
-       int ret, skew = 0;
+       int skew = 0;
 
        _enter("%p", udp_sk);
 
-       ASSERT(!irqs_disabled());
-
-       skb = skb_recv_udp(udp_sk, 0, 1, &ret);
-       if (!skb) {
-               if (ret == -EAGAIN)
-                       return;
-               _debug("UDP socket error %d", ret);
-               return;
-       }
-
        if (skb->tstamp == 0)
                skb->tstamp = ktime_get_real();
 
        rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
-       _net("recv skb %p", skb);
-
-       /* we'll probably need to checksum it (didn't call sock_recvmsg) */
-       if (skb_checksum_complete(skb)) {
-               rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
-               __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
-               _leave(" [CSUM failed]");
-               return;
-       }
-
-       __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
+       skb_pull(skb, sizeof(struct udphdr));
 
        /* The UDP protocol already released all skb resources;
         * we are free to add our own data there.
@@ -1176,11 +1195,13 @@ void rxrpc_data_ready(struct sock *udp_sk)
                static int lose;
                if ((lose++ & 7) == 7) {
                        trace_rxrpc_rx_lose(sp);
-                       rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
-                       return;
+                       rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+                       return 0;
                }
        }
 
+       if (skb->tstamp == 0)
+               skb->tstamp = ktime_get_real();
        trace_rxrpc_rx_packet(sp);
 
        switch (sp->hdr.type) {
@@ -1234,8 +1255,6 @@ void rxrpc_data_ready(struct sock *udp_sk)
        if (sp->hdr.serviceId == 0)
                goto bad_message;
 
-       rcu_read_lock();
-
        if (rxrpc_to_server(sp)) {
                /* Weed out packets to services we're not offering.  Packets
                 * that would begin a call are explicitly rejected and the rest
@@ -1247,7 +1266,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                        if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
                            sp->hdr.seq == 1)
                                goto unsupported_service;
-                       goto discard_unlock;
+                       goto discard;
                }
        }
 
@@ -1257,17 +1276,23 @@ void rxrpc_data_ready(struct sock *udp_sk)
                        goto wrong_security;
 
                if (sp->hdr.serviceId != conn->service_id) {
-                       if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
-                           conn->service_id != conn->params.service_id)
+                       int old_id;
+
+                       if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
+                               goto reupgrade;
+                       old_id = cmpxchg(&conn->service_id, conn->params.service_id,
+                                        sp->hdr.serviceId);
+
+                       if (old_id != conn->params.service_id &&
+                           old_id != sp->hdr.serviceId)
                                goto reupgrade;
-                       conn->service_id = sp->hdr.serviceId;
                }
 
                if (sp->hdr.callNumber == 0) {
                        /* Connection-level packet */
                        _debug("CONN %p {%d}", conn, conn->debug_id);
                        rxrpc_post_packet_to_conn(conn, skb);
-                       goto out_unlock;
+                       goto out;
                }
 
                /* Note the serial number skew here */
@@ -1286,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
                /* Ignore really old calls */
                if (sp->hdr.callNumber < chan->last_call)
-                       goto discard_unlock;
+                       goto discard;
 
                if (sp->hdr.callNumber == chan->last_call) {
                        if (chan->call ||
                            sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
-                               goto discard_unlock;
+                               goto discard;
 
                        /* For the previous service call, if completed
                         * successfully, we discard all further packets.
                         */
                        if (rxrpc_conn_is_service(conn) &&
                            chan->last_type == RXRPC_PACKET_TYPE_ACK)
-                               goto discard_unlock;
+                               goto discard;
 
                        /* But otherwise we need to retransmit the final packet
                         * from data cached in the connection record.
@@ -1309,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
                                                    sp->hdr.serial,
                                                    sp->hdr.flags, 0);
                        rxrpc_post_packet_to_conn(conn, skb);
-                       goto out_unlock;
+                       goto out;
                }
 
                call = rcu_dereference(chan->call);
 
                if (sp->hdr.callNumber > chan->call_id) {
-                       if (rxrpc_to_client(sp)) {
-                               rcu_read_unlock();
+                       if (rxrpc_to_client(sp))
                                goto reject_packet;
-                       }
                        if (call)
-                               rxrpc_input_implicit_end_call(conn, call);
+                               rxrpc_input_implicit_end_call(rx, conn, call);
                        call = NULL;
                }
 
@@ -1337,55 +1360,42 @@ void rxrpc_data_ready(struct sock *udp_sk)
        if (!call || atomic_read(&call->usage) == 0) {
                if (rxrpc_to_client(sp) ||
                    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
-                       goto bad_message_unlock;
+                       goto bad_message;
                if (sp->hdr.seq != 1)
-                       goto discard_unlock;
-               call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
-               if (!call) {
-                       rcu_read_unlock();
+                       goto discard;
+               call = rxrpc_new_incoming_call(local, rx, skb);
+               if (!call)
                        goto reject_packet;
-               }
                rxrpc_send_ping(call, skb, skew);
                mutex_unlock(&call->user_mutex);
        }
 
        rxrpc_input_call_packet(call, skb, skew);
-       goto discard_unlock;
+       goto discard;
 
-discard_unlock:
-       rcu_read_unlock();
 discard:
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 out:
        trace_rxrpc_rx_done(0, 0);
-       return;
-
-out_unlock:
-       rcu_read_unlock();
-       goto out;
+       return 0;
 
 wrong_security:
-       rcu_read_unlock();
        trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RXKADINCONSISTENCY, EBADMSG);
        skb->priority = RXKADINCONSISTENCY;
        goto post_abort;
 
 unsupported_service:
-       rcu_read_unlock();
        trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RX_INVALID_OPERATION, EOPNOTSUPP);
        skb->priority = RX_INVALID_OPERATION;
        goto post_abort;
 
 reupgrade:
-       rcu_read_unlock();
        trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RX_PROTOCOL_ERROR, EBADMSG);
        goto protocol_error;
 
-bad_message_unlock:
-       rcu_read_unlock();
 bad_message:
        trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RX_PROTOCOL_ERROR, EBADMSG);
@@ -1397,4 +1407,5 @@ reject_packet:
        trace_rxrpc_rx_done(skb->mark, skb->priority);
        rxrpc_reject_packet(local, skb);
        _leave(" [badmsg]");
+       return 0;
 }
index 94d234e..cad0691 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/ip.h>
 #include <linux/hashtable.h>
 #include <net/sock.h>
+#include <net/udp.h>
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
  */
 static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
 {
-       struct sock *sock;
+       struct sock *usk;
        int ret, opt;
 
        _enter("%p{%d,%d}",
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                return ret;
        }
 
+       /* set the socket up */
+       usk = local->socket->sk;
+       inet_sk(usk)->mc_loop = 0;
+
+       /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+       inet_inc_convert_csum(usk);
+
+       rcu_assign_sk_user_data(usk, local);
+
+       udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
+       udp_sk(usk)->encap_rcv = rxrpc_input_packet;
+       udp_sk(usk)->encap_destroy = NULL;
+       udp_sk(usk)->gro_receive = NULL;
+       udp_sk(usk)->gro_complete = NULL;
+
+       udp_encap_enable();
+#if IS_ENABLED(CONFIG_IPV6)
+       if (local->srx.transport.family == AF_INET6)
+               udpv6_encap_enable();
+#endif
+       usk->sk_error_report = rxrpc_error_report;
+
        /* if a local address was supplied then bind it */
        if (local->srx.transport_len > sizeof(sa_family_t)) {
                _debug("bind");
@@ -191,11 +214,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                BUG();
        }
 
-       /* set the socket up */
-       sock = local->socket->sk;
-       sock->sk_user_data      = local;
-       sock->sk_data_ready     = rxrpc_data_ready;
-       sock->sk_error_report   = rxrpc_error_report;
        _leave(" = 0");
        return 0;
 
index f3e6fc6..05b51bd 100644 (file)
@@ -301,6 +301,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
        if (rtt < 0)
                return;
 
+       spin_lock(&peer->rtt_input_lock);
+
        /* Replace the oldest datum in the RTT buffer */
        sum -= peer->rtt_cache[cursor];
        sum += rtt;
@@ -312,6 +314,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
                peer->rtt_usage = usage;
        }
 
+       spin_unlock(&peer->rtt_input_lock);
+
        /* Now recalculate the average */
        if (usage == RXRPC_RTT_CACHE_SIZE) {
                avg = sum / RXRPC_RTT_CACHE_SIZE;
@@ -320,6 +324,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
                do_div(avg, usage);
        }
 
+       /* Don't need to update this under lock */
        peer->rtt = avg;
        trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
                           usage, avg);
index 01a9feb..5691b7d 100644 (file)
@@ -153,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
  * assess the MTU size for the network interface through which this peer is
  * reached
  */
-static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
+static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+                                 struct rxrpc_peer *peer)
 {
+       struct net *net = sock_net(&rx->sk);
        struct dst_entry *dst;
        struct rtable *rt;
        struct flowi fl;
@@ -169,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
        switch (peer->srx.transport.family) {
        case AF_INET:
                rt = ip_route_output_ports(
-                       &init_net, fl4, NULL,
+                       net, fl4, NULL,
                        peer->srx.transport.sin.sin_addr.s_addr, 0,
                        htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
                if (IS_ERR(rt)) {
@@ -188,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
                       sizeof(struct in6_addr));
                fl6->fl6_dport = htons(7001);
                fl6->fl6_sport = htons(7000);
-               dst = ip6_route_output(&init_net, NULL, fl6);
+               dst = ip6_route_output(net, NULL, fl6);
                if (dst->error) {
                        _leave(" [route err %d]", dst->error);
                        return;
@@ -223,6 +225,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
+               spin_lock_init(&peer->rtt_input_lock);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
 
                if (RXRPC_TX_SMSS > 2190)
@@ -240,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
 /*
  * Initialise peer record.
  */
-static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+                           unsigned long hash_key)
 {
        peer->hash_key = hash_key;
-       rxrpc_assess_MTU_size(peer);
+       rxrpc_assess_MTU_size(rx, peer);
        peer->mtu = peer->if_mtu;
        peer->rtt_last_req = ktime_get_real();
 
@@ -275,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
 /*
  * Set up a new peer.
  */
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
+                                           struct rxrpc_local *local,
                                            struct sockaddr_rxrpc *srx,
                                            unsigned long hash_key,
                                            gfp_t gfp)
@@ -287,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
        peer = rxrpc_alloc_peer(local, gfp);
        if (peer) {
                memcpy(&peer->srx, srx, sizeof(*srx));
-               rxrpc_init_peer(peer, hash_key);
+               rxrpc_init_peer(rx, peer, hash_key);
        }
 
        _leave(" = %p", peer);
@@ -299,14 +304,15 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
  * since we've already done a search in the list from the non-reentrant context
  * (the data_ready handler) that is the only place we can add new peers.
  */
-void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
+void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
+                            struct rxrpc_peer *peer)
 {
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
        peer->local = local;
-       rxrpc_init_peer(peer, hash_key);
+       rxrpc_init_peer(rx, peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
        hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -317,7 +323,8 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 /*
  * obtain a remote transport endpoint for the specified address
  */
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
+                                    struct rxrpc_local *local,
                                     struct sockaddr_rxrpc *srx, gfp_t gfp)
 {
        struct rxrpc_peer *peer, *candidate;
@@ -337,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                /* The peer is not yet present in hash - create a candidate
                 * for a new record and then redo the search.
                 */
-               candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
+               candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
                if (!candidate) {
                        _leave(" = NULL [nomem]");
                        return NULL;
index f218ccf..b2c3406 100644 (file)
@@ -398,6 +398,7 @@ static int u32_init(struct tcf_proto *tp)
        rcu_assign_pointer(tp_c->hlist, root_ht);
        root_ht->tp_c = tp_c;
 
+       root_ht->refcnt++;
        rcu_assign_pointer(tp->root, root_ht);
        tp->data = tp_c;
        return 0;
@@ -610,7 +611,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
        struct tc_u_hnode __rcu **hn;
        struct tc_u_hnode *phn;
 
-       WARN_ON(ht->refcnt);
+       WARN_ON(--ht->refcnt);
 
        u32_clear_hnode(tp, ht, extack);
 
@@ -649,7 +650,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
 
        WARN_ON(root_ht == NULL);
 
-       if (root_ht && --root_ht->refcnt == 0)
+       if (root_ht && --root_ht->refcnt == 1)
                u32_destroy_hnode(tp, root_ht, extack);
 
        if (--tp_c->refcnt == 0) {
@@ -698,7 +699,6 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
        }
 
        if (ht->refcnt == 1) {
-               ht->refcnt--;
                u32_destroy_hnode(tp, ht, extack);
        } else {
                NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
@@ -708,11 +708,11 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
 out:
        *last = true;
        if (root_ht) {
-               if (root_ht->refcnt > 1) {
+               if (root_ht->refcnt > 2) {
                        *last = false;
                        goto ret;
                }
-               if (root_ht->refcnt == 1) {
+               if (root_ht->refcnt == 2) {
                        if (!ht_empty(root_ht)) {
                                *last = false;
                                goto ret;
index c07c30b..793016d 100644 (file)
@@ -2644,7 +2644,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        for (i = 1; i <= CAKE_QUEUES; i++)
                quantum_div[i] = 65535 / i;
 
-       q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
+       q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
                           GFP_KERNEL);
        if (!q->tins)
                goto nomem;
index fb886b5..f6552e4 100644 (file)
@@ -477,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
        l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
+       if (bc_rcvlink)
+               bc_rcvlink->tolerance = tolerance;
        l->net_plane = net_plane;
        l->advertised_mtu = mtu;
        l->mtu = mtu;
@@ -843,14 +845,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
 
 void tipc_link_reset(struct tipc_link *l)
 {
+       struct sk_buff_head list;
+
+       __skb_queue_head_init(&list);
+
        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
+
        spin_lock_bh(&l->wakeupq.lock);
+       skb_queue_splice_init(&l->wakeupq, &list);
+       spin_unlock_bh(&l->wakeupq.lock);
+
        spin_lock_bh(&l->inputq->lock);
-       skb_queue_splice_init(&l->wakeupq, l->inputq);
+       skb_queue_splice_init(&list, l->inputq);
        spin_unlock_bh(&l->inputq->lock);
-       spin_unlock_bh(&l->wakeupq.lock);
 
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
@@ -1031,7 +1040,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
        /* Detect repeated retransmit failures on same packet */
        if (r->last_retransm != buf_seqno(skb)) {
                r->last_retransm = buf_seqno(skb);
-               r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+               r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
        } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                link_retransmit_failure(l, skb);
                if (link_is_bc_sndlink(l))
@@ -1576,9 +1585,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                strncpy(if_name, data, TIPC_MAX_IF_NAME);
 
                /* Update own tolerance if peer indicates a non-zero value */
-               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
-
+                       l->bc_rcvlink->tolerance = peers_tol;
+               }
                /* Update own priority if peer's priority is higher */
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;
@@ -1604,9 +1614,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                l->rcv_nxt_state = msg_seqno(hdr) + 1;
 
                /* Update own tolerance if peer indicates a non-zero value */
-               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
                        l->tolerance = peers_tol;
-
+                       l->bc_rcvlink->tolerance = peers_tol;
+               }
                /* Update own prio if peer indicates a different value */
                if ((peers_prio != l->priority) &&
                    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2223,6 +2234,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
                             struct sk_buff_head *xmitq)
 {
        l->tolerance = tol;
+       if (l->bc_rcvlink)
+               l->bc_rcvlink->tolerance = tol;
        if (link_is_up(l))
                tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
 }
index b6f99b0..49810fd 100644 (file)
@@ -1196,6 +1196,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
  * @skb: pointer to message buffer.
  */
 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+                                  struct sk_buff_head *inputq,
                                   struct sk_buff_head *xmitq)
 {
        struct tipc_msg *hdr = buf_msg(skb);
@@ -1213,7 +1214,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
                tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
                                      tsk_peer_port(tsk));
                sk->sk_state_change(sk);
-               goto exit;
+
+               /* State change is ignored if socket already awake,
+                * - convert msg to abort msg and add to inqueue
+                */
+               msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+               msg_set_type(hdr, TIPC_CONN_MSG);
+               msg_set_size(hdr, BASIC_H_SIZE);
+               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+               __skb_queue_tail(inputq, skb);
+               return;
        }
 
        tsk->probe_unacked = false;
@@ -1936,7 +1946,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
 
        switch (msg_user(hdr)) {
        case CONN_MANAGER:
-               tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+               tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
                return;
        case SOCK_WAKEUP:
                tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
index bd133ef..ad1ec70 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig SAMPLES
        bool "Sample kernel code"
+       depends on !UML
        help
          You can build and test sample kernel code here.
 
index 5a2d1c9..54da4b0 100644 (file)
@@ -219,7 +219,7 @@ else
 sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
        "$(if $(CONFIG_64BIT),64,32)" \
-       "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
+       "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \
        "$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
index efcaf6c..e46f51b 100644 (file)
@@ -204,14 +204,23 @@ from ctypes import *
 libpq = CDLL("libpq.so.5")
 PQconnectdb = libpq.PQconnectdb
 PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
 PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
 PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
 PQexec = libpq.PQexec
 PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
 PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
 PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
 PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
 PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
 PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
index f827bf7..e4bb82c 100644 (file)
@@ -440,7 +440,11 @@ def branch_type_table(*x):
 
 def sample_table(*x):
        if branches:
-               bind_exec(sample_query, 18, x)
+               for xx in x[0:15]:
+                       sample_query.addBindValue(str(xx))
+               for xx in x[19:22]:
+                       sample_query.addBindValue(str(xx))
+               do_query_(sample_query)
        else:
                bind_exec(sample_query, 22, x)
 
index c4acd20..111ae85 100644 (file)
@@ -2286,7 +2286,8 @@ static int append_inlines(struct callchain_cursor *cursor,
        if (!symbol_conf.inline_name || !map || !sym)
                return ret;
 
-       addr = map__rip_2objdump(map, ip);
+       addr = map__map_ip(map, ip);
+       addr = map__rip_2objdump(map, addr);
 
        inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
        if (!inline_node) {
@@ -2312,7 +2313,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
 {
        struct callchain_cursor *cursor = arg;
        const char *srcline = NULL;
-       u64 addr;
+       u64 addr = entry->ip;
 
        if (symbol_conf.hide_unresolved && entry->sym == NULL)
                return 0;
@@ -2324,7 +2325,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
         * Convert entry->ip from a virtual address to an offset in
         * its corresponding binary.
         */
-       addr = map__map_ip(entry->map, entry->ip);
+       if (entry->map)
+               addr = map__map_ip(entry->map, entry->ip);
 
        srcline = callchain_srcline(entry->map, entry->sym, addr);
        return callchain_cursor_append(cursor, entry->ip,
index 97efbca..1942f6d 100644 (file)
@@ -35,7 +35,7 @@ class install_lib(_install_lib):
 
 cflags = getenv('CFLAGS', '').split()
 # switch off several checks (need to be at the end of cflags list)
-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
 if cc != "clang":
     cflags += ['-Wno-cast-function-type' ]
 
index 08c341b..e101af5 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # This test is for checking rtnetlink callpaths, and get as much coverage as possible.
 #
index 850767b..99e537a 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 #
 # Run a series of udpgso benchmarks