Pick up these single-commit branches.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Coding style is all about readability and maintainability using commonly
available tools.
-The limit on the length of lines is 80 columns and this is a strongly
-preferred limit.
-
-Statements longer than 80 columns will be broken into sensible chunks, unless
-exceeding 80 columns significantly increases readability and does not hide
-information. Descendants are always substantially shorter than the parent and
-are placed substantially to the right. The same applies to function headers
-with a long argument list. However, never break user-visible strings such as
-printk messages, because that breaks the ability to grep for them.
+The preferred limit on the length of a single line is 80 columns.
+
+Statements longer than 80 columns should be broken into sensible chunks,
+unless exceeding 80 columns significantly increases readability and does
+not hide information.
+
+Descendants are always substantially shorter than the parent and
+are placed substantially to the right. A very commonly used style
+is to align descendants to a function open parenthesis.
+
+These same rules are applied to function headers with a long argument list.
+
+However, never break user-visible strings such as printk messages because
+that breaks the ability to grep for them.
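A minimal illustrative sketch of the wrapping rule (hypothetical names):

	ret = frobnicate_widget(device, first_argument, second_argument,
				third_argument);

	pr_warn("frobnicator: unable to frobnicate widget %d on %s\n",
		id, name);

The format string stays on one line even though the call exceeds 80
columns; only the argument list is wrapped.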
3) Placing Braces and Spaces
VERSION = 5
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
}
.table : ALIGN(4) {
_table_start = .;
- LONG(ZIMAGE_MAGIC(2))
+ LONG(ZIMAGE_MAGIC(4))
LONG(ZIMAGE_MAGIC(0x5a534c4b))
LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
LONG(ZIMAGE_MAGIC(_kernel_bss_size))
&cpsw_emac0 {
	phy-handle = <&ethphy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
};
&elm {
&cpsw_emac0 {
	phy-handle = <&ethphy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
};
&rtc {
&cpsw_emac0 {
	phy-handle = <&ethphy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
dual_emac_res_vlan = <1>;
};
&cpsw_emac1 {
	phy-handle = <&ethphy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
dual_emac_res_vlan = <2>;
};
&cpsw_port1 {
	phy-handle = <&ethphy0_sw>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
ti,dual-emac-pvid = <1>;
};
&cpsw_port2 {
	phy-handle = <&ethphy1_sw>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
ti,dual-emac-pvid = <2>;
};
&cpsw_emac0 {
phy-handle = <&phy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
dual_emac_res_vlan = <1>;
};
&cpsw_emac1 {
phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
dual_emac_res_vlan = <2>;
};
&cpsw_emac0 {
	phy-handle = <&ethphy0>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
dual_emac_res_vlan = <1>;
};
&cpsw_emac1 {
	phy-handle = <&ethphy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-rxid";
dual_emac_res_vlan = <2>;
};
timer@20200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x20200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};
compatible = "arm,cortex-a9-twd-timer";
reg = <0x20600 0x20>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) |
- IRQ_TYPE_LEVEL_HIGH)>;
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&periph_clk>;
};
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x20620 0x20>;
interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) |
- IRQ_TYPE_LEVEL_HIGH)>;
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&periph_clk>;
};
leds {
act {
- gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
+ gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
};
};
davinci_mdio: mdio@800 {
compatible = "ti,cpsw-mdio", "ti,davinci_mdio";
- clocks = <&alwon_ethernet_clkctrl DM814_ETHERNET_CPGMAC0_CLKCTRL 0>;
+ clocks = <&cpsw_125mhz_gclk>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <0>;
};
};
-&clks {
- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
- <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
&ldb {
status = "okay";
};
};
-&clks {
- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
- <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
&ldb {
status = "okay";
};
};
-&clks {
- assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
- <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
- <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
- <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
- assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
- <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
- <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
-};
-
&ldb {
fsl,dual-channel;
status = "okay";
#interrupt-cells = <1>;
};
};
+
+&clks {
+ assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+ <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+ <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
+ <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
+ assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+ <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
+};
status = "okay";
};
-&ssp3 {
+&ssp1 {
status = "okay";
- cs-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
+ cs-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
firmware-flash@0 {
- compatible = "st,m25p80", "jedec,spi-nor";
+ compatible = "winbond,w25q32", "jedec,spi-nor";
reg = <0>;
- spi-max-frequency = <40000000>;
+ spi-max-frequency = <104000000>;
m25p,fast-read;
};
};
-&ssp4 {
- cs-gpios = <&gpio 56 GPIO_ACTIVE_HIGH>;
+&ssp2 {
+ cs-gpios = <&gpio 56 GPIO_ACTIVE_LOW>;
status = "okay";
};
};
hsic_phy0: hsic-phy@f0001800 {
- compatible = "marvell,mmp3-hsic-phy",
- "usb-nop-xceiv";
+ compatible = "marvell,mmp3-hsic-phy";
reg = <0xf0001800 0x40>;
#phy-cells = <0>;
status = "disabled";
};
hsic_phy1: hsic-phy@f0002800 {
- compatible = "marvell,mmp3-hsic-phy",
- "usb-nop-xceiv";
+ compatible = "marvell,mmp3-hsic-phy";
reg = <0xf0002800 0x40>;
#phy-cells = <0>;
status = "disabled";
};
soc_clocks: clocks@d4050000 {
- compatible = "marvell,mmp2-clock";
+ compatible = "marvell,mmp3-clock";
reg = <0xd4050000 0x1000>,
<0xd4282800 0x400>,
<0xd4015000 0x1000>;
#endif
#include <asm/ptrace.h>
-#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#define IOMEM(x) (x)
.size \name , . - \name
.endm
- .macro csdb
-#ifdef CONFIG_THUMB2_KERNEL
- .inst.w 0xf3af8014
-#else
- .inst 0xe320f014
-#endif
- .endm
-
- .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
- adds \tmp, \addr, #\size - 1
- sbcscc \tmp, \tmp, \limit
- bcs \bad
-#ifdef CONFIG_CPU_SPECTRE
- movcs \addr, #0
- csdb
-#endif
-#endif
- .endm
-
- .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
-#ifdef CONFIG_CPU_SPECTRE
- sub \tmp, \limit, #1
- subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
- addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
- subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
- movlo \addr, #0 @ if (tmp < 0) addr = NULL
- csdb
-#endif
- .endm
-
- .macro uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
- */
- mov \tmp, #DACR_UACCESS_DISABLE
- mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
- .if \isb
- instr_sync
- .endif
-#endif
- .endm
-
- .macro uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- /*
- * Whenever we re-enter userspace, the domains should always be
- * set appropriately.
- */
- mov \tmp, #DACR_UACCESS_ENABLE
- mcr p15, 0, \tmp, c3, c0, 0
- .if \isb
- instr_sync
- .endif
-#endif
- .endm
-
- .macro uaccess_save, tmp
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- mrc p15, 0, \tmp, c3, c0, 0
- str \tmp, [sp, #SVC_DACR]
-#endif
- .endm
-
- .macro uaccess_restore
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r0, [sp, #SVC_DACR]
- mcr p15, 0, r0, c3, c0, 0
-#endif
- .endm
-
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+
+ .macro csdb
+#ifdef CONFIG_THUMB2_KERNEL
+ .inst.w 0xf3af8014
+#else
+ .inst 0xe320f014
+#endif
+ .endm
+
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcscc \tmp, \tmp, \limit
+ bcs \bad
+#ifdef CONFIG_CPU_SPECTRE
+ movcs \addr, #0
+ csdb
+#endif
+#endif
+ .endm
+
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...) x
+#else
+#define DACR(x...)
+#endif
+
+ /*
+ * Save the address limit on entry to a privileged exception.
+ *
+ * If we are using the DACR for kernel access by the user accessors
+ * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+ * back to client mode, whether or not \disable is set.
+ *
+ * If we are using SW PAN, set the DACR user domain to no access
+ * if \disable is set.
+ */
+ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+ ldr \tmp1, [\tsk, #TI_ADDR_LIMIT]
+ mov \tmp2, #TASK_SIZE
+ str \tmp2, [\tsk, #TI_ADDR_LIMIT]
+ DACR( mrc p15, 0, \tmp0, c3, c0, 0)
+ DACR( str \tmp0, [sp, #SVC_DACR])
+ str \tmp1, [sp, #SVC_ADDR_LIMIT]
+ .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+ /* kernel=client, user=no access */
+ mov \tmp2, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+ /* kernel=client */
+ bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+ orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+ mcr p15, 0, \tmp2, c3, c0, 0
+ instr_sync
+ .endif
+ .endm
+
+ /* Restore the user access state previously saved by uaccess_entry */
+ .macro uaccess_exit, tsk, tmp0, tmp1
+ ldr \tmp1, [sp, #SVC_ADDR_LIMIT]
+ DACR( ldr \tmp0, [sp, #SVC_DACR])
+ str \tmp1, [\tsk, #TI_ADDR_LIMIT]
+ DACR( mcr p15, 0, \tmp0, c3, c0, 0)
+ .endm
+
+#undef DACR
+
+#endif /* __ASM_UACCESS_ASM_H__ */
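Roughly, for size >= 1 and ignoring overflow, uaccess_mask_range_ptr
computes the following C-level check (the real macro is branchless and
follows the arithmetic with a csdb speculation barrier):

	/* tmp = limit - 1 - addr, then tmp + 1 - size: if the range
	 * [addr, addr + size) overruns limit, force addr to NULL so a
	 * mispredicted access cannot leak data. */
	if ((unsigned long)addr + size > limit)
		addr = NULL;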
size_t size;
if (tag->hdr.tag != ATAG_CORE) {
- pr_info("No ATAGs?");
+ pr_info("No ATAGs?\n");
return -EINVAL;
}
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
stmia r7, {r2 - r6}
get_thread_info tsk
- ldr r0, [tsk, #TI_ADDR_LIMIT]
- mov r1, #TASK_SIZE
- str r1, [tsk, #TI_ADDR_LIMIT]
- str r0, [sp, #SVC_ADDR_LIMIT]
-
- uaccess_save r0
- .if \uaccess
- uaccess_disable r0
- .endif
+ uaccess_entry tsk, r0, r1, r2, \uaccess
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
#include <asm/v7m.h>
@ Bad Abort numbers
blne trace_hardirqs_off
#endif
.endif
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
- ldr r1, [sp, #SVC_ADDR_LIMIT]
- uaccess_restore
- str r1, [tsk, #TI_ADDR_LIMIT]
+ uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
};
static struct undef_hook thumb_break_hook = {
- .instr_mask = 0xffff,
- .instr_val = 0xde01,
+ .instr_mask = 0xffffffff,
+ .instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
"venc_lt_sel";
assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>,
<&topckgen CLK_TOP_VENC_LT_SEL>;
- assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>,
- <&topckgen CLK_TOP_UNIVPLL1_D2>;
+ assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL>,
+ <&topckgen CLK_TOP_VCODECPLL_370P5>;
};
jpegdec: jpegdec@18004000 {
panic("CPU%u detected unsupported configuration\n", cpu);
}
- return ret;
+ return -EIO;
}
static void init_gic_priority_masking(void)
.endm
.macro RESTORE_ALL
- psrclr ie
ldw lr, (sp, 4)
ldw a0, (sp, 8)
mtcr a0, epc
movi r6, 0
cpwcr r6, cpcr31
.endm
-
-.macro ANDI_R3 rx, imm
- lsri \rx, 3
- andi \rx, (\imm >> 3)
-.endm
#endif /* __ASM_CSKY_ENTRY_H */
#define LSAVE_A1 28
#define LSAVE_A2 32
#define LSAVE_A3 36
+#define LSAVE_A4 40
+#define LSAVE_A5 44
#define KSPTOUSP
#define USPTOKSP
.endm
.macro RESTORE_ALL
- psrclr ie
ldw tls, (sp, 0)
ldw lr, (sp, 4)
ldw a0, (sp, 8)
jmpi 3f /* jump to va */
3:
.endm
-
-.macro ANDI_R3 rx, imm
- lsri \rx, 3
- andi \rx, (\imm >> 3)
-.endm
#endif /* __ASM_CSKY_ENTRY_H */
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE)
+
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT)
+
#endif /* _ASM_CSKY_THREAD_INFO_H */
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
zero_fp
-#ifdef CONFIG_RSEQ_DEBUG
- mov a0, sp
- jbsr rseq_syscall
-#endif
psrset ee, ie
- lrw r11, __NR_syscalls
- cmphs syscallid, r11 /* Check nr of syscall */
- bt ret_from_exception
+ lrw r9, __NR_syscalls
+ cmphs syscallid, r9 /* Check nr of syscall */
+ bt 1f
- lrw r13, sys_call_table
- ixw r13, syscallid
- ldw r11, (r13)
- cmpnei r11, 0
+ lrw r9, sys_call_table
+ ixw r9, syscallid
+ ldw syscallid, (r9)
+ cmpnei syscallid, 0
bf ret_from_exception
mov r9, sp
bmaski r10, THREAD_SHIFT
andn r9, r10
- ldw r12, (r9, TINFO_FLAGS)
- ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
- cmpnei r12, 0
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_SYSCALL_WORK
+ and r10, r9
+ cmpnei r10, 0
bt csky_syscall_trace
#if defined(__CSKYABIV2__)
subi sp, 8
stw r5, (sp, 0x4)
stw r4, (sp, 0x0)
- jsr r11 /* Do system call */
+ jsr syscallid /* Do system call */
addi sp, 8
#else
- jsr r11
+ jsr syscallid
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
+1:
+#ifdef CONFIG_DEBUG_RSEQ
+ mov a0, sp
+ jbsr rseq_syscall
+#endif
jmpi ret_from_exception
csky_syscall_trace:
ldw a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
subi sp, 8
- stw r5, (sp, 0x4)
- stw r4, (sp, 0x0)
+ ldw r9, (sp, LSAVE_A4)
+ stw r9, (sp, 0x0)
+ ldw r9, (sp, LSAVE_A5)
+ stw r9, (sp, 0x4)
+ jsr syscallid /* Do system call */
+ addi sp, 8
#else
ldw r6, (sp, LSAVE_A4)
ldw r7, (sp, LSAVE_A5)
-#endif
- jsr r11 /* Do system call */
-#if defined(__CSKYABIV2__)
- addi sp, 8
+ jsr syscallid /* Do system call */
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
+#ifdef CONFIG_DEBUG_RSEQ
+ mov a0, sp
+ jbsr rseq_syscall
+#endif
mov a0, sp /* right now, sp --> pt_regs */
jbsr syscall_trace_exit
br ret_from_exception
mov r9, sp
bmaski r10, THREAD_SHIFT
andn r9, r10
- ldw r12, (r9, TINFO_FLAGS)
- ANDI_R3 r12, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
- cmpnei r12, 0
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_SYSCALL_WORK
+ and r10, r9
+ cmpnei r10, 0
bf ret_from_exception
mov a0, sp /* sp = pt_regs pointer */
jbsr syscall_trace_exit
ret_from_exception:
- ld syscallid, (sp, LSAVE_PSR)
- btsti syscallid, 31
- bt 1f
+ psrclr ie
+ ld r9, (sp, LSAVE_PSR)
+ btsti r9, 31
+ bt 1f
/*
* Load address of current->thread_info, Then get address of task_struct
* Get task_needreshed in task_struct
bmaski r10, THREAD_SHIFT
andn r9, r10
- ldw r12, (r9, TINFO_FLAGS)
- andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
- cmpnei r12, 0
+ ldw r10, (r9, TINFO_FLAGS)
+ lrw r9, _TIF_WORK_MASK
+ and r10, r9
+ cmpnei r10, 0
bt exit_work
1:
+#ifdef CONFIG_PREEMPTION
+ mov r9, sp
+ bmaski r10, THREAD_SHIFT
+ andn r9, r10
+
+ ldw r10, (r9, TINFO_PREEMPT)
+ cmpnei r10, 0
+ bt 2f
+ jbsr preempt_schedule_irq /* irq en/disable is done inside */
+2:
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
ld r10, (sp, LSAVE_PSR)
btsti r10, 6
RESTORE_ALL
exit_work:
- lrw syscallid, ret_from_exception
- mov lr, syscallid
+ lrw r9, ret_from_exception
+ mov lr, r9
- btsti r12, TIF_NEED_RESCHED
+ btsti r10, TIF_NEED_RESCHED
bt work_resched
+ psrset ie
mov a0, sp
- mov a1, r12
+ mov a1, r10
jmpi do_notify_resume
work_resched:
jbsr trace_hardirqs_off
#endif
-#ifdef CONFIG_PREEMPTION
- mov r9, sp /* Get current stack pointer */
- bmaski r10, THREAD_SHIFT
- andn r9, r10 /* Get thread_info */
-
- /*
- * Get task_struct->stack.preempt_count for current,
- * and increase 1.
- */
- ldw r12, (r9, TINFO_PREEMPT)
- addi r12, 1
- stw r12, (r9, TINFO_PREEMPT)
-#endif
mov a0, sp
jbsr csky_do_IRQ
-#ifdef CONFIG_PREEMPTION
- subi r12, 1
- stw r12, (r9, TINFO_PREEMPT)
- cmpnei r12, 0
- bt 2f
- ldw r12, (r9, TINFO_FLAGS)
- btsti r12, TIF_NEED_RESCHED
- bf 2f
- jbsr preempt_schedule_irq /* irq en/disable is done inside */
-#endif
-2:
jmpi ret_from_exception
/*
#define _ASM_IA64_DEVICE_H
struct dev_archdata {
-#ifdef CONFIG_INTEL_IOMMU
+#ifdef CONFIG_IOMMU_API
void *iommu; /* hook for IOMMU specific extension */
#endif
};
> BITS_PER_LONG);
high_memory = __va((max_pfn << PAGE_SHIFT));
- set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
+ set_max_mapnr(max_low_pfn);
memblock_free_all();
#ifdef CONFIG_PA11
select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
GCOV_PROFILE_kprobes-ftrace.o := n
KCOV_INSTRUMENT_kprobes-ftrace.o := n
UBSAN_SANITIZE_kprobes-ftrace.o := n
+GCOV_PROFILE_syscall_64.o := n
+KCOV_INSTRUMENT_syscall_64.o := n
+UBSAN_SANITIZE_syscall_64.o := n
UBSAN_SANITIZE_vdso.o := n
# Necessary for booting with kcov enabled on book3e machines
GEN_COMMON facility_unavailable
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
b interrupt_return
GEN_KVM facility_unavailable
GEN_COMMON h_facility_unavailable
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
+ REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
b interrupt_return
GEN_KVM h_facility_unavailable
#define _ASM_X86_DEVICE_H
struct dev_archdata {
-#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
+#ifdef CONFIG_IOMMU_API
void *iommu; /* hook for IOMMU specific extension */
#endif
};
#define MAX_DMA_PFN ((16UL * 1024 * 1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
#ifdef CONFIG_X86_32
/* The maximum address that we can perform a DMA transfer to on this platform */
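The two forms are numerically identical -- with PAGE_SHIFT = 12, both
(4UL * 1024 * 1024 * 1024) >> 12 and 1UL << (32 - 12) give 0x100000
pfns -- but the shifted form never materializes the 4 GiB constant,
which would overflow a 32-bit unsigned long.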
#ifdef CONFIG_X86_IOPL_IOPERM
void io_bitmap_share(struct task_struct *tsk);
-void io_bitmap_exit(void);
+void io_bitmap_exit(struct task_struct *tsk);
void native_tss_update_io_bitmap(void);
#else
static inline void io_bitmap_share(struct task_struct *tsk) { }
-static inline void io_bitmap_exit(void) { }
+static inline void io_bitmap_exit(struct task_struct *tsk) { }
static inline void tss_update_io_bitmap(void) { }
#endif
#ifndef _UAPI_ASM_X86_UNISTD_H
#define _UAPI_ASM_X86_UNISTD_H
-/* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT 0x40000000UL
+/*
+ * x32 syscall flag bit. Some user programs expect syscall NR macros
+ * and __X32_SYSCALL_BIT to have type int, even though syscall numbers
+ * are, for practical purposes, unsigned long.
+ *
+ * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right
+ * thing regardless.
+ */
+#define __X32_SYSCALL_BIT 0x40000000
#ifndef __KERNEL__
# ifdef __i386__
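A worked example of why the narrowed type stays correct (values are
illustrative):

	/* ~__X32_SYSCALL_BIT is the int 0xbfffffff; masking an unsigned
	 * long nr sign-extends it to 0xffffffffbfffffff, so only bit 30
	 * is cleared either way. */
	unsigned long nr = __X32_SYSCALL_BIT | 39;	/* hypothetical x32 nr */
	unsigned long base = nr & ~__X32_SYSCALL_BIT;	/* == 39 */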
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
+ X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, 1),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
{}
};
return true;
}
-/*
- * This is similar to user_regset_copyout(), but will not add offset to
- * the source data pointer or increment pos, count, kbuf, and ubuf.
- */
-static inline void
-__copy_xstate_to_kernel(void *kbuf, const void *data,
- unsigned int offset, unsigned int size, unsigned int size_total)
+static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
{
- if (offset < size_total) {
- unsigned int copy = min(size, size_total - offset);
+ if (*pos < to) {
+ unsigned size = to - *pos;
+
+ if (size > *count)
+ size = *count;
+ memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
+ *kbuf += size;
+ *pos += size;
+ *count -= size;
+ }
+}
- memcpy(kbuf + offset, data, copy);
+static void copy_part(unsigned offset, unsigned size, void *from,
+ void **kbuf, unsigned *pos, unsigned *count)
+{
+ fill_gap(offset, kbuf, pos, count);
+ if (size > *count)
+ size = *count;
+ if (size) {
+ memcpy(*kbuf, from, size);
+ *kbuf += size;
+ *pos += size;
+ *count -= size;
}
}
*/
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
- unsigned int offset, size;
struct xstate_header header;
+ const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+ unsigned count = size_total;
int i;
/*
header.xfeatures = xsave->header.xfeatures;
header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+ if (header.xfeatures & XFEATURE_MASK_FP)
+ copy_part(0, off_mxcsr,
+ &xsave->i387, &kbuf, &offset_start, &count);
+ if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
+ copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
+ &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
+ if (header.xfeatures & XFEATURE_MASK_FP)
+ copy_part(offsetof(struct fxregs_state, st_space), 128,
+ &xsave->i387.st_space, &kbuf, &offset_start, &count);
+ if (header.xfeatures & XFEATURE_MASK_SSE)
+ copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
+ &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
+ /*
+ * Fill xsave->i387.sw_reserved value for ptrace frame:
+ */
+ copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
+ xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
/*
* Copy xregs_state->header:
*/
- offset = offsetof(struct xregs_state, header);
- size = sizeof(header);
-
- __copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+ copy_part(offsetof(struct xregs_state, header), sizeof(header),
+ &header, &kbuf, &offset_start, &count);
- for (i = 0; i < XFEATURE_MAX; i++) {
+ for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
/*
* Copy only in-use xstates:
*/
if ((header.xfeatures >> i) & 1) {
void *src = __raw_xsave_addr(xsave, i);
- offset = xstate_offsets[i];
- size = xstate_sizes[i];
-
- /* The next component has to fit fully into the output buffer: */
- if (offset + size > size_total)
- break;
-
- __copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
+ copy_part(xstate_offsets[i], xstate_sizes[i],
+ src, &kbuf, &offset_start, &count);
}
}
-
- if (xfeatures_mxcsr_quirk(header.xfeatures)) {
- offset = offsetof(struct fxregs_state, mxcsr);
- size = MXCSR_AND_FLAGS_SIZE;
- __copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
- }
-
- /*
- * Fill xsave->i387.sw_reserved value for ptrace frame:
- */
- offset = offsetof(struct fxregs_state, sw_reserved);
- size = sizeof(xstate_fx_sw_bytes);
-
- __copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
+ fill_gap(size_total, &kbuf, &offset_start, &count);
return 0;
}
set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
}
-static void task_update_io_bitmap(void)
+static void task_update_io_bitmap(struct task_struct *tsk)
{
-	struct thread_struct *t = &current->thread;
+ struct thread_struct *t = &tsk->thread;
if (t->iopl_emul == 3 || t->io_bitmap) {
/* TSS update is handled on exit to user space */
- set_thread_flag(TIF_IO_BITMAP);
+ set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
} else {
- clear_thread_flag(TIF_IO_BITMAP);
+ clear_tsk_thread_flag(tsk, TIF_IO_BITMAP);
/* Invalidate TSS */
preempt_disable();
tss_update_io_bitmap();
}
}
-void io_bitmap_exit(void)
+void io_bitmap_exit(struct task_struct *tsk)
{
- struct io_bitmap *iobm = current->thread.io_bitmap;
+ struct io_bitmap *iobm = tsk->thread.io_bitmap;
- current->thread.io_bitmap = NULL;
- task_update_io_bitmap();
+ tsk->thread.io_bitmap = NULL;
+ task_update_io_bitmap(tsk);
if (iobm && refcount_dec_and_test(&iobm->refcnt))
kfree(iobm);
}
if (!iobm)
return -ENOMEM;
refcount_set(&iobm->refcnt, 1);
- io_bitmap_exit();
+ io_bitmap_exit(current);
}
/*
}
/* All permissions dropped? */
if (max_long == UINT_MAX) {
- io_bitmap_exit();
+ io_bitmap_exit(current);
return 0;
}
}
t->iopl_emul = level;
- task_update_io_bitmap();
+ task_update_io_bitmap(current);
return 0;
}
}
/*
- * Free current thread data structures etc..
+ * Free thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
struct fpu *fpu = &t->fpu;
if (test_thread_flag(TIF_IO_BITMAP))
- io_bitmap_exit();
+ io_bitmap_exit(tsk);
free_vm86(t);
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
+ { /* Handle problems with rebooting on Apple MacBook6,1 */
+ .callback = set_pci_reboot,
+ .ident = "Apple MacBook6,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"),
+ },
+ },
{ /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5",
}
/*
- * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
- * with BLK_STS_AGAIN status in order to catch -EAGAIN and
- * to give a chance to the caller to repeat request gracefully.
+ * For a REQ_NOWAIT based request, return -EOPNOTSUPP
+ * if queue is not a request based queue.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
- status = BLK_STS_AGAIN;
- goto end_io;
- }
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
+ goto not_supported;
if (should_fail_bio(bio))
goto end_io;
config SM_GCC_8250
tristate "SM8250 Global Clock Controller"
+ select QCOM_GDSC
help
Support for the global clock controller on SM8250 devices.
Say Y if you want to use peripheral devices such as UART,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "bi_tcxo",
- .name = "bi_tcxo",
+ .hw = &gpll0.clkr.hw,
},
.num_parents = 1,
.ops = &clk_trion_pll_postdiv_ops,
make_tx_data_wr(sk, skb, immdlen, len,
credits_needed, completion);
tp->snd_nxt += len;
- tp->lsndtime = tcp_time_stamp(tp);
+ tp->lsndtime = tcp_jiffies32;
if (completion)
ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
} else {
kona_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kona_gpio->reg_base)) {
- ret = -ENXIO;
+ ret = PTR_ERR(kona_gpio->reg_base);
goto err_irq_domain;
}
mutex_init(&exar_gpio->lock);
index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
- if (index < 0)
- goto err_destroy;
+ if (index < 0) {
+ ret = index;
+ goto err_mutex_destroy;
+ }
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
err_destroy:
ida_simple_remove(&ida_index, index);
+err_mutex_destroy:
mutex_destroy(&exar_gpio->lock);
return ret;
}
{
u32 arm_gpio_lock_val;
- spin_lock(&gs->gc.bgpio_lock);
mutex_lock(yu_arm_gpio_lock_param.lock);
+ spin_lock(&gs->gc.bgpio_lock);
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
* When lock active bit[31] is set, ModeX is write enabled
*/
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
- mutex_unlock(yu_arm_gpio_lock_param.lock);
spin_unlock(&gs->gc.bgpio_lock);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
return -EINVAL;
}
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
{
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
- mutex_unlock(yu_arm_gpio_lock_param.lock);
spin_unlock(&gs->gc.bgpio_lock);
+ mutex_unlock(yu_arm_gpio_lock_param.lock);
}
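The reordering restores the usual nesting rule for mixed lock types;
sketched with generic names:

	mutex_lock(&outer);	/* sleeping lock taken first */
	spin_lock(&inner);	/* atomic lock nested inside */
	/* ... critical section ... */
	spin_unlock(&inner);	/* released in reverse order */
	mutex_unlock(&outer);

Taking the mutex inside the spinlocked region, as the old code did,
could sleep while atomic.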
/*
"marvell,armada-370-gpio"))
return 0;
+ /*
+ * There are only two sets of PWM configuration registers for
+ * all the GPIO lines on those SoCs which this driver reserves
+ * for the first two GPIO chips. So if the resource is missing
+ * we can't treat it as an error.
+ */
+ if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwm"))
+ return 0;
+
if (IS_ERR(mvchip->clk))
return PTR_ERR(mvchip->clk);
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- /*
- * There are only two sets of PWM configuration registers for
- * all the GPIO lines on those SoCs which this driver reserves
- * for the first two GPIO chips. So if the resource is missing
- * we can't treat it as an error.
- */
mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
if (IS_ERR(mvpwm->membase))
return PTR_ERR(mvpwm->membase);
pchip->irq1 = irq1;
gpio_reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (!gpio_reg_base)
- return -EINVAL;
+ if (IS_ERR(gpio_reg_base))
+ return PTR_ERR(gpio_reg_base);
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
if (ret)
goto out_free_descs;
}
+
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
offset);
}
if (ret)
goto out_free_desc;
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
le->irq = gpiod_to_irq(desc);
if (le->irq <= 0) {
ret = -ENODEV;
}
done:
spin_unlock_irqrestore(&gpio_lock, flags);
- atomic_notifier_call_chain(&desc->gdev->notifier,
- GPIOLINE_CHANGED_REQUESTED, desc);
return ret;
}
}
}
- if (test_bit(FLAG_IS_OUT, &desc->flags)) {
+ /* To be valid for IRQ the line needs to be input or open drain */
+ if (test_bit(FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
chip_err(gc,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
if (!IS_ERR(desc) &&
!WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags));
+ /*
+ * We must not be output when using IRQ UNLESS we are
+ * open drain.
+ */
+ WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
return ERR_PTR(ret);
}
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
return desc;
}
EXPORT_SYMBOL_GPL(gpiod_get_index);
return ERR_PTR(ret);
}
+ atomic_notifier_call_chain(&desc->gdev->notifier,
+ GPIOLINE_CHANGED_REQUESTED, desc);
+
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
-#if defined(CONFIG_CGROUP_DEVICE)
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = kfd->ddev;
return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
return -EINVAL;
}
- if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
- new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
- DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
- new_plane_state->crtc_x, new_plane_state->crtc_y);
- return -EINVAL;
- }
-
return 0;
}
return;
/* Stall out until the cursor update completes. */
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
udelay(us_to_vupdate + us_vupdate);
}
if (!drm_atomic_crtc_needs_modeset(state))
return 0;
- if (state->mode.hdisplay > priv->soc_info->max_height ||
- state->mode.vdisplay > priv->soc_info->max_width)
+ if (state->mode.hdisplay > priv->soc_info->max_width ||
+ state->mode.vdisplay > priv->soc_info->max_height)
return -EINVAL;
rate = clk_round_rate(priv->pix_clk,
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
- struct ingenic_drm *priv = arg;
+ struct ingenic_drm *priv = drm_device_get_priv(arg);
unsigned int state;
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
uobj->context = NULL;
/*
- * For DESTROY the usecnt is held write locked, the caller is expected
- * to put it unlock and put the object when done with it. Only DESTROY
- * can remove the IDR handle.
+ * For DESTROY the usecnt is not changed, the caller is expected to
+ * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+ * handle.
*/
if (reason != RDMA_REMOVE_DESTROY)
atomic_set(&uobj->usecnt, 0);
/*
* This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
* sequence. It should only be used from command callbacks. On success the
- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * caller must pair this with uobj_put_destroy(). This
* version requires the caller to have already obtained an
* LOOKUP_DESTROY uobject kref.
*/
down_read(&ufile->hw_destroy_rwsem);
+ /*
+ * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+ * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+ * This is because any other concurrent thread can still see the object
+ * in the xarray due to RCU. Leaving it locked ensures nothing else will
+ * touch it.
+ */
ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
if (ret)
goto out_unlock;
/*
* uobj_get_destroy destroys the HW object and returns a handle to the uobj
* with a NULL object pointer. The caller must pair this with
- * uverbs_put_destroy.
+ * uobj_put_destroy().
*/
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
u32 id, struct uverbs_attr_bundle *attrs)
uobj = __uobj_get_destroy(obj, id, attrs);
if (IS_ERR(uobj))
return PTR_ERR(uobj);
-
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ uobj_put_destroy(uobj);
return 0;
}
if (is_odp_mr(mr)) {
to_ib_umem_odp(mr->umem)->private = mr;
+ init_waitqueue_head(&mr->q_deferred_work);
atomic_set(&mr->num_deferred_work, 0);
err = xa_err(xa_store(&dev->odp_mkeys,
mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
qib_dev_err(dd,
"Skipping linkcontrol sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ goto bail_link;
}
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
qib_dev_err(dd,
"Skipping sl2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_link;
+ goto bail_sl;
}
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
qib_dev_err(dd,
"Skipping diag_counters sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl;
+ goto bail_diagc;
}
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
qib_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_diagc;
+ goto bail_cc;
}
kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
&cc_table_bin_attr);
kobject_put(&ppd->pport_cc_kobj);
}
+ kobject_put(&ppd->diagc_kobj);
kobject_put(&ppd->sl2vl_kobj);
kobject_put(&ppd->pport_kobj);
}
!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
ret = -ENOMEM;
- goto err_free_device;
+ goto err_disable_pdev;
}
ret = pci_request_regions(pdev, DRV_NAME);
struct ipoib_rx_buf *rx_ring;
struct ipoib_tx_buf *tx_ring;
+ /* cyclic ring variables for managing tx_ring, for UD only */
unsigned int tx_head;
unsigned int tx_tail;
+ /* cyclic ring variables for counting overall outstanding send WRs */
+ unsigned int global_tx_head;
+ unsigned int global_tx_tail;
struct ib_sge tx_sge[MAX_SKB_FRAGS + 1];
struct ib_ud_wr tx_wr;
struct ib_wc send_wc[MAX_SEND_CQE];
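The new counters follow the usual free-running ring convention; a sketch
of the occupancy test used below (ipoib_ring_used is a hypothetical
helper, assuming the ring never overfills):

	/* head and tail only ever increment; unsigned subtraction yields
	 * the number of outstanding send WRs even across wraparound. */
	static inline unsigned int ipoib_ring_used(unsigned int head,
						   unsigned int tail)
	{
		return head - tail;
	}

The queue is stopped once the count reaches ipoib_sendq_size - 1 and
woken when it drains to half the ring.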
return;
}
- if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
netif_stop_queue(dev);
} else {
netif_trans_update(dev);
++tx->tx_head;
- ++priv->tx_head;
+ ++priv->global_tx_head;
}
}
netif_tx_lock(dev);
++tx->tx_tail;
- ++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock_bh(p->dev);
++p->tx_tail;
- ++priv->tx_tail;
- if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
+ ++priv->global_tx_tail;
+ if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
- ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+ ((priv->global_tx_head - priv->global_tx_tail) <=
+ ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
/* increase the tx_head after send success, but use it for queue state */
- if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
+ if ((priv->global_tx_head - priv->global_tx_tail) ==
+ ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
netif_stop_queue(dev);
}
rc = priv->tx_head;
++priv->tx_head;
+ ++priv->global_tx_head;
}
return rc;
}
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
+ ++priv->global_tx_tail;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
- ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
- netif_queue_stopped(dev),
- priv->tx_head, priv->tx_tail);
+ ipoib_warn(priv,
+ "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+ netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+ priv->global_tx_head, priv->global_tx_tail);
+
/* XXX reset QP, etc. */
}
goto out_rx_ring_cleanup;
}
- /* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+ /* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
if (ipoib_transport_dev_init(dev, priv->ca)) {
pr_warn("%s: ipoib_transport_dev_init failed\n",
return fasync_helper(fd, file, on, &client->fasync);
}
-static int evdev_flush(struct file *file, fl_owner_t id)
-{
- struct evdev_client *client = file->private_data;
- struct evdev *evdev = client->evdev;
-
- mutex_lock(&evdev->mutex);
-
- if (evdev->exist && !client->revoked)
- input_flush_device(&evdev->handle, file);
-
- mutex_unlock(&evdev->mutex);
- return 0;
-}
-
static void evdev_free(struct device *dev)
{
struct evdev *evdev = container_of(dev, struct evdev, dev);
unsigned int i;
mutex_lock(&evdev->mutex);
+
+ if (evdev->exist && !client->revoked)
+ input_flush_device(&evdev->handle, file);
+
evdev_ungrab(evdev, client);
mutex_unlock(&evdev->mutex);
.compat_ioctl = evdev_ioctl_compat,
#endif
.fasync = evdev_fasync,
- .flush = evdev_flush,
.llseek = no_llseek,
};
0x05, 0x20, 0x00, 0x01, 0x00
};
+/*
+ * This packet is required for Xbox One S (0x045e:0x02ea)
+ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
+ * initialize the controller that was previously used in
+ * Bluetooth mode.
+ */
+static const u8 xboxone_s_init[] = {
+ 0x05, 0x20, 0x00, 0x0f, 0x06
+};
+
/*
* This packet is required for the Titanfall 2 Xbox One pads
* (0x0e6f:0x0165) to finish initialization and for Hori pads
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
+ XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
u8 number_of_fingers;
u8 clicked2;
u8 unknown3[16];
- struct tp_finger fingers[0];
+ struct tp_finger fingers[];
};
/**
params->info_type = info_type;
params->event_type = event_type;
- ret = cros_ec_cmd_xfer(ec_dev, msg);
- if (ret < 0) {
- dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
- (int)info_type, (int)event_type, ret);
- } else if (msg->result == EC_RES_INVALID_VERSION) {
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret == -ENOTSUPP) {
/* With older ECs we just return 0 for everything */
memset(result, 0, result_size);
ret = 0;
- } else if (msg->result != EC_RES_SUCCESS) {
- dev_warn(ec_dev->dev, "Error getting info %d/%d: %d\n",
- (int)info_type, (int)event_type, msg->result);
- ret = -EPROTO;
+ } else if (ret < 0) {
+ dev_warn(ec_dev->dev, "Transfer error %d/%d: %d\n",
+ (int)info_type, (int)event_type, ret);
} else if (ret != result_size) {
dev_warn(ec_dev->dev, "Wrong size %d/%d: %d != %zu\n",
(int)info_type, (int)event_type,
static struct i2c_driver dir685_tk_i2c_driver = {
.driver = {
- .name = "dlin-dir685-touchkeys",
+ .name = "dlink-dir685-touchkeys",
.of_match_table = of_match_ptr(dir685_tk_of_match),
},
.probe = dir685_tk_probe,
static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
{
- struct input_dev *idev = pwr;
- struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
+ struct axp20x_pek *axp20x_pek = pwr;
+ struct input_dev *idev = axp20x_pek->input;
+
+ if (!idev)
+ return IRQ_HANDLED;
/*
* The power-button is connected to ground so a falling edge (dbf)
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
struct platform_device *pdev)
{
- struct axp20x_dev *axp20x = axp20x_pek->axp20x;
struct input_dev *idev;
int error;
- axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
- if (axp20x_pek->irq_dbr < 0)
- return axp20x_pek->irq_dbr;
- axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
- axp20x_pek->irq_dbr);
-
- axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
- if (axp20x_pek->irq_dbf < 0)
- return axp20x_pek->irq_dbf;
- axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
- axp20x_pek->irq_dbf);
-
axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
if (!axp20x_pek->input)
return -ENOMEM;
input_set_drvdata(idev, axp20x_pek);
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbr", idev);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
- axp20x_pek->irq_dbr, error);
- return error;
- }
-
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbf", idev);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
- axp20x_pek->irq_dbf, error);
- return error;
- }
-
error = input_register_device(idev);
if (error) {
dev_err(&pdev->dev, "Can't register input device: %d\n",
return error;
}
- device_init_wakeup(&pdev->dev, true);
-
return 0;
}
axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
+ axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+ if (axp20x_pek->irq_dbr < 0)
+ return axp20x_pek->irq_dbr;
+ axp20x_pek->irq_dbr = regmap_irq_get_virq(
+ axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);
+
+ axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+ if (axp20x_pek->irq_dbf < 0)
+ return axp20x_pek->irq_dbf;
+ axp20x_pek->irq_dbf = regmap_irq_get_virq(
+ axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
+
if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
axp20x_pek->info = (struct axp20x_info *)match->driver_data;
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbr", axp20x_pek);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ axp20x_pek->irq_dbr, error);
+ return error;
+ }
+
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbf", axp20x_pek);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ axp20x_pek->irq_dbf, error);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+
platform_set_drvdata(pdev, axp20x_pek);
return 0;
"LEN005b", /* P50 */
"LEN005e", /* T560 */
"LEN006c", /* T470s */
+ "LEN007a", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
if (count) {
kfree(attn_data.data);
- attn_data.data = NULL;
+ drvdata->attn_data.data = NULL;
}
if (!kfifo_is_empty(&drvdata->attn_fifo))
if (data->input) {
rmi_driver_set_input_name(rmi_dev, data->input);
if (!rmi_dev->xport->input) {
- if (input_register_device(data->input)) {
+ retval = input_register_device(data->input);
+ if (retval) {
dev_err(dev, "%s: Failed to register input device.\n",
__func__);
goto err_destroy_functions;
DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
},
},
+ {
+ /* Lenovo ThinkPad Twist S230u */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+ },
+ },
{ }
};
*/
+#include <linux/bits.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#define FW_POS_STATE 1
#define FW_POS_TOTAL 2
#define FW_POS_XY 3
+#define FW_POS_TOOL_TYPE 33
#define FW_POS_CHECKSUM 34
#define FW_POS_WIDTH 35
#define FW_POS_PRESSURE 45
{
struct input_dev *input = ts->input;
unsigned int n_fingers;
+ unsigned int tool_type;
u16 finger_state;
int i;
dev_dbg(&ts->client->dev,
"n_fingers: %u, state: %04x\n", n_fingers, finger_state);
+ /* Note: all fingers have the same tool type */
+ tool_type = buf[FW_POS_TOOL_TYPE] & BIT(0) ?
+ MT_TOOL_FINGER : MT_TOOL_PALM;
+
for (i = 0; i < MAX_CONTACT_NUM && n_fingers; i++) {
if (finger_state & 1) {
unsigned int x, y, p, w;
i, x, y, p, w);
input_mt_slot(input, i);
- input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
+ input_mt_report_slot_state(input, tool_type, true);
input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
input_event(input, EV_ABS, ABS_MT_PRESSURE, p);
input_set_abs_params(ts->input, ABS_MT_POSITION_Y, 0, ts->y_max, 0, 0);
input_set_abs_params(ts->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_set_abs_params(ts->input, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_PALM, 0, 0);
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL)
BUG();
- /* Write register: use repeated start */
+ /* Write register */
xfer[0].addr = client->addr;
- xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART;
+ xfer[0].flags = client->flags & I2C_M_TEN;
xfer[0].len = 1;
xfer[0].buf = &buf;
/* Read data */
xfer[1].addr = client->addr;
- xfer[1].flags = I2C_M_RD;
+ xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD;
xfer[1].len = len;
xfer[1].buf = val;
const void *match_data;
int error;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_PROTOCOL_MANGLING)) {
- dev_err(&client->dev,
- "Need i2c bus that supports protocol mangling\n");
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "Not supported I2C adapter\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+ {USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
{USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},
NULL, "%d", group->id);
if (ret) {
ida_simple_remove(&iommu_group_ida, group->id);
- kfree(group);
+ kobject_put(&group->kobj);
return ERR_PTR(ret);
}
struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
struct mmc_rpmb_data, chrdev);
- put_device(&rpmb->dev);
mmc_blk_put(rpmb->md);
+ put_device(&rpmb->dev);
return 0;
}
mmc_hostname(mmc), host->version);
}
- if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
- mmc->caps2 &= ~MMC_CAP2_CQE;
-
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
else if (!(host->caps & SDHCI_CAN_DO_SDMA))
struct mmc_host *mmc = host->mmc;
int ret;
+ if ((mmc->caps2 & MMC_CAP2_CQE) &&
+ (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
+ mmc->caps2 &= ~MMC_CAP2_CQE;
+ mmc->cqe_ops = NULL;
+ }
+
host->complete_wq = alloc_workqueue("sdhci", flags, 0);
if (!host->complete_wq)
return -ENOMEM;
err = kobject_init_and_add(&slave->kobj, &slave_ktype,
&(slave->dev->dev.kobj), "bonding_slave");
- if (err)
+ if (err) {
+ kobject_put(&slave->kobj);
return err;
+ }
for (a = slave_attrs; *a; ++a) {
err = sysfs_create_file(&slave->kobj, &((*a)->attr));
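Like the iommu_group fix above, this error path follows the standard
kobject rule; sketched with generic names:

	/* Once kobject_init_and_add() has run, the kobject owns its
	 * allocated name and may already be visible, so failure must be
	 * unwound with kobject_put() -- which invokes the ktype's
	 * release() -- never a bare kfree() of the containing object. */
	err = kobject_init_and_add(&obj->kobj, &obj_ktype, parent, "name");
	if (err) {
		kobject_put(&obj->kobj);
		return err;
	}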
const struct switchdev_obj_port_vlan *vlan)
{
struct ocelot *ocelot = ds->priv;
+ u16 flags = vlan->flags;
u16 vid;
int err;
+ if (dsa_is_cpu_port(ds, port))
+ flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
+
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
err = ocelot_vlan_add(ocelot, port, vid,
- vlan->flags & BRIDGE_VLAN_INFO_PVID,
- vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
if (err) {
dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
vid, port, err);
int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
- __le32 *resp_len;
u8 *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
- u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
bar_offset = BNXT_GRCPF_REG_KONG_COMM;
doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
resp = bp->hwrm_cmd_kong_resp_addr;
- resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
}
memset(resp, 0, PAGE_SIZE);
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
- resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
if (intr_process) {
u16 seq_id = bp->hwrm_intr_seq_id;
le16_to_cpu(req->req_type));
return -EBUSY;
}
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
- valid = resp_addr + len - 1;
+ len = le16_to_cpu(resp->resp_len);
+ valid = ((u8 *)resp) + len - 1;
} else {
int j;
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
return -EBUSY;
- len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
- HWRM_RESP_LEN_SFT;
+ len = le16_to_cpu(resp->resp_len);
if (len)
break;
/* on first few passes, just barely sleep */
}
/* Last byte of resp contains valid bit */
- valid = resp_addr + len - 1;
+ valid = ((u8 *)resp) + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
- if (bp->bnapi)
+ if (bp->bnapi && irq_re_init)
bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
-#define HWRM_RESP_ERR_CODE_MASK 0xffff
-#define HWRM_RESP_LEN_OFFSET 4
-#define HWRM_RESP_LEN_MASK 0xffff0000
-#define HWRM_RESP_LEN_SFT 16
-#define HWRM_RESP_VALID_MASK 0xff000000
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
bnxt_hwrm_fw_set_time(bp);
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, &item_len, NULL) != 0) {
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL);
+ if (rc) {
netdev_err(dev, "PKG update area not created in nvram\n");
- return -ENOBUFS;
+ return rc;
}
rc = request_firmware(&fw, filename, &dev->dev);
}
/* Do this here, so we can be verbose early */
- SET_NETDEV_DEV(net_dev, dev);
+ SET_NETDEV_DEV(net_dev, dev->parent);
dev_set_drvdata(dev, net_dev);
priv = netdev_priv(net_dev);
dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
break;
}
- dev_info(dev, "Partner protocol version is %d\n",
- crq->version_exchange_rsp.version);
- if (be16_to_cpu(crq->version_exchange_rsp.version) <
- ibmvnic_version)
- ibmvnic_version =
+ ibmvnic_version =
be16_to_cpu(crq->version_exchange_rsp.version);
+ dev_info(dev, "Partner protocol version is %d\n",
+ ibmvnic_version);
send_cap_queries(adapter);
break;
case QUERY_CAPABILITY_RSP:
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
default y
help
Say Y here if you want to support offloading connection tracking rules
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
- u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
- u8 cq_period_mode);
+
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
+
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
- do { \
- u16 *__policy = &(policy); \
- bool _write = (write); \
- \
- if (_write && *__policy) \
- *__policy = find_first_bit((u_long *)__policy, \
- sizeof(u16) * BITS_PER_BYTE);\
- MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
- if (!_write && *__policy) \
- *__policy = 1 << *__policy; \
+#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
+ do { \
+ unsigned long policy_long; \
+ u16 *__policy = &(policy); \
+ bool _write = (write); \
+ \
+ policy_long = *__policy; \
+ if (_write && *__policy) \
+ *__policy = find_first_bit(&policy_long, \
+ sizeof(policy_long) * BITS_PER_BYTE);\
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
+ if (!_write && *__policy) \
+ *__policy = 1 << *__policy; \
} while (0)
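The widened policy_long sidesteps a classic find_first_bit() pitfall; a
minimal sketch of the hazard being avoided (standalone values):

	u16 policy = BIT(3);
	unsigned long tmp = policy;	/* widen before scanning */

	/* find_first_bit() dereferences whole unsigned longs, so scanning
	 * a casted &policy would read bytes beyond the u16; scanning the
	 * widened copy safely returns 3. */
	bit = find_first_bit(&tmp, sizeof(tmp) * BITS_PER_BYTE);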
/* get/set FEC admin field for a given speed */
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
+ bool reset_rx, reset_tx;
int err = 0;
- bool reset;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
}
/* we are opened */
- reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) ||
- (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled);
+ reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
+ reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
- if (!reset) {
+ if (!reset_rx && !reset_tx) {
mlx5e_set_priv_channels_coalesce(priv, coal);
priv->channels.params = new_channels.params;
goto out;
}
+ if (reset_rx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER);
+
+ mlx5e_reset_rx_moderation(&new_channels.params, mode);
+ }
+ if (reset_tx) {
+ u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER);
+
+ mlx5e_reset_tx_moderation(&new_channels.params, mode);
+ }
+
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
out:
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
- u_long active_fec = 0;
+ unsigned long active_fec_long;
+ u32 active_fec;
u32 bitn;
int err;
- err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL);
+ err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
if (err)
return (err == -EOPNOTSUPP) ? 0 : err;
MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
+ active_fec_long = active_fec;
/* active fec is a bit set, find out which bit is set and
* advertise the corresponding ethtool bit
*/
- bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE);
+ bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
__set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
link_ksettings->link_modes.advertising);
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- u16 fec_configured = 0;
- u32 fec_active = 0;
+ u16 fec_configured;
+ u32 fec_active;
int err;
err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
if (err)
return err;
- fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
- sizeof(u32) * BITS_PER_BYTE);
+ fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
+ sizeof(unsigned long) * BITS_PER_BYTE);
if (!fecparam->active_fec)
return -EOPNOTSUPP;
- fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
- sizeof(u16) * BITS_PER_BYTE);
+ fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
+ sizeof(unsigned long) * BITS_PER_BYTE);
return 0;
}
DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->tx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
} else {
params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
}
-
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->rx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
} else {
params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
}
+}
+
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_tx_moderation(params, cq_period_mode);
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ mlx5e_reset_rx_moderation(params, cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
flow_rule_match_meta(rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
ingress_dev = __dev_get_by_index(dev_net(filter_dev),
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
- return -EINVAL;
+ return -ENOENT;
}
if (ingress_dev != filter_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't match on the ingress filter port");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
return 0;
if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
- netdev_warn(priv->netdev,
- "devices %s %s not on same switch HW, can't offload forwarding\n",
- priv->netdev->name,
- out_dev->name);
return -EOPNOTSUPP;
}
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
- flow_stats_update(&ma->stats, dpkts, dbytes, jiffies,
+ flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
FLOW_ACTION_HW_STATS_DELAYED);
}
mlx5_pci_disable_device(dev);
}
+static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_unload_one(dev, false);
+
+ return 0;
+}
+
+static int mlx5_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ return mlx5_load_one(dev, false);
+}
+
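/*
 * These legacy PCI PM hooks implement suspend/resume as a full unload and
 * reload of the device; no finer-grained state save is attempted.
 */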
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
+ .suspend = mlx5_suspend,
+ .resume = mlx5_resume,
.shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
priv->stats[ctx_id].pkts += pkts;
priv->stats[ctx_id].bytes += bytes;
- max_t(u64, priv->stats[ctx_id].used, used);
+ priv->stats[ctx_id].used = max_t(u64, used,
+ priv->stats[ctx_id].used);
}
}
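/* max_t() only computes a value; the fix above assigns the result instead
 * of discarding it, so ->used keeps the most recent value rather than
 * going stale.
 */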
ahw->diag_cnt = 0;
ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
if (ret)
- goto fail_diag_irq;
+ goto fail_mbx_args;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[0].id;
done:
qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
fail_diag_irq:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- ts_event_en = PTP_TCR_TSEVNTENA;
+ if (priv->synopsys_id != DWMAC_CORE_5_10)
+ ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
memcpy(atr_res->gbi, atr_req->gbi, gb_len);
r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi,
gb_len);
- if (r < 0)
+ if (r < 0) {
+ kfree_skb(skb);
return r;
+ }
}
info->dep_info.curr_nfc_dep_pni = 0;
/*
* Called only on a device that has been disabled and after all other threads
- * that can check this device's completion queues have synced. This is the
- * last chance for the driver to see a natural completion before
- * nvme_cancel_request() terminates all incomplete requests.
+ * that can check this device's completion queues have synced, except
+ * nvme_poll(). This is the last chance for the driver to see a natural
+ * completion before nvme_cancel_request() terminates all incomplete requests.
*/
static void nvme_reap_pending_cqes(struct nvme_dev *dev)
{
int i;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+ spin_lock(&dev->queues[i].cq_poll_lock);
nvme_process_cq(&dev->queues[i]);
+ spin_unlock(&dev->queues[i].cq_poll_lock);
+ }
}
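/*
 * nvme_poll() may still inspect these CQs without interrupts; holding
 * cq_poll_lock for the final pass keeps the reap from racing with it.
 */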
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
spin_unlock_irqrestore(&client->lock, flags);
}
- mbox_send_message(client->chan, pkt);
+ err = mbox_send_message(client->chan, pkt);
+ if (err < 0)
+ return err;
/* We can send next packet immediately, so just call txdone. */
mbox_client_txdone(client->chan, 0);
(!regset->active || regset->active(t->task, regset) > 0)) {
int ret;
size_t size = regset_size(t->task, regset);
- void *data = kmalloc(size, GFP_KERNEL);
+ void *data = kzalloc(size, GFP_KERNEL);
if (unlikely(!data))
return 0;
ret = regset->get(t->task, regset,
__ceph_queue_cap_release(session, cap);
spin_unlock(&session->s_cap_lock);
}
- goto done;
+ goto flush_cap_releases;
}
/* these will work even if we don't have a cap yet */
unsigned int bsize = sdp->sd_sb.sb_bsize, off;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
unsigned int shift = PAGE_SHIFT - bsize_shift;
- unsigned int max_bio_size = 2 * 1024 * 1024;
+ unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
struct gfs2_journal_extent *je;
int sz, ret = 0;
struct bio *bio = NULL;
struct page *page = NULL;
- bool bio_chained = false, done = false;
+ bool done = false;
errseq_t since;
memset(head, 0, sizeof(*head));
off = 0;
}
- if (!bio || (bio_chained && !off) ||
- bio->bi_iter.bi_size >= max_bio_size) {
- /* start new bio */
- } else {
+ if (bio && (off || block < blocks_submitted + max_blocks)) {
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
(PAGE_SIZE - off) >> bsize_shift;
bio = gfs2_chain_bio(bio, blocks);
- bio_chained = true;
goto add_block_to_new_bio;
}
}
if (bio) {
- blocks_submitted = block + 1;
+ blocks_submitted = block;
submit_bio(bio);
}
bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
bio->bi_opf = REQ_OP_READ;
- bio_chained = false;
add_block_to_new_bio:
sz = bio_add_page(bio, page, bsize, off);
BUG_ON(sz != bsize);
off += bsize;
if (off == PAGE_SIZE)
page = NULL;
- if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
+ if (blocks_submitted <= blocks_read + max_blocks) {
/* Keep at least one bio in flight */
continue;
}
BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19);
mask = fanotify_group_event_mask(group, iter_info, mask, data,
data_type);
struct simple_xattr *new_xattr = NULL;
int err = 0;
+ if (removed_size)
+ *removed_size = -1;
+
/* value == NULL means remove */
if (value) {
new_xattr = simple_xattr_alloc(value, size);
list_add(&new_xattr->list, &xattrs->head);
xattr = NULL;
}
-
- if (removed_size)
- *removed_size = -1;
out:
spin_unlock(&xattrs->lock);
if (xattr) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
- #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #define cpumask_of_node(node) ((void)(node), cpu_online_mask)
#endif
#endif
#ifndef pcibus_to_node
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fs.h>
-#include <linux/bpf-cgroup.h>
#define DEVCG_ACC_MKNOD 1
#define DEVCG_ACC_READ 2
#define DEVCG_DEV_CHAR 2
#define DEVCG_DEV_ALL 4 /* this represents all devices */
-#ifdef CONFIG_CGROUP_DEVICE
-int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access);
-#else
-static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
-{ return 0; }
-#endif
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
}
#else
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
+{ return 0; }
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{ return 0; }
static inline int devcgroup_inode_mknod(int mode, dev_t dev)
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
*/
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
- FAN_DIR_MODIFY)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
}
/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
/*
* public include for LM8333 keypad driver - same license as driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#ifndef _LM8333_H
extern void kvfree(const void *addr);
+/*
+ * Mapcount of a compound page as a whole; does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages or any of their tail sub-pages.
+ */
static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
int __page_mapcount(struct page *page);
+/*
+ * Mapcount of 0-order page; when compound sub-page, includes
+ * compound_mapcount().
+ *
+ * Result is undefined for pages which cannot be mapped into userspace.
+ * For example SLAB or special types of pages. See function page_has_type().
+ * They use this place in struct page differently.
+ */
static inline int page_mapcount(struct page *page)
{
- VM_BUG_ON_PAGE(PageSlab(page), page);
-
if (unlikely(PageCompound(page)))
return __page_mapcount(page);
return atomic_read(&page->_mapcount) + 1;
#include <net/netfilter/nf_conntrack_expect.h>
#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);
/* state of the control session */
enum pptp_ctrlsess_state {
{
unsigned int gso_type = 0;
unsigned int thlen = 0;
+ unsigned int p_off = 0;
unsigned int ip_proto;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
- if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
+ p_off = skb_transport_offset(skb) + thlen;
+ if (p_off > skb_headlen(skb))
return -EINVAL;
} else {
/* gso packets without NEEDS_CSUM do not set transport_offset.
return -EINVAL;
}
- if (keys.control.thoff + thlen > skb_headlen(skb) ||
+ p_off = keys.control.thoff + thlen;
+ if (p_off > skb_headlen(skb) ||
keys.basic.ip_proto != ip_proto)
return -EINVAL;
skb_set_transport_header(skb, keys.control.thoff);
+ } else if (gso_type) {
+ p_off = thlen;
+ if (p_off > skb_headlen(skb))
+ return -EINVAL;
}
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size);
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
- skb_shinfo(skb)->gso_size = gso_size;
- skb_shinfo(skb)->gso_type = gso_type;
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - p_off > gso_size) {
+ shinfo->gso_size = gso_size;
+ shinfo->gso_type = gso_type;
- /* Header must be checked, and gso_segs computed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ /* Header must be checked, and gso_segs computed. */
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
+ }
}
return 0;
struct espintcp_msg partial;
void (*saved_data_ready)(struct sock *sk);
void (*saved_write_space)(struct sock *sk);
+ void (*saved_destruct)(struct sock *sk);
struct work_struct work;
bool tx_running;
};
#endif
int fib_unmerge(struct net *net);
+static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc,
+                                          const struct net_device *dev)
+{
+ if (nhc->nhc_dev == dev ||
+ l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex)
+ return true;
+
+ return false;
+}
+
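/* The helper matches either the nexthop's own device or, when that device
 * is enslaved to an L3 master (VRF), the master itself.
 */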
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+ const struct flowi4 *flp);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
};
struct nh_group {
+ struct nh_group *spare; /* spare group for removals */
u16 num_nh;
bool mpath;
bool has_v4;
{
unsigned int rc = 1;
- if (nexthop_is_multipath(nh)) {
+ if (nh->is_group) {
struct nh_group *nh_grp;
nh_grp = rcu_dereference_rtnl(nh->nh_grp);
- rc = nh_grp->num_nh;
+ if (nh_grp->mpath)
+ rc = nh_grp->num_nh;
}
return rc;
}
static inline
-struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
+struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel)
{
- const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
-
/* for_nexthops macros in fib_semantics.c grabs a pointer to
* the nexthop before checking nhsel
*/
{
const struct nh_info *nhi;
- if (nexthop_is_multipath(nh)) {
- if (nexthop_num_path(nh) > 1)
- return false;
- nh = nexthop_mpath_select(nh, 0);
- if (!nh)
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->num_nh > 1)
return false;
+
+ nh = nh_grp->nh_entries[0].nh;
}
nhi = rcu_dereference_rtnl(nh->nh_info);
BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0);
BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0);
- if (nexthop_is_multipath(nh)) {
- nh = nexthop_mpath_select(nh, nhsel);
- if (!nh)
- return NULL;
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ if (nh_grp->mpath) {
+ nh = nexthop_mpath_select(nh_grp, nhsel);
+ if (!nh)
+ return NULL;
+ }
}
nhi = rcu_dereference_rtnl(nh->nh_info);
return &nhi->fib_nhc;
}
+/* called from fib_table_lookup with rcu_lock */
+static inline
+struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh,
+ int fib_flags,
+ const struct flowi4 *flp,
+ int *nhsel)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+ nhi = rcu_dereference(nhe->nh_info);
+ if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+ *nhsel = i;
+ return &nhi->fib_nhc;
+ }
+ }
+ } else {
+ nhi = rcu_dereference(nh->nh_info);
+ if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) {
+ *nhsel = 0;
+ return &nhi->fib_nhc;
+ }
+ }
+
+ return NULL;
+}
+
+static inline bool nexthop_uses_dev(const struct nexthop *nh,
+ const struct net_device *dev)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nhg = rcu_dereference(nh->nh_grp);
+ int i;
+
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nexthop *nhe = nhg->nh_entries[i].nh;
+
+ nhi = rcu_dereference(nhe->nh_info);
+ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+ return true;
+ }
+ } else {
+ nhi = rcu_dereference(nh->nh_info);
+ if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev))
+ return true;
+ }
+
+ return false;
+}
+
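/* Both helpers above use plain rcu_dereference(), so callers must hold
 * rcu_read_lock(), as the nexthop_get_nhc_lookup() comment notes.
 */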
static inline unsigned int fib_info_num_path(const struct fib_info *fi)
{
if (unlikely(fi->nh))
{
struct nh_info *nhi;
- if (nexthop_is_multipath(nh)) {
- nh = nexthop_mpath_select(nh, 0);
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_rtnl(nh->nh_grp);
+ nh = nexthop_mpath_select(nh_grp, 0);
if (!nh)
return NULL;
}
struct tls_rec *open_rec;
struct list_head tx_list;
atomic_t encrypt_pending;
+ /* protect crypto_wait with encrypt_pending */
+ spinlock_t encrypt_compl_lock;
int async_notify;
u8 async_capable:1;
u8 async_capable:1;
u8 decrypted:1;
atomic_t decrypt_pending;
+ /* protect crypto_wait with decrypt_pending */
+ spinlock_t decrypt_compl_lock;
bool async_notify;
};
static inline void uobj_put_destroy(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
}
static inline void uobj_put_read(struct ib_uobject *uobj)
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
- XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+ XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */
XFRMA_SET_MARK, /* __u32 */
XFRMA_SET_MARK_MASK, /* __u32 */
XFRMA_IF_ID, /* __u32 */
* but must be non-negative; otherwise set to worst-case bounds
* and refine later from tnum.
*/
- if (reg->s32_min_value > 0)
- reg->smin_value = reg->s32_min_value;
- else
- reg->smin_value = 0;
- if (reg->s32_max_value > 0)
+ if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
reg->smax_value = reg->s32_max_value;
else
reg->smax_value = U32_MAX;
+ if (reg->s32_min_value >= 0)
+ reg->smin_value = reg->s32_min_value;
+ else
+ reg->smin_value = 0;
}
static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
}
#define SECURITY_PREFIX "security_"
-static int check_attach_modify_return(struct bpf_verifier_env *env)
+static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
{
- struct bpf_prog *prog = env->prog;
- unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr;
-
- /* This is expected to be cleaned up in the future with the KRSI effort
- * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h.
- */
if (within_error_injection_list(addr) ||
!strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
sizeof(SECURITY_PREFIX) - 1))
return 0;
- verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n",
- prog->aux->attach_btf_id, prog->aux->attach_func_name);
-
return -EINVAL;
}
goto out;
}
}
+
+ if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
+ ret = check_attach_modify_return(prog, addr);
+ if (ret)
+ verbose(env, "%s() is not modifiable\n",
+ prog->aux->attach_func_name);
+ }
+
+ if (ret)
+ goto out;
tr->func.addr = (void *)addr;
prog->aux->trampoline = tr;
-
- if (prog->expected_attach_type == BPF_MODIFY_RETURN)
- ret = check_attach_modify_return(env);
out:
mutex_unlock(&tr->mutex);
if (ret)
return;
/*
- * Paired with the one in cgroup_rstat_cpu_pop_updated(). Either we
- * see NULL updated_next or they see our updated stat.
- */
- smp_mb();
-
- /*
* Speculative already-on-list test. This may race, leading to
* temporary inaccuracies, which is fine.
+ *
* Because @parent's updated_children is terminated with @parent
* instead of NULL, we can tell whether @cgrp is on the list by
* testing the next pointer for NULL.
*nextp = rstatc->updated_next;
rstatc->updated_next = NULL;
- /*
- * Paired with the one in cgroup_rstat_cpu_updated().
- * Either they see NULL updated_next or we see their
- * updated stat.
- */
- smp_mb();
-
return pos;
}
/*
* We don't care about NUMA placement if we don't have memory.
*/
- if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+ if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
return;
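	/* A kthread can temporarily adopt an mm (e.g. via use_mm()), so
	 * checking PF_KTHREAD is more reliable here than testing curr->mm
	 * alone.
	 */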
/*
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL)) {
result = SCAN_PAGE_HAS_PRIVATE;
+ putback_lru_page(page);
goto out_unlock;
}
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
+#include <linux/kmemleak.h>
/*
* NCHUNKS_ORDER determines the internal allocation granularity, effectively
(gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
if (slots) {
+ /* It will be freed separately in free_handle(). */
+ kmemleak_not_leak(slots);
memset(slots->slot, 0, sizeof(slots->slot));
slots->pool = (unsigned long)pool;
rwlock_init(&slots->lock);
free_percpu(br->mcast_stats);
}
-static void mcast_stats_add_dir(u64 *dst, u64 *src)
+/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
+static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
eth->h_proto = eth_hdr(oldskb)->h_proto;
skb_pull(nskb, ETH_HLEN);
+
+ if (skb_vlan_tag_present(oldskb)) {
+ u16 vid = skb_vlan_tag_get(oldskb);
+
+ __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
+ }
}
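/* Copying the VLAN tag ensures the generated reject packet is transmitted
 * on the same VLAN as the original frame it answers.
 */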
static int nft_bridge_iphdr_validate(struct sk_buff *skb)
* supported.
*/
req->r_t.target_oloc.pool = m.redirect.oloc.pool;
- req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
+ req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
+ CEPH_OSD_FLAG_IGNORE_OVERLAY |
+ CEPH_OSD_FLAG_IGNORE_CACHE;
req->r_tid = 0;
__submit_request(req, false);
goto out_unlock_osdc;
}
if (neigh->nud_state & NUD_IN_TIMER) {
- if (time_before(next, jiffies + HZ/2))
- next = jiffies + HZ/2;
+ if (time_before(next, jiffies + HZ/100))
+ next = jiffies + HZ/100;
if (!mod_timer(&neigh->timer, next))
neigh_hold(neigh);
}
if (ds->ops->port_vlan_add && ds->ops->port_vlan_del)
slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
slave_dev->hw_features |= NETIF_F_HW_TC;
+ slave_dev->features |= NETIF_F_LLTX;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
if (!IS_ERR_OR_NULL(port->mac))
ether_addr_copy(slave_dev->dev_addr, port->mac);
err = devinet_sysctl_register(in_dev);
if (err) {
in_dev->dead = 1;
+ neigh_parms_release(&arp_tbl, in_dev->arp_parms);
in_dev_put(in_dev);
in_dev = NULL;
goto out;
sp->olen++;
xo = xfrm_offload(skb);
- if (!xo) {
- xfrm_state_put(x);
+ if (!xo)
goto out_reset;
- }
}
xo->flags |= XFRM_GRO;
struct xfrm_offload *xo = xfrm_offload(skb);
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
- int proto = xo->proto;
+ u8 proto = xo->proto;
skb->transport_header += x->props.header_len;
- if (proto == IPPROTO_BEETPH) {
- struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
+ if (x->sel.family != AF_INET6) {
+ if (proto == IPPROTO_BEETPH) {
+ struct ip_beet_phdr *ph =
+ (struct ip_beet_phdr *)skb->data;
+
+ skb->transport_header += ph->hdrlen * 8;
+ proto = ph->nexthdr;
+ } else {
+ skb->transport_header -= IPV4_BEET_PHMAXLEN;
+ }
+ } else {
+ __be16 frag;
- skb->transport_header += ph->hdrlen * 8;
- proto = ph->nexthdr;
- } else if (x->sel.family != AF_INET6) {
- skb->transport_header -= IPV4_BEET_PHMAXLEN;
- } else if (proto == IPPROTO_TCP) {
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ skb->transport_header +=
+ ipv6_skip_exthdr(skb, 0, &proto, &frag);
+ if (proto == IPPROTO_TCP)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
}
__skb_pull(skb, skb_transport_offset(skb));
{
bool dev_match = false;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- int ret;
+ if (unlikely(fi->nh)) {
+ dev_match = nexthop_uses_dev(fi->nh, dev);
+ } else {
+ int ret;
- for (ret = 0; ret < fib_info_num_path(fi); ret++) {
- const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
+ for (ret = 0; ret < fib_info_num_path(fi); ret++) {
+ const struct fib_nh_common *nhc = fib_info_nhc(fi, ret);
- if (nhc->nhc_dev == dev) {
- dev_match = true;
- break;
- } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) {
- dev_match = true;
- break;
+ if (nhc_l3mdev_matches_dev(nhc, dev)) {
+ dev_match = true;
+ break;
+ }
}
}
#else
return (key ^ prefix) & (prefix | -prefix);
}
+bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags,
+ const struct flowi4 *flp)
+{
+ if (nhc->nhc_flags & RTNH_F_DEAD)
+ return false;
+
+ if (ip_ignore_linkdown(nhc->nhc_dev) &&
+ nhc->nhc_flags & RTNH_F_LINKDOWN &&
+ !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
+ return false;
+
+ if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
+ if (flp->flowi4_oif &&
+ flp->flowi4_oif != nhc->nhc_oif)
+ return false;
+ }
+
+ return true;
+}
+
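/* fib_lookup_good_nhc() centralizes the dead/linkdown/oif checks so that
 * both the legacy per-fib_nh loop and the nexthop-object lookup path can
 * share one predicate.
 */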
/* should be called with rcu_read_lock */
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags)
/* Step 3: Process the leaf, if that fails fall back to backtracing */
hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
+ struct fib_nh_common *nhc;
int nhsel, err;
if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
if (fi->fib_flags & RTNH_F_DEAD)
continue;
- if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) {
- err = fib_props[RTN_BLACKHOLE].error;
- goto out_reject;
+ if (unlikely(fi->nh)) {
+ if (nexthop_is_blackhole(fi->nh)) {
+ err = fib_props[RTN_BLACKHOLE].error;
+ goto out_reject;
+ }
+
+ nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp,
+ &nhsel);
+ if (nhc)
+ goto set_result;
+ goto miss;
}
for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
- struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+ nhc = fib_info_nhc(fi, nhsel);
- if (nhc->nhc_flags & RTNH_F_DEAD)
+ if (!fib_lookup_good_nhc(nhc, fib_flags, flp))
continue;
- if (ip_ignore_linkdown(nhc->nhc_dev) &&
- nhc->nhc_flags & RTNH_F_LINKDOWN &&
- !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
- continue;
- if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
- if (flp->flowi4_oif &&
- flp->flowi4_oif != nhc->nhc_oif)
- continue;
- }
-
+set_result:
if (!(fib_flags & FIB_LOOKUP_NOREF))
refcount_inc(&fi->fib_clntref);
return err;
}
}
+miss:
#ifdef CONFIG_IP_FIB_TRIE_STATS
this_cpu_inc(stats->semantic_match_miss);
#endif
static int vti_rcv_tunnel(struct sk_buff *skb)
{
- return vti_rcv(skb, ip_hdr(skb)->saddr, true);
+ struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
+ const struct iphdr *iph = ip_hdr(skb);
+ struct ip_tunnel *tunnel;
+
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
+ if (tunnel) {
+ struct tnl_ptk_info tpi = {
+ .proto = htons(ETH_P_IP),
+ };
+
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto drop;
+ if (iptunnel_pull_header(skb, 0, tpi.proto, false))
+ goto drop;
+ return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
+ }
+
+ return -EINVAL;
+drop:
+ kfree_skb(skb);
+ return 0;
}
static int vti_rcv_cb(struct sk_buff *skb, int err)
break;
default:
pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
- pptp_msg_name[0]);
+ pptp_msg_name(msg));
fallthrough;
case PPTP_SET_LINK_INFO:
/* only need to NAT in case PAC is behind NAT box */
pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
break;
default:
- pr_debug("unknown inbound packet %s\n",
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
- pptp_msg_name[0]);
+ pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
fallthrough;
case PPTP_START_SESSION_REQUEST:
case PPTP_START_SESSION_REPLY:
int i;
nhg = rcu_dereference_raw(nh->nh_grp);
- for (i = 0; i < nhg->num_nh; ++i)
- WARN_ON(nhg->nh_entries[i].nh);
+ for (i = 0; i < nhg->num_nh; ++i) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+
+ WARN_ON(!list_empty(&nhge->nh_list));
+ nexthop_put(nhge->nh);
+ }
+
+ WARN_ON(nhg->spare == nhg);
+ kfree(nhg->spare);
kfree(nhg);
}
}
}
-static void remove_nh_grp_entry(struct nh_grp_entry *nhge,
- struct nh_group *nhg,
+static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
struct nl_info *nlinfo)
{
+ struct nh_grp_entry *nhges, *new_nhges;
+ struct nexthop *nhp = nhge->nh_parent;
struct nexthop *nh = nhge->nh;
- struct nh_grp_entry *nhges;
- bool found = false;
- int i;
+ struct nh_group *nhg, *newg;
+ int i, j;
WARN_ON(!nh);
- nhges = nhg->nh_entries;
- for (i = 0; i < nhg->num_nh; ++i) {
- if (found) {
- nhges[i-1].nh = nhges[i].nh;
- nhges[i-1].weight = nhges[i].weight;
- list_del(&nhges[i].nh_list);
- list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list);
- } else if (nhg->nh_entries[i].nh == nh) {
- found = true;
- }
- }
+ nhg = rtnl_dereference(nhp->nh_grp);
+ newg = nhg->spare;
- if (WARN_ON(!found))
+ /* last entry, keep it visible and remove the parent */
+ if (nhg->num_nh == 1) {
+ remove_nexthop(net, nhp, nlinfo);
return;
+ }
+
+ newg->has_v4 = nhg->has_v4;
+ newg->mpath = nhg->mpath;
+ newg->num_nh = nhg->num_nh;
- nhg->num_nh--;
- nhg->nh_entries[nhg->num_nh].nh = NULL;
+ /* copy old entries to new except the one getting removed */
+ nhges = nhg->nh_entries;
+ new_nhges = newg->nh_entries;
+ for (i = 0, j = 0; i < nhg->num_nh; ++i) {
+ /* current nexthop getting removed */
+ if (nhg->nh_entries[i].nh == nh) {
+ newg->num_nh--;
+ continue;
+ }
- nh_group_rebalance(nhg);
+ list_del(&nhges[i].nh_list);
+ new_nhges[j].nh_parent = nhges[i].nh_parent;
+ new_nhges[j].nh = nhges[i].nh;
+ new_nhges[j].weight = nhges[i].weight;
+ list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
+ j++;
+ }
- nexthop_put(nh);
+ nh_group_rebalance(newg);
+ rcu_assign_pointer(nhp->nh_grp, newg);
+
+ list_del(&nhge->nh_list);
+ nexthop_put(nhge->nh);
if (nlinfo)
- nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo);
+ nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}
static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
{
struct nh_grp_entry *nhge, *tmp;
- list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) {
- struct nh_group *nhg;
-
- list_del(&nhge->nh_list);
- nhg = rtnl_dereference(nhge->nh_parent->nh_grp);
- remove_nh_grp_entry(nhge, nhg, nlinfo);
+ list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
+ remove_nh_grp_entry(net, nhge, nlinfo);
- /* if this group has no more entries then remove it */
- if (!nhg->num_nh)
- remove_nexthop(net, nhge->nh_parent, nlinfo);
- }
+ /* make sure all see the newly published array before releasing rtnl */
+ synchronize_rcu();
}
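/* The old nh_entries array may still be referenced by RCU readers, so the
 * synchronize_rcu() above must complete before rtnl is released and the
 * stale group can be freed.
 */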
static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
if (WARN_ON(!nhge->nh))
continue;
- list_del(&nhge->nh_list);
- nexthop_put(nhge->nh);
- nhge->nh = NULL;
- nhg->num_nh--;
+ list_del_init(&nhge->nh_list);
}
}
{
struct nlattr *grps_attr = cfg->nh_grp;
struct nexthop_grp *entry = nla_data(grps_attr);
+ u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
struct nh_group *nhg;
struct nexthop *nh;
int i;
nh->is_group = 1;
- nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry));
+ nhg = nexthop_grp_alloc(num_nh);
if (!nhg) {
kfree(nh);
return ERR_PTR(-ENOMEM);
}
+ /* spare group used for removals */
+ nhg->spare = nexthop_grp_alloc(num_nh);
+ if (!nhg->spare) {
+ kfree(nhg);
+ kfree(nh);
+ return ERR_PTR(-ENOMEM);
+ }
+ nhg->spare->spare = nhg;
+
for (i = 0; i < nhg->num_nh; ++i) {
struct nexthop *nhe;
struct nh_info *nhi;
for (; i >= 0; --i)
nexthop_put(nhg->nh_entries[i].nh);
+ kfree(nhg->spare);
kfree(nhg);
kfree(nh);
sp->olen++;
xo = xfrm_offload(skb);
- if (!xo) {
- xfrm_state_put(x);
+ if (!xo)
goto out_reset;
- }
}
xo->flags |= XFRM_GRO;
struct ip_esp_hdr *esph;
struct ipv6hdr *iph = ipv6_hdr(skb);
struct xfrm_offload *xo = xfrm_offload(skb);
- int proto = iph->nexthdr;
+ u8 proto = iph->nexthdr;
skb_push(skb, -skb_network_offset(skb));
+
+ if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
+ __be16 frag;
+
+ ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
+ }
+
esph = ip_esp_hdr(skb);
*skb_mac_header(skb) = IPPROTO_ESP;
struct xfrm_offload *xo = xfrm_offload(skb);
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
- int proto = xo->proto;
+ u8 proto = xo->proto;
skb->transport_header += x->props.header_len;
- if (proto == IPPROTO_BEETPH) {
- struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;
-
- skb->transport_header += ph->hdrlen * 8;
- proto = ph->nexthdr;
- }
-
if (x->sel.family != AF_INET6) {
skb->transport_header -=
(sizeof(struct ipv6hdr) - sizeof(struct iphdr));
+ if (proto == IPPROTO_BEETPH) {
+ struct ip_beet_phdr *ph =
+ (struct ip_beet_phdr *)skb->data;
+
+ skb->transport_header += ph->hdrlen * 8;
+ proto = ph->nexthdr;
+ } else {
+ skb->transport_header -= IPV4_BEET_PHMAXLEN;
+ }
+
if (proto == IPPROTO_TCP)
skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ } else {
+ __be16 frag;
+
+ skb->transport_header +=
+ ipv6_skip_exthdr(skb, 0, &proto, &frag);
}
__skb_pull(skb, skb_transport_offset(skb));
if (sk->sk_type != SOCK_DGRAM)
return -EPROTONOSUPPORT;
+ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+ return -EPROTONOSUPPORT;
+
if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
(encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
return -EPROTONOSUPPORT;
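	/* The tunnel socket arrives via a user-supplied fd, so its family
	 * has to be validated before it is treated as an inet encapsulation
	 * socket.
	 */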
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
-#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
return 0;
}
-static int l2tp_ip_open(struct sock *sk)
+static int l2tp_ip_hash(struct sock *sk)
{
- /* Prevent autobind. We don't have ports. */
- inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ if (sk_unhashed(sk)) {
+ write_lock_bh(&l2tp_ip_lock);
+ sk_add_node(sk, &l2tp_ip_table);
+ write_unlock_bh(&l2tp_ip_lock);
+ }
+ return 0;
+}
+static void l2tp_ip_unhash(struct sock *sk)
+{
+ if (sk_unhashed(sk))
+ return;
write_lock_bh(&l2tp_ip_lock);
- sk_add_node(sk, &l2tp_ip_table);
+ sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip_lock);
+}
+
+static int l2tp_ip_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ l2tp_ip_hash(sk);
return 0;
}
.sendmsg = l2tp_ip_sendmsg,
.recvmsg = l2tp_ip_recvmsg,
.backlog_rcv = l2tp_ip_backlog_recv,
- .hash = inet_hash,
- .unhash = inet_unhash,
+ .hash = l2tp_ip_hash,
+ .unhash = l2tp_ip_unhash,
.obj_size = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
return 0;
}
-static int l2tp_ip6_open(struct sock *sk)
+static int l2tp_ip6_hash(struct sock *sk)
{
- /* Prevent autobind. We don't have ports. */
- inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ if (sk_unhashed(sk)) {
+ write_lock_bh(&l2tp_ip6_lock);
+ sk_add_node(sk, &l2tp_ip6_table);
+ write_unlock_bh(&l2tp_ip6_lock);
+ }
+ return 0;
+}
+static void l2tp_ip6_unhash(struct sock *sk)
+{
+ if (sk_unhashed(sk))
+ return;
write_lock_bh(&l2tp_ip6_lock);
- sk_add_node(sk, &l2tp_ip6_table);
+ sk_del_node_init(sk);
write_unlock_bh(&l2tp_ip6_lock);
+}
+
+static int l2tp_ip6_open(struct sock *sk)
+{
+ /* Prevent autobind. We don't have ports. */
+ inet_sk(sk)->inet_num = IPPROTO_L2TP;
+ l2tp_ip6_hash(sk);
return 0;
}
.sendmsg = l2tp_ip6_sendmsg,
.recvmsg = l2tp_ip6_recvmsg,
.backlog_rcv = l2tp_ip6_backlog_recv,
- .hash = inet6_hash,
- .unhash = inet_unhash,
+ .hash = l2tp_ip6_hash,
+ .unhash = l2tp_ip6_unhash,
.obj_size = sizeof(struct l2tp_ip6_sock),
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
target_flags, mpath->dst, mpath->sn, da, 0,
ttl, lifetime, 0, ifmsh->preq_id++, sdata);
+
+ spin_lock_bh(&mpath->state_lock);
+ if (mpath->flags & MESH_PATH_DELETED) {
+ spin_unlock_bh(&mpath->state_lock);
+ goto enddiscovery;
+ }
mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+ spin_unlock_bh(&mpath->state_lock);
enddiscovery:
rcu_read_unlock();
pr_debug("block timeout %ld", timeo);
mptcp_wait_data(sk, &timeo);
- if (unlikely(__mptcp_tcp_fallback(msk)))
+ ssock = __mptcp_tcp_fallback(msk);
+ if (unlikely(ssock))
goto fallback;
}
lock_sock(sk);
- mptcp_token_destroy(msk->token);
inet_sk_state_store(sk, TCP_CLOSE);
- __mptcp_flush_join_list(msk);
-
+ /* be sure to always acquire the join list lock, to sync vs
+ * mptcp_finish_join().
+ */
+ spin_lock_bh(&msk->join_list_lock);
+ list_splice_tail_init(&msk->join_list, &msk->conn_list);
+ spin_unlock_bh(&msk->join_list_lock);
list_splice_init(&msk->conn_list, &conn_list);
data_fin_tx_seq = msk->write_seq;
{
struct mptcp_sock *msk = mptcp_sk(sk);
+ mptcp_token_destroy(msk->token);
if (msk->cached_ext)
__skb_ext_put(msk->cached_ext);
if (!msk->pm.server_side)
return true;
- /* passive connection, attach to msk socket */
+ if (!mptcp_pm_allow_new_subflow(msk))
+ return false;
+
+ /* active connections are already on conn_list, and we can't acquire
+ * msk lock here.
+ * use the join list lock as synchronization point and double-check
+ * msk status to avoid racing with mptcp_close()
+ */
+ spin_lock_bh(&msk->join_list_lock);
+ ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
+ if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
+ list_add_tail(&subflow->node, &msk->join_list);
+ spin_unlock_bh(&msk->join_list_lock);
+ if (!ret)
+ return false;
+
+ /* attach to msk socket only after we are sure it will deal with us
+ * at close time
+ */
parent_sock = READ_ONCE(parent->sk_socket);
if (parent_sock && !sk->sk_socket)
mptcp_sock_graft(sk, parent_sock);
-
- ret = mptcp_pm_allow_new_subflow(msk);
- if (ret) {
- subflow->map_seq = msk->ack_seq;
-
- /* active connections are already on conn_list */
- spin_lock_bh(&msk->join_list_lock);
- if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
- list_add_tail(&subflow->node, &msk->join_list);
- spin_unlock_bh(&msk->join_list_lock);
- }
- return ret;
+ subflow->map_seq = msk->ack_seq;
+ return true;
}
bool mptcp_sk_is_subflow(const struct sock *sk)
int err;
lock_sock(sock->sk);
+ if (sock->state != SS_UNCONNECTED && msk->subflow) {
+ /* pending connection or invalid state, let existing subflow
+ * cope with that
+ */
+ ssock = msk->subflow;
+ goto do_connect;
+ }
+
ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
if (IS_ERR(ssock)) {
err = PTR_ERR(ssock);
mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif
+do_connect:
err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
- inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
- mptcp_copy_inaddrs(sock->sk, ssock->sk);
+ sock->state = ssock->state;
+
+ /* on successful connect, the msk state will be moved to established by
+ * subflow_finish_connect()
+ */
+ if (!err || err == -EINPROGRESS)
+ mptcp_copy_inaddrs(sock->sk, ssock->sk);
+ else
+ inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
unlock:
release_sock(sock->sk);
/* Don't lookup sub-counters at all */
opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
- opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
+ opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
list_for_each_entry_rcu(e, &map->members, list) {
ret = ip_set_test(e->id, skb, par, opt);
if (ret <= 0)
nf_conntrack_get(skb_nfct(nskb));
}
-static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
- enum ip_conntrack_info ctinfo;
struct nf_nat_hook *nat_hook;
unsigned int status;
- struct nf_conn *ct;
int dataoff;
u16 l3num;
u8 l4num;
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || nf_ct_is_confirmed(ct))
- return 0;
-
l3num = nf_ct_l3num(ct);
dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
return 0;
}
+/* This packet is coming from userspace via nf_queue, complete the packet
+ * processing after the helper invocation in nf_confirm().
+ */
+static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ const struct nf_conntrack_helper *helper;
+ const struct nf_conn_help *help;
+ int protoff;
+
+ help = nfct_help(ct);
+ if (!help)
+ return 0;
+
+ helper = rcu_dereference(help->helper);
+ if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+ return 0;
+
+ switch (nf_ct_l3num(ct)) {
+ case NFPROTO_IPV4:
+ protoff = skb_network_offset(skb) + ip_hdrlen(skb);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case NFPROTO_IPV6: {
+ __be16 frag_off;
+ u8 pnum;
+
+ pnum = ipv6_hdr(skb)->nexthdr;
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
+ &frag_off);
+ if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+ return 0;
+ break;
+ }
+#endif
+ default:
+ return 0;
+ }
+
+ if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+ !nf_is_loopback_packet(skb)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
+ NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
+ return -1;
+ }
+ }
+
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
+}
+
+static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ int err;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return 0;
+
+ if (!nf_ct_is_confirmed(ct)) {
+ err = __nf_conntrack_update(net, skb, ct, ctinfo);
+ if (err < 0)
+ return err;
+ }
+
+ return nf_confirm_cthelper(skb, ct, ctinfo);
+}
+
static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
{
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
/* PptpControlMessageType names */
-const char *const pptp_msg_name[] = {
- "UNKNOWN_MESSAGE",
- "START_SESSION_REQUEST",
- "START_SESSION_REPLY",
- "STOP_SESSION_REQUEST",
- "STOP_SESSION_REPLY",
- "ECHO_REQUEST",
- "ECHO_REPLY",
- "OUT_CALL_REQUEST",
- "OUT_CALL_REPLY",
- "IN_CALL_REQUEST",
- "IN_CALL_REPLY",
- "IN_CALL_CONNECT",
- "CALL_CLEAR_REQUEST",
- "CALL_DISCONNECT_NOTIFY",
- "WAN_ERROR_NOTIFY",
- "SET_LINK_INFO"
+static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
+ [0] = "UNKNOWN_MESSAGE",
+ [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST",
+ [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY",
+ [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST",
+ [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY",
+ [PPTP_ECHO_REQUEST] = "ECHO_REQUEST",
+ [PPTP_ECHO_REPLY] = "ECHO_REPLY",
+ [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST",
+ [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY",
+ [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST",
+ [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY",
+ [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT",
+ [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST",
+ [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY",
+ [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY",
+ [PPTP_SET_LINK_INFO] = "SET_LINK_INFO"
};
+
+const char *pptp_msg_name(u_int16_t msg)
+{
+ if (msg > PPTP_MSG_MAX)
+ return pptp_msg_name_array[0];
+
+ return pptp_msg_name_array[msg];
+}
EXPORT_SYMBOL(pptp_msg_name);
#endif
typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
msg = ntohs(ctlh->messageType);
- pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
+ pr_debug("inbound control message %s\n", pptp_msg_name(msg));
switch (msg) {
case PPTP_START_SESSION_REPLY:
pcid = pptpReq->ocack.peersCallID;
if (info->pns_call_id != pcid)
goto invalid;
- pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+ pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
ntohs(cid), ntohs(pcid));
if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {
goto invalid;
cid = pptpReq->icreq.callID;
- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->cstate = PPTP_CALL_IN_REQ;
info->pac_call_id = cid;
break;
if (info->pns_call_id != pcid)
goto invalid;
- pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
+ pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
info->cstate = PPTP_CALL_IN_CONF;
/* we expect a GRE connection from PAC to PNS */
case PPTP_CALL_DISCONNECT_NOTIFY:
/* server confirms disconnect */
cid = pptpReq->disc.callID;
- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->cstate = PPTP_CALL_NONE;
/* untrack this call id, unexpect GRE packets */
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+ pptp_msg_name(msg),
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
msg = ntohs(ctlh->messageType);
- pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
+ pr_debug("outbound control message %s\n", pptp_msg_name(msg));
switch (msg) {
case PPTP_START_SESSION_REQUEST:
info->cstate = PPTP_CALL_OUT_REQ;
/* track PNS call id */
cid = pptpReq->ocreq.callID;
- pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+ pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
info->pns_call_id = cid;
break;
pcid = pptpReq->icack.peersCallID;
if (info->pac_call_id != pcid)
goto invalid;
- pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
+ pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
ntohs(cid), ntohs(pcid));
if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {
invalid:
pr_debug("invalid %s: type=%d cid=%u pcid=%u "
"cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
- msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+ pptp_msg_name(msg),
msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
ntohs(info->pns_call_id), ntohs(info->pac_call_id));
return NF_ACCEPT;
if (help->helper->data_len == 0)
return -EINVAL;
- nla_memcpy(help->data, nla_data(attr), sizeof(help->data));
+ nla_memcpy(help->data, attr, sizeof(help->data));
return 0;
}
ret = -ENOMEM;
goto err2;
}
+ helper->data_len = size;
helper->flags |= NF_CT_HELPER_F_USERSPACE;
memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
goto err_sock;
}
+ qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
+ if (!qrtr_ns.workqueue)
+ goto err_sock;
+
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
sq.sq_port = QRTR_PORT_CTRL;
ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq));
if (ret < 0) {
pr_err("failed to bind to socket\n");
- goto err_sock;
+ goto err_wq;
}
qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR;
qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST;
qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL;
- qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
- if (!qrtr_ns.workqueue)
- goto err_sock;
-
ret = say_hello(&qrtr_ns.bcast_sq);
if (ret < 0)
goto err_wq;
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
struct nf_conntrack_tuple target;
+ if (!(ct->status & IPS_NAT_MASK))
+ return 0;
+
nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
switch (tuple->src.l3num) {
goto flow_error;
}
q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
- if (!q->flows_cnt || q->flows_cnt > 65536) {
+ if (!q->flows_cnt || q->flows_cnt >= 65536) {
NL_SET_ERR_MSG_MOD(extack,
- "Number of flows must be < 65536");
+ "Number of flows must range in [1..65535]");
goto flow_error;
}
}
homing at either or both ends of an association."
To compile this protocol support as a module, choose M here: the
- module will be called sctp. Debug messages are handeled by the
+ module will be called sctp. Debug messages are handled by the
kernel's dynamic debugging framework.
If in doubt, say N.
struct sockaddr_storage addr;
struct sctp_ulpevent *event;
+ if (asoc->state < SCTP_STATE_ESTABLISHED)
+ return;
+
memset(&addr, 0, sizeof(struct sockaddr_storage));
memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
kfree(aead_req);
+ spin_lock_bh(&ctx->decrypt_compl_lock);
pending = atomic_dec_return(&ctx->decrypt_pending);
- if (!pending && READ_ONCE(ctx->async_notify))
+ if (!pending && ctx->async_notify)
complete(&ctx->async_wait.completion);
+ spin_unlock_bh(&ctx->decrypt_compl_lock);
}
static int tls_do_decryption(struct sock *sk,
ready = true;
}
+ spin_lock_bh(&ctx->encrypt_compl_lock);
pending = atomic_dec_return(&ctx->encrypt_pending);
- if (!pending && READ_ONCE(ctx->async_notify))
+ if (!pending && ctx->async_notify)
complete(&ctx->async_wait.completion);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
if (!ready)
return;
int num_zc = 0;
int orig_size;
int ret = 0;
+ int pending;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -EOPNOTSUPP;
goto send_end;
} else if (num_zc) {
/* Wait for pending encryptions to get completed */
- smp_store_mb(ctx->async_notify, true);
+ spin_lock_bh(&ctx->encrypt_compl_lock);
+ ctx->async_notify = true;
- if (atomic_read(&ctx->encrypt_pending))
+ pending = atomic_read(&ctx->encrypt_pending);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
+ if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
reinit_completion(&ctx->async_wait.completion);
+ /* There can be no concurrent accesses, since we have no
+ * pending encrypt operations
+ */
WRITE_ONCE(ctx->async_notify, false);
if (ctx->async_wait.err) {
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
int num_async = 0;
+ int pending;
flags |= nonblock;
recv_end:
if (num_async) {
/* Wait for all previously submitted records to be decrypted */
- smp_store_mb(ctx->async_notify, true);
- if (atomic_read(&ctx->decrypt_pending)) {
+ spin_lock_bh(&ctx->decrypt_compl_lock);
+ ctx->async_notify = true;
+ pending = atomic_read(&ctx->decrypt_pending);
+ spin_unlock_bh(&ctx->decrypt_compl_lock);
+ if (pending) {
err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
if (err) {
/* one of async decrypt failed */
} else {
reinit_completion(&ctx->async_wait.completion);
}
+
+ /* There can be no concurrent accesses, since we have no
+ * pending decrypt operations
+ */
WRITE_ONCE(ctx->async_notify, false);
/* Drain records from the rx_list & copy if required */
if (tx) {
crypto_init_wait(&sw_ctx_tx->async_wait);
+ spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
sw_ctx_tx->tx_work.sk = sk;
} else {
crypto_init_wait(&sw_ctx_rx->async_wait);
+ spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
skb_queue_head_init(&sw_ctx_rx->rx_list);
/* Wait for children sockets to appear; these are the new sockets
* created upon connection establishment.
*/
- timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
+ timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
while ((connected = vsock_dequeue_accept(listener)) == NULL &&
lock_sock(sk);
+ /* Check if sk has been released before lock_sock */
+ if (sk->sk_shutdown == SHUTDOWN_MASK) {
+ (void)virtio_transport_reset_no_sock(t, pkt);
+ release_sock(sk);
+ sock_put(sk);
+ goto free_pkt;
+ }
+
/* Update CID in case it has changed after a transport reset event */
vsk->local_addr.svm_cid = dst.svm_cid;
if (result)
return result;
- if (rdev->wiphy.debugfsdir)
+ if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
rdev->wiphy.debugfsdir,
rdev->wiphy.debugfsdir->d_parent, newname);
{
bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+ u64 npgs, addr = mr->addr, size = mr->len;
unsigned int chunks, chunks_per_page;
- u64 addr = mr->addr, size = mr->len;
int err;
if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
if ((addr + size) < addr)
return -EINVAL;
+ npgs = div_u64(size, PAGE_SIZE);
+ if (npgs > U32_MAX)
+ return -EINVAL;
+
chunks = (unsigned int)div_u64(size, chunk_size);
if (chunks == 0)
return -EINVAL;
umem->size = size;
umem->headroom = headroom;
umem->chunk_size_nohr = chunk_size - headroom;
- umem->npgs = size / PAGE_SIZE;
+ umem->npgs = (u32)npgs;
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
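/*
 * A minimal userspace sketch of the truncation guarded against above,
 * assuming 4 KiB pages; storing a 64-bit page count into a u32 without the
 * U32_MAX check silently wraps:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 1ULL << 45;		/* a 32 TiB registration */
	uint64_t npgs = size / 4096;		/* 2^33 pages */
	uint32_t truncated = (uint32_t)npgs;	/* wraps to 0 */

	printf("npgs=%llu truncated=%u\n",
	       (unsigned long long)npgs, truncated);
	return npgs > UINT32_MAX;	/* reject oversized, as the fix does */
}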
{
struct espintcp_ctx *ctx = espintcp_getctx(sk);
+ ctx->saved_destruct(sk);
kfree(ctx);
}
sk->sk_socket->ops = &espintcp_ops;
ctx->saved_data_ready = sk->sk_data_ready;
ctx->saved_write_space = sk->sk_write_space;
+ ctx->saved_destruct = sk->sk_destruct;
sk->sk_data_ready = espintcp_data_ready;
sk->sk_write_space = espintcp_write_space;
sk->sk_destruct = espintcp_destruct;
struct xfrm_offload *xo = xfrm_offload(skb);
skb_reset_mac_len(skb);
- pskb_pull(skb, skb->mac_len + hsize + x->props.header_len);
-
- if (xo->flags & XFRM_GSO_SEGMENT) {
- skb_reset_transport_header(skb);
+ if (xo->flags & XFRM_GSO_SEGMENT)
skb->transport_header -= x->props.header_len;
- }
+
+ pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
dev_put(skb->dev);
spin_lock(&x->lock);
- if (nexthdr <= 0) {
+ if (nexthdr < 0) {
if (nexthdr == -EBADMSG) {
xfrm_audit_state_icvfail(x, skb,
x->type->proto);
.get_link_net = xfrmi_get_link_net,
};
+static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+{
+ struct net *net;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+ struct xfrm_if __rcu **xip;
+ struct xfrm_if *xi;
+
+ for (xip = &xfrmn->xfrmi[0];
+ (xi = rtnl_dereference(*xip)) != NULL;
+ xip = &xi->next)
+ unregister_netdevice_queue(xi->dev, &list);
+ }
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
static struct pernet_operations xfrmi_net_ops = {
+ .exit_batch = xfrmi_exit_batch_net,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
};
xfrm_state_hold(x);
if (skb_is_gso(skb)) {
- skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+ if (skb->inner_protocol)
+ return xfrm_output_gso(net, sk, skb);
- return xfrm_output2(net, sk, skb);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+ goto out;
}
if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
goto out;
+ } else {
+ if (skb_is_gso(skb))
+ return xfrm_output_gso(net, sk, skb);
}
- if (skb_is_gso(skb))
- return xfrm_output_gso(net, sk, skb);
-
if (skb->ip_summed == CHECKSUM_PARTIAL) {
err = skb_checksum_help(skb);
if (err) {
if (skb->protocol == htons(ETH_P_IP))
proto = AF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (skb->protocol == htons(ETH_P_IPV6) &&
+ skb->sk->sk_family == AF_INET6)
proto = AF_INET6;
else
return;
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
struct xfrm_policy *pol)
{
- u32 mark = policy->mark.v & policy->mark.m;
-
- if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
- return true;
-
- if ((mark & pol->mark.m) == pol->mark.v &&
+ if (policy->mark.v == pol->mark.v &&
policy->priority == pol->priority)
return true;
my @ignore = ();
my $help = 0;
my $configuration_file = ".checkpatch.conf";
-my $max_line_length = 80;
+my $max_line_length = 100;
my $ignore_perl_version = 0;
my $minimum_perl_version = 5.10.0;
my $min_conf_desc_length = 4;
--types TYPE(,TYPE2...) show only these comma separated message types
--ignore TYPE(,TYPE2...) ignore various comma separated message types
--show-types show the specific message type in the output
- --max-line-length=n set the maximum line length, if exceeded, warn
+ --max-line-length=n set the maximum line length (default $max_line_length)
+ if exceeded, warn on patches
+ requires --strict for use with --file
--min-conf-desc-length=n set the min description length, if shorter, warn
- --tab-size=n set the number of spaces for tab (default 8)
+ --tab-size=n set the number of spaces for tab (default $tabsize)
--root=PATH PATH to the kernel tree root
--no-summary suppress the per-file summary
--mailback only produce a report in case of warnings/errors
if ($msg_type ne "" &&
(show_type("LONG_LINE") || show_type($msg_type))) {
- WARN($msg_type,
- "line over $max_line_length characters\n" . $herecurr);
+ my $msg_level = \&WARN;
+ $msg_level = \&CHK if ($file);
+ &{$msg_level}($msg_type,
+ "line length of $length exceeds $max_line_length columns\n" . $herecurr);
}
}
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
obj-$(CONFIG_SECURITY_SAFESETID) += safesetid/
obj-$(CONFIG_SECURITY_LOCKDOWN_LSM) += lockdown/
-obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_CGROUPS) += device_cgroup.o
obj-$(CONFIG_BPF_LSM) += bpf/
# Object integrity file lists
int ret;
kuid_t root_uid;
+ new->cap_ambient = old->cap_ambient;
if (WARN_ON(!cap_ambient_invariant_ok(old)))
return -EPERM;
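The one-line fix copies cap_ambient from the old credentials before the invariant check, so the derived credentials never carry an unset ambient set. A simplified analogue of deriving a struct and copying every inherited field explicitly (toy fields, not the kernel's capability logic):

#include <stdio.h>

struct cred_caps { unsigned int permitted, inheritable, ambient; };

static struct cred_caps derive_cred(const struct cred_caps *old)
{
	struct cred_caps new_cred = { 0 };

	new_cred.permitted = old->permitted;
	new_cred.inheritable = old->inheritable;
	new_cred.ambient = old->ambient;	/* the previously missing copy */
	return new_cred;
}

int main(void)
{
	struct cred_caps old = { 0x7, 0x3, 0x1 };
	struct cred_caps new_cred = derive_cred(&old);

	printf("ambient preserved: %u\n", new_cred.ambient);
	return 0;
}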
#include <linux/rcupdate.h>
#include <linux/mutex.h>
+#ifdef CONFIG_CGROUP_DEVICE
+
static DEFINE_MUTEX(devcgroup_mutex);
enum devcg_behavior {
};
/**
- * __devcgroup_check_permission - checks if an inode operation is permitted
+ * devcgroup_legacy_check_permission - checks if an inode operation is permitted
* @dev_cgroup: the dev cgroup to be tested against
* @type: device type
* @major: device major number
*
* returns 0 on success, -EPERM in case the operation is not permitted
*/
-static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
short access)
{
struct dev_cgroup *dev_cgroup;
return 0;
}
+#endif /* CONFIG_CGROUP_DEVICE */
+
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
+
int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
{
int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
if (rc)
return -EPERM;
- return __devcgroup_check_permission(type, major, minor, access);
+#ifdef CONFIG_CGROUP_DEVICE
+ return devcgroup_legacy_check_permission(type, major, minor, access);
+#else /* CONFIG_CGROUP_DEVICE */
+ return 0;
+#endif /* CONFIG_CGROUP_DEVICE */
}
EXPORT_SYMBOL(devcgroup_check_permission);
+#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */
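The restructuring keeps devcgroup_check_permission() available whenever either CONFIG_CGROUP_DEVICE or CONFIG_CGROUP_BPF is set: the BPF hook always runs, and the legacy checker is called only when it is compiled in, with a permissive fallback otherwise. A compile-time dispatch sketch of the same shape (toy checks, not the kernel's):

#include <stdio.h>

#define CONFIG_CGROUP_DEVICE 1	/* comment out to exercise the fallback */

static int bpf_prog_check(int type)
{
	return 0;	/* 0 == allow; stands in for the BPF hook */
}

#ifdef CONFIG_CGROUP_DEVICE
static int legacy_check(int type)
{
	return type == 2 ? -1 : 0;	/* toy legacy policy */
}
#endif

static int check_permission(int type)
{
	if (bpf_prog_check(type))
		return -1;
#ifdef CONFIG_CGROUP_DEVICE
	return legacy_check(type);	/* legacy controller compiled in */
#else
	return 0;	/* BPF-only build: nothing more to check */
#endif
}

int main(void)
{
	printf("type 1: %d, type 2: %d\n",
	       check_permission(1), check_permission(2));
	return 0;
}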
if (info.index >= 32)
return -EINVAL;
/* check whether the dsp was already loaded */
- if (hw->dsp_loaded & (1 << info.index))
+ if (hw->dsp_loaded & (1u << info.index))
return -EBUSY;
err = hw->ops.dsp_load(hw, &info);
if (err < 0)
return err;
- hw->dsp_loaded |= (1 << info.index);
+ hw->dsp_loaded |= (1u << info.index);
return 0;
}
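The 1u change avoids undefined behaviour: info.index can be as large as 31, and shifting a signed int into bit 31 is UB in C, while the unsigned shift is well defined. A short demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int index = 31;

	/* `1 << 31` shifts into the sign bit of a 32-bit int: undefined
	 * behaviour in C. Making the constant unsigned is well defined. */
	unsigned int mask = 1u << index;

	printf("mask = 0x%x\n", mask);
	return 0;
}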
case 0x10ec0282:
case 0x10ec0283:
case 0x10ec0286:
+ case 0x10ec0287:
case 0x10ec0288:
case 0x10ec0285:
case 0x10ec0298:
{ 0x19, 0x21a11010 }, /* dock mic */
{ }
};
- /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
- * the speaker output becomes too low by some reason on Thinkpads with
- * ALC298 codec
- */
- static const hda_nid_t preferred_pairs[] = {
- 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
- 0
- };
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->gen.preferred_dacs = preferred_pairs;
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
snd_hda_apply_pincfgs(codec, pincfgs);
} else if (action == HDA_FIXUP_ACT_INIT) {
}
}
+static void alc_fixup_tpt470_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ /* Ensure the speaker pin is coupled with DAC NID 0x03; otherwise
+ * the speaker output becomes too low for some reason on Thinkpads
+ * with the ALC298 codec.
+ */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
+ 0
+ };
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ spec->gen.preferred_dacs = preferred_pairs;
+}
+
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
ALC700_FIXUP_INTEL_REFERENCE,
ALC274_FIXUP_DELL_BIND_DACS,
ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+ ALC298_FIXUP_TPT470_DOCK_FIX,
ALC298_FIXUP_TPT470_DOCK,
ALC255_FIXUP_DUMMY_LINEOUT_VERB,
ALC255_FIXUP_DELL_HEADSET_MIC,
.chained = true,
.chain_id = ALC274_FIXUP_DELL_BIND_DACS
},
- [ALC298_FIXUP_TPT470_DOCK] = {
+ [ALC298_FIXUP_TPT470_DOCK_FIX] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_tpt470_dock,
.chained = true,
.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE
},
+ [ALC298_FIXUP_TPT470_DOCK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_tpt470_dacs,
+ .chained = true,
+ .chain_id = ALC298_FIXUP_TPT470_DOCK_FIX
+ },
[ALC255_FIXUP_DUMMY_LINEOUT_VERB] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+ {.id = ALC298_FIXUP_TPT470_DOCK_FIX, .name = "tpt470-dock-fix"},
{.id = ALC298_FIXUP_TPT470_DOCK, .name = "tpt470-dock"},
{.id = ALC233_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
{.id = ALC700_FIXUP_INTEL_REFERENCE, .name = "alc700-ref"},
case 0x10ec0215:
case 0x10ec0245:
case 0x10ec0285:
+ case 0x10ec0287:
case 0x10ec0289:
spec->codec_variant = ALC269_TYPE_ALC215;
spec->shutup = alc225_shutup;
HDA_CODEC_ENTRY(0x10ec0284, "ALC284", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0287, "ALC287", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
cval->res = 384;
}
break;
+ case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */
+ if (strstr(kctl->id.name, "Playback Volume") != NULL ||
+ strstr(kctl->id.name, "Capture Volume") != NULL) {
+ cval->min >>= 8;
+ cval->max = 0;
+ cval->res = 1;
+ }
+ break;
}
}
{}
};
+/* Rear panel + front mic on Gigabyte TRX40 Aorus Master with ALC1220-VB */
+static const struct usbmix_name_map aorus_master_alc1220vb_map[] = {
+ { 17, NULL }, /* OT, IEC958?, disabled */
+ { 19, NULL, 12 }, /* FU, Input Gain Pad - broken response, disabled */
+ { 16, "Line Out" }, /* OT */
+ { 22, "Line Out Playback" }, /* FU */
+ { 7, "Line" }, /* IT */
+ { 19, "Line Capture" }, /* FU */
+ { 8, "Mic" }, /* IT */
+ { 20, "Mic Capture" }, /* FU */
+ { 9, "Front Mic" }, /* IT */
+ { 21, "Front Mic Capture" }, /* FU */
+ {}
+};
+
/*
* Control map entries
*/
.id = USB_ID(0x1b1c, 0x0a42),
.map = corsair_virtuoso_map,
},
+ { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */
+ .id = USB_ID(0x0414, 0xa001),
+ .map = aorus_master_alc1220vb_map,
+ },
{ /* Gigabyte TRX40 Aorus Pro WiFi */
.id = USB_ID(0x0414, 0xa002),
.map = trx40_mobo_map,
ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
#undef ALC1220_VB_DESKTOP
+/* Two entries for the Gigabyte TRX40 Aorus Master:
+ * the board exposes two USB-audio devices, one for the front headphone
+ * output (ESS SABRE9218 DAC) and another for the remaining I/O (rear
+ * panel plus front mic, Realtek ALC1220-VB).
+ * Two distinct names are provided to make writing UCM profiles easier.
+ */
+{
+ USB_DEVICE(0x0414, 0xa000),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Gigabyte",
+ .product_name = "Aorus Master Front Headphone",
+ .profile_name = "Gigabyte-Aorus-Master-Front-Headphone",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
+{
+ USB_DEVICE(0x0414, 0xa001),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Gigabyte",
+ .product_name = "Aorus Master Main Audio",
+ .profile_name = "Gigabyte-Aorus-Master-Main-Audio",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
#define _UAPI_ASM_X86_UNISTD_H
/* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT 0x40000000UL
+#define __X32_SYSCALL_BIT 0x40000000
#ifndef __KERNEL__
# ifdef __i386__
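Dropping the UL suffix restores the constant's type to int. The suffix is not cosmetic: it propagates into every expression the macro appears in, so x32 syscall numbers built from it changed width on LP64 targets, which is what the revert undoes (my reading of the hunk). A quick check:

#include <stdio.h>

#define BIT_INT	0x40000000	/* type int, as the revert restores */
#define BIT_UL	0x40000000UL	/* type unsigned long */

int main(void)
{
	/* Any expression containing the UL macro is promoted, so on an
	 * LP64 target a syscall number built with it becomes 8 bytes
	 * wide and no longer compares as a plain int. */
	printf("sizeof(BIT_INT) = %zu\n", sizeof(BIT_INT));
	printf("sizeof(BIT_UL)  = %zu\n", sizeof(BIT_UL));
	return 0;
}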
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* error on OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* exit */
},
.fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
- .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
- .result = REJECT
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
+ .result_unpriv = REJECT,
+ .errstr = "value -4294967168 makes map_value pointer be out of bounds",
+ .result = REJECT,
},
{
"bounds check after truncation of boundary-crossing range (2)",
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
/* r1 = [0x00, 0xff] */
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
* [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
*/
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
- /* r1 = 0 or
- * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
- */
- BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
/* error on OOB pointer computation */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
/* exit */
},
.fixup_map_hash_8b = { 3 },
/* not actually fully unbounded, but the bound is very high */
- .errstr = "value 72057594021150720 makes map_value pointer be out of bounds",
- .result = REJECT
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
+ .result_unpriv = REJECT,
+ .errstr = "value -4294967168 makes map_value pointer be out of bounds",
+ .result = REJECT,
},
{
"bounds check after wrapping 32-bit addition",
},
.result = ACCEPT
},
+{
+ "assigning 32bit bounds to 64bit for wA = 0, wB = wA",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_MOV32_IMM(BPF_REG_9, 0),
+ BPF_MOV32_REG(BPF_REG_2, BPF_REG_9),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
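The new test encodes the fact that 32-bit register writes zero-extend into the full 64-bit register: after wA = 0 and wB = wA, the verifier can assign exact 64-bit bounds to B, so the packet-pointer arithmetic that follows is provably in range. The C analogue of the zero-extension:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wa = 0;	/* BPF: w9 = 0  */
	uint32_t wb = wa;	/* BPF: w2 = w9 */
	uint64_t rb = wb;	/* 32-bit writes zero-extend to 64 bits */

	/* rb is provably 0, so adding it to a packet pointer cannot move
	 * the pointer; that is the deduction the test expects to ACCEPT. */
	printf("rb = %llu\n", (unsigned long long)rb);
	return 0;
}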
--- /dev/null
+[
+ {
+ "id": "83be",
+ "name": "Create FQ-PIE with invalid number of flows",
+ "category": [
+ "qdisc",
+ "fq_pie"
+ ],
+ "setup": [
+ "$IP link add dev $DUMMY type dummy || /bin/true"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $DUMMY",
+ "matchPattern": "qdisc",
+ "matchCount": "0",
+ "teardown": [
+ "$IP link del dev $DUMMY"
+ ]
+ }
+]