Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author    Jakub Kicinski <kuba@kernel.org>
          Fri, 29 Jul 2022 01:21:16 +0000 (18:21 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 29 Jul 2022 01:21:16 +0000 (18:21 -0700)
No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
152 files changed:
.mailmap
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/fsl,fec.yaml
Documentation/networking/ip-sysctl.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/lan966x.dtsi
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/icontrol.c
arch/arm/mach-pxa/littleton.c
arch/arm/mach-pxa/magician.c
arch/arm/mach-pxa/spitz.c
arch/arm/mach-pxa/z2.c
arch/riscv/Makefile
arch/riscv/boot/dts/canaan/canaan_kd233.dts
arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
arch/riscv/kernel/Makefile
arch/riscv/kernel/elf_kexec.c
arch/s390/include/asm/archrandom.h
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/events/intel/lbr.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/x86.c
certs/Kconfig
drivers/acpi/cppc_acpi.c
drivers/clk/clk-lan966x.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-xilinx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/drm_gem_ttm_helper.c
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_lrc.h
drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
drivers/gpu/drm/imx/dcss/dcss-dev.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/scheduler/sched_entity.c
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/md/raid5.c
drivers/misc/lkdtm/Makefile
drivers/mmc/host/sdhci-omap.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/net/ethernet/fungible/funeth/funeth_rx.c
drivers/net/ethernet/fungible/funeth/funeth_tx.c
drivers/net/ethernet/fungible/funeth/funeth_txrx.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
drivers/net/ethernet/netronome/nfp/bpf/jit.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/ipa/ipa_qmi_msg.h
drivers/net/macsec.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/sungem_phy.c
drivers/net/virtio_net.c
drivers/pinctrl/Kconfig
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/ralink/pinctrl-ralink.c
drivers/pinctrl/sunplus/sppctl.c
drivers/ptp/Kconfig
drivers/s390/net/qeth_core_main.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-cadence.c
drivers/spi/spi-rspi.c
drivers/virt/coco/sev-guest/sev-guest.c
fs/io_uring.c
fs/ntfs/attrib.c
fs/ocfs2/ocfs2.h
fs/ocfs2/slot_map.c
fs/ocfs2/super.c
fs/read_write.c
fs/userfaultfd.c
include/asm-generic/io.h
include/asm-generic/tlb.h
include/drm/gpu_scheduler.h
include/linux/mm.h
include/net/addrconf.h
include/net/bluetooth/l2cap.h
include/net/inet_connection_sock.h
include/net/sock.h
include/net/tcp.h
include/uapi/asm-generic/fcntl.h
include/uapi/linux/kvm.h
kernel/rcu/srcutree.c
kernel/sched/deadline.c
kernel/watch_queue.c
mm/gup.c
mm/hugetlb.c
mm/kfence/core.c
mm/memory.c
mm/memremap.c
mm/secretmem.c
mm/shmem.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/bridge/br_netlink.c
net/caif/caif_socket.c
net/decnet/af_decnet.c
net/dsa/switch.c
net/ipv4/fib_trie.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_output.c
net/ipv6/mcast.c
net/ipv6/ping.c
net/ipv6/tcp_ipv6.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_queue.c
net/sctp/associola.c
net/sctp/stream.c
net/sctp/stream_sched.c
net/tipc/socket.c
net/tls/tls_device.c
sound/soc/rockchip/rockchip_i2s.c
tools/include/uapi/asm-generic/fcntl.h
tools/include/uapi/linux/kvm.h
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/kvm/rseq_test.c

index 13e4f50..71577c3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -60,6 +60,10 @@ Arnd Bergmann <arnd@arndb.de>
 Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
 Axel Dyks <xl@xlsigned.net>
 Axel Lin <axel.lin@gmail.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@linaro.org>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@spreadtrum.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
+Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -135,6 +139,8 @@ Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
+Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
@@ -371,6 +377,7 @@ Sean Nyekjaer <sean@geanix.com> <sean.nyekjaer@prevas.dk>
 Sebastian Reichel <sre@kernel.org> <sebastian.reichel@collabora.co.uk>
 Sebastian Reichel <sre@kernel.org> <sre@debian.org>
 Sedat Dilek <sedat.dilek@gmail.com> <sedat.dilek@credativ.de>
+Seth Forshee <sforshee@kernel.org> <seth.forshee@canonical.com>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
index f2d26cb..c0fdb04 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
                        expediting.  Set to zero to disable automatic
                        expediting.
 
+       srcutree.srcu_max_nodelay [KNL]
+                       Specifies the number of no-delay instances
+                       per jiffy for which the SRCU grace period
+                       worker thread will be rescheduled with zero
+                       delay. Beyond this limit, the worker thread
+                       will be rescheduled with a sleep delay of one
+                       jiffy.
+
+       srcutree.srcu_max_nodelay_phase [KNL]
+                       Specifies the number of non-sleeping polls of
+                       readers per grace-period phase. Beyond this
+                       limit, the grace-period worker thread will be
+                       rescheduled with a sleep delay of one jiffy
+                       between each rescan of the readers within that
+                       phase.
+
+       srcutree.srcu_retry_check_delay [KNL]
+                       Specifies the number of microseconds of non-sleeping
+                       delay between each non-sleeping poll of readers.
+
        srcutree.small_contention_lim [KNL]
                        Specifies the number of update-side contention
                        events per jiffy that will be tolerated before
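
As an illustration (the values are arbitrary, not recommendations), the
three new parameters above combine on the boot command line like any
other module parameter:

    srcutree.srcu_max_nodelay=100 srcutree.srcu_max_nodelay_phase=1 srcutree.srcu_retry_check_delay=5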
index 56d9aca..c138a10 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -155,70 +155,65 @@ properties:
       - in-band-status
 
   fixed-link:
-    allOf:
-      - if:
-          type: array
-        then:
-          deprecated: true
-          items:
-            - minimum: 0
-              maximum: 31
-              description:
-                Emulated PHY ID, choose any but unique to the all
-                specified fixed-links
-
-            - enum: [0, 1]
-              description:
-                Duplex configuration. 0 for half duplex or 1 for
-                full duplex
-
-            - enum: [10, 100, 1000, 2500, 10000]
-              description:
-                Link speed in Mbits/sec.
-
-            - enum: [0, 1]
-              description:
-                Pause configuration. 0 for no pause, 1 for pause
-
-            - enum: [0, 1]
-              description:
-                Asymmetric pause configuration. 0 for no asymmetric
-                pause, 1 for asymmetric pause
-
-
-      - if:
-          type: object
-        then:
-          properties:
-            speed:
-              description:
-                Link speed.
-              $ref: /schemas/types.yaml#/definitions/uint32
-              enum: [10, 100, 1000, 2500, 10000]
-
-            full-duplex:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that full-duplex is used. When absent, half
-                duplex is assumed.
-
-            pause:
-              $ref: /schemas/types.yaml#definitions/flag
-              description:
-                Indicates that pause should be enabled.
-
-            asym-pause:
-              $ref: /schemas/types.yaml#/definitions/flag
-              description:
-                Indicates that asym_pause should be enabled.
-
-            link-gpios:
-              maxItems: 1
-              description:
-                GPIO to determine if the link is up
-
-          required:
-            - speed
+    oneOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+        deprecated: true
+        items:
+          - minimum: 0
+            maximum: 31
+            description:
+              Emulated PHY ID; choose any value, but it must be
+              unique among all the specified fixed-links
+
+          - enum: [0, 1]
+            description:
+              Duplex configuration. 0 for half duplex or 1 for
+              full duplex
+
+          - enum: [10, 100, 1000, 2500, 10000]
+            description:
+              Link speed in Mbits/sec.
+
+          - enum: [0, 1]
+            description:
+              Pause configuration. 0 for no pause, 1 for pause
+
+          - enum: [0, 1]
+            description:
+              Asymmetric pause configuration. 0 for no asymmetric
+              pause, 1 for asymmetric pause
+      - type: object
+        additionalProperties: false
+        properties:
+          speed:
+            description:
+              Link speed.
+            $ref: /schemas/types.yaml#/definitions/uint32
+            enum: [10, 100, 1000, 2500, 10000]
+
+          full-duplex:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that full-duplex is used. When absent, half
+              duplex is assumed.
+
+          pause:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that pause should be enabled.
+
+          asym-pause:
+            $ref: /schemas/types.yaml#/definitions/flag
+            description:
+              Indicates that asym_pause should be enabled.
+
+          link-gpios:
+            maxItems: 1
+            description:
+              GPIO to determine if the link is up
+
+        required:
+          - speed
 
 allOf:
   - if:
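
As a hedged illustration of the two alternatives the schema now selects
with oneOf (the node and values are invented for the example):

    /* preferred object form */
    fixed-link {
        speed = <1000>;
        full-duplex;
    };

    /* deprecated array form: <phy-id duplex speed pause asym-pause> */
    fixed-link = <0 1 1000 0 0>;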
index 85a8d8f..924af2d 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -187,6 +187,7 @@ properties:
       Should specify the gpio for phy reset.
 
   phy-reset-duration:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Reset duration in milliseconds.  Should present only if property
@@ -195,12 +196,14 @@ properties:
       and 1 millisecond will be used instead.
 
   phy-reset-active-high:
+    type: boolean
     deprecated: true
     description:
       If present then the reset sequence using the GPIO specified in the
       "phy-reset-gpios" property is reversed (H=reset state, L=operation state).
 
   phy-reset-post-delay:
+    $ref: /schemas/types.yaml#/definitions/uint32
     deprecated: true
     description:
       Post reset delay in milliseconds. If present then a delay of phy-reset-post-delay
index 5879ef3..3c39776 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2884,7 +2884,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
        Default: 4K
 
 sctp_wmem  - vector of 3 INTEGERs: min, default, max
-       Currently this tunable has no effect.
+       Only the first value ("min") is used; "default" and "max" are
+       ignored.
+
+       min: Minimum size of send buffer that can be used by SCTP sockets.
+       It is guaranteed to each SCTP socket (but not association) even
+       under moderate memory pressure.
+
+       Default: 4K
 
 addr_scope_policy - INTEGER
        Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
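
For illustration only (arbitrary values), the tunable is written like
any other sysctl; per the text above, only the first ("min") field
takes effect:

    sysctl -w net.sctp.sctp_wmem="8192 16384 4194304"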
index 6e090fb..98a2839 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -5658,7 +5658,7 @@ by a string of size ``name_size``.
        #define KVM_STATS_UNIT_SECONDS          (0x2 << KVM_STATS_UNIT_SHIFT)
        #define KVM_STATS_UNIT_CYCLES           (0x3 << KVM_STATS_UNIT_SHIFT)
        #define KVM_STATS_UNIT_BOOLEAN          (0x4 << KVM_STATS_UNIT_SHIFT)
-       #define KVM_STATS_UNIT_MAX              KVM_STATS_UNIT_CYCLES
+       #define KVM_STATS_UNIT_MAX              KVM_STATS_UNIT_BOOLEAN
 
        #define KVM_STATS_BASE_SHIFT            8
        #define KVM_STATS_BASE_MASK             (0xF << KVM_STATS_BASE_SHIFT)
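
For reference, userspace can decode and bounds-check a descriptor's unit
with the mask from this same header (a sketch; "flags" stands for the
flags word of a struct kvm_stats_desc):

        __u32 unit = flags & KVM_STATS_UNIT_MASK;

        if (unit > KVM_STATS_UNIT_MAX)
                ; /* unit defined by a newer kernel than this userspace */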
index 46b345d..1920d82 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15862,7 +15862,7 @@ PIN CONTROLLER - FREESCALE
 M:     Dong Aisheng <aisheng.dong@nxp.com>
 M:     Fabio Estevam <festevam@gmail.com>
 M:     Shawn Guo <shawnguo@kernel.org>
-M:     Stefan Agner <stefan@agner.ch>
+M:     Jacky Bai <ping.bai@nxp.com>
 R:     Pengutronix Kernel Team <kernel@pengutronix.de>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
index 00fd80c..b79c1c1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Superb Owl
 
 # *DOCUMENTATION*
index 3cb02ff..38e90a3 100644
--- a/arch/arm/boot/dts/lan966x.dtsi
+++ b/arch/arm/boot/dts/lan966x.dtsi
@@ -38,7 +38,7 @@
                sys_clk: sys_clk {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <162500000>;
+                       clock-frequency = <165625000>;
                };
 
                cpu_clk: cpu_clk {
index c546356..5738496 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -549,7 +549,7 @@ static struct pxa2xx_spi_controller corgi_spi_info = {
 };
 
 static struct gpiod_lookup_table corgi_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.1",
+       .dev_id = "spi1",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", CORGI_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
index 2ae06ed..2fd6659 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -635,7 +635,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_HX4700_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 753fe16..6240882 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -140,7 +140,7 @@ struct platform_device pxa_spi_ssp4 = {
 };
 
 static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
-       .dev_id = "pxa2xx-spi.3",
+       .dev_id = "spi3",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS1, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS2, "cs", 1, GPIO_ACTIVE_LOW),
@@ -149,7 +149,7 @@ static struct gpiod_lookup_table pxa_ssp3_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp4_gpio_table = {
-       .dev_id = "pxa2xx-spi.4",
+       .dev_id = "spi4",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS3, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", ICONTROL_MCP251x_nCS4, "cs", 1, GPIO_ACTIVE_LOW),
index f98dc61..98423a9 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -207,7 +207,7 @@ static struct spi_board_info littleton_spi_devices[] __initdata = {
 };
 
 static struct gpiod_lookup_table littleton_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", LITTLETON_GPIO_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 20456a5..0827ebc 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -994,7 +994,7 @@ static struct pxa2xx_spi_controller magician_spi_info = {
 };
 
 static struct gpiod_lookup_table magician_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                /* NOTICE must be GPIO, incompatibility with hw PXA SPI framing */
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO14_MAGICIAN_TSC2046_CS, "cs", 0, GPIO_ACTIVE_LOW),
index dd88953..9964729 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -578,7 +578,7 @@ static struct pxa2xx_spi_controller spitz_spi_info = {
 };
 
 static struct gpiod_lookup_table spitz_spi_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_ADS7846_CS, "cs", 0, GPIO_ACTIVE_LOW),
                GPIO_LOOKUP_IDX("gpio-pxa", SPITZ_GPIO_LCDCON_CS, "cs", 1, GPIO_ACTIVE_LOW),
index d035205..c4d4162 100644
--- a/arch/arm/mach-pxa/z2.c
+++ b/arch/arm/mach-pxa/z2.c
@@ -623,7 +623,7 @@ static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
 };
 
 static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
-       .dev_id = "pxa2xx-spi.1",
+       .dev_id = "spi1",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO24_ZIPITZ2_WIFI_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
@@ -631,7 +631,7 @@ static struct gpiod_lookup_table pxa_ssp1_gpio_table = {
 };
 
 static struct gpiod_lookup_table pxa_ssp2_gpio_table = {
-       .dev_id = "pxa2xx-spi.2",
+       .dev_id = "spi2",
        .table = {
                GPIO_LOOKUP_IDX("gpio-pxa", GPIO88_ZIPITZ2_LCD_CS, "cs", 0, GPIO_ACTIVE_LOW),
                { },
index 34cf8a5..a4c46a0 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
 endif
 
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 
 # GCC versions that support the "-mstrict-align" option default to allowing
 # unaligned accesses.  While unaligned accesses are explicitly allowed in the
index 039b92a..f72540b 100644
--- a/arch/riscv/boot/dts/canaan/canaan_kd233.dts
+++ b/arch/riscv/boot/dts/canaan/canaan_kd233.dts
@@ -35,7 +35,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               key0 {
+               key {
                        label = "KEY0";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
index b9e30df..8abdbe2 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -47,7 +47,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 8d23401..3c6df1e 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -52,7 +52,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index 24fd83b..03c9843 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
        gpio-keys {
                compatible = "gpio-keys";
 
-               up {
+               key-up {
                        label = "UP";
                        linux,code = <BTN_1>;
                        gpios = <&gpio1_0 7 GPIO_ACTIVE_LOW>;
                };
 
-               press {
+               key-press {
                        label = "PRESS";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
                };
 
-               down {
+               key-down {
                        label = "DOWN";
                        linux,code = <BTN_2>;
                        gpios = <&gpio0 1 GPIO_ACTIVE_LOW>;
index 25341f3..7164ad0 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -23,7 +23,7 @@
        gpio-keys {
                compatible = "gpio-keys";
 
-               boot {
+               key-boot {
                        label = "BOOT";
                        linux,code = <BTN_0>;
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
index c71d659..33bb60a 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -78,7 +78,7 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
 endif
 obj-$(CONFIG_HOTPLUG_CPU)      += cpu-hotplug.o
 obj-$(CONFIG_KGDB)             += kgdb.o
-obj-$(CONFIG_KEXEC)            += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_KEXEC_CORE)       += kexec_relocate.o crash_save_regs.o machine_kexec.o
 obj-$(CONFIG_KEXEC_FILE)       += elf_kexec.o machine_kexec_file.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 
index 9cb8509..0cb9499 100644
--- a/arch/riscv/kernel/elf_kexec.c
+++ b/arch/riscv/kernel/elf_kexec.c
@@ -349,7 +349,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
 {
        const char *strtab, *name, *shstrtab;
        const Elf_Shdr *sechdrs;
-       Elf_Rela *relas;
+       Elf64_Rela *relas;
        int i, r_type;
 
        /* String & section header string table */
index 2c6e1c6..4120c42 100644
--- a/arch/s390/include/asm/archrandom.h
+++ b/arch/s390/include/asm/archrandom.h
@@ -2,7 +2,7 @@
 /*
  * Kernel interface for the s390 arch_random_* functions
  *
- * Copyright IBM Corp. 2017, 2020
+ * Copyright IBM Corp. 2017, 2022
  *
  * Author: Harald Freudenberger <freude@de.ibm.com>
  *
@@ -14,6 +14,7 @@
 #ifdef CONFIG_ARCH_RANDOM
 
 #include <linux/static_key.h>
+#include <linux/preempt.h>
 #include <linux/atomic.h>
 #include <asm/cpacf.h>
 
@@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
 
 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 {
-       if (static_branch_likely(&s390_arch_random_available)) {
+       if (static_branch_likely(&s390_arch_random_available) &&
+           in_task()) {
                cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
                atomic64_add(sizeof(*v), &s390_arch_random_counter);
                return true;
@@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
-       if (static_branch_likely(&s390_arch_random_available)) {
+       if (static_branch_likely(&s390_arch_random_available) &&
+           in_task()) {
                cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
                atomic64_add(sizeof(*v), &s390_arch_random_counter);
                return true;
index 7fff10e..52a7f91 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2474,7 +2474,7 @@ config RETHUNK
        bool "Enable return-thunks"
        depends on RETPOLINE && CC_HAS_RETURN_THUNK
        select OBJTOOL if HAVE_OBJTOOL
-       default y
+       default y if X86_64
        help
          Compile the kernel with the return-thunks compiler option to guard
          against kernel-to-user data leaks by avoiding return speculation.
@@ -2483,21 +2483,21 @@ config RETHUNK
 
 config CPU_UNRET_ENTRY
        bool "Enable UNRET on kernel entry"
-       depends on CPU_SUP_AMD && RETHUNK
+       depends on CPU_SUP_AMD && RETHUNK && X86_64
        default y
        help
          Compile the kernel with support for the retbleed=unret mitigation.
 
 config CPU_IBPB_ENTRY
        bool "Enable IBPB on kernel entry"
-       depends on CPU_SUP_AMD
+       depends on CPU_SUP_AMD && X86_64
        default y
        help
          Compile the kernel with support for the retbleed=ibpb mitigation.
 
 config CPU_IBRS_ENTRY
        bool "Enable IBRS on kernel entry"
-       depends on CPU_SUP_INTEL
+       depends on CPU_SUP_INTEL && X86_64
        default y
        help
          Compile the kernel with support for the spectre_v2=ibrs mitigation.
index 1f40dad..7854685 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,7 @@ RETHUNK_CFLAGS                := -mfunction-return=thunk-extern
 RETPOLINE_CFLAGS       += $(RETHUNK_CFLAGS)
 endif
 
+export RETHUNK_CFLAGS
 export RETPOLINE_CFLAGS
 export RETPOLINE_VDSO_CFLAGS
 
index 13179f3..4f70fb6 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -278,9 +278,9 @@ enum {
 };
 
 /*
- * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
- * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
- * TSX is not supported they have no consistent behavior:
+ * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x
+ * are the TSX flags when TSX is supported, but when TSX is not supported
+ * they have no consistent behavior:
  *
  *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
  *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
@@ -288,7 +288,7 @@ enum {
  *
  * Therefore, if:
  *
- *   1) LBR has TSX format
+ *   1) LBR format is LBR_FORMAT_EIP_FLAGS2
  *   2) CPU has no TSX support enabled
  *
  * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
@@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void)
        bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
                           boot_cpu_has(X86_FEATURE_RTM);
 
-       return !tsx_support && x86_pmu.lbr_has_tsx;
+       return !tsx_support;
 }
 
 static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -1609,9 +1609,6 @@ void intel_pmu_lbr_init_hsw(void)
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
        x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
-
-       if (lbr_from_signext_quirk_needed())
-               static_branch_enable(&lbr_from_quirk_key);
 }
 
 /* skylake */
@@ -1702,7 +1699,11 @@ void intel_pmu_lbr_init(void)
        switch (x86_pmu.intel_cap.lbr_format) {
        case LBR_FORMAT_EIP_FLAGS2:
                x86_pmu.lbr_has_tsx = 1;
-               fallthrough;
+               x86_pmu.lbr_from_flags = 1;
+               if (lbr_from_signext_quirk_needed())
+                       static_branch_enable(&lbr_from_quirk_key);
+               break;
+
        case LBR_FORMAT_EIP_FLAGS:
                x86_pmu.lbr_from_flags = 1;
                break;
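
For context, the quirk's job can be sketched as plain sign extension (an
illustration, not the kernel's exact helper): before a wrmsr() of
MSR_LAST_BRANCH_FROM_x on such CPUs, the bits above the address field
must replicate its top bit so the value is canonical:

        /* illustration only: propagate bit 60 into bits 61:63 */
        static inline u64 lbr_from_sign_extend(u64 val)
        {
                return (u64)(((s64)(val << 3)) >> 3);
        }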
index 00f5227..a77b915 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
 #define X86_FEATURE_RETPOLINE_LFENCE   (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
 #define X86_FEATURE_RETHUNK            (11*32+14) /* "" Use REturn THUNK */
 #define X86_FEATURE_UNRET              (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW                (11*32+16) /* "" Use IBPB during runtime firmware calls */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
index 10a3bfc..38a3e86 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -297,6 +297,8 @@ do {                                                                        \
        alternative_msr_write(MSR_IA32_SPEC_CTRL,                       \
                              spec_ctrl_current() | SPEC_CTRL_IBRS,     \
                              X86_FEATURE_USE_IBRS_FW);                 \
+       alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,         \
+                             X86_FEATURE_USE_IBPB_FW);                 \
 } while (0)
 
 #define firmware_restrict_branch_speculation_end()                     \
index d685853..62f6b8b 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
                        dest = addr + insn.length + insn.immediate.value;
 
                if (__static_call_fixup(addr, op, dest) ||
-                   WARN_ON_ONCE(dest != &__x86_return_thunk))
+                   WARN_ONCE(dest != &__x86_return_thunk,
+                             "missing return thunk: %pS-%pS: %*ph",
+                             addr, dest, 5, addr))
                        continue;
 
                DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
index aa34f90..6454bc7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -975,6 +975,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; }
 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
+#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
 
 #ifdef CONFIG_BPF_SYSCALL
 void unpriv_ebpf_notify(int new_state)
@@ -1415,6 +1416,8 @@ static void __init spectre_v2_select_mitigation(void)
 
        case SPECTRE_V2_IBRS:
                setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
+               if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
+                       pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
                break;
 
        case SPECTRE_V2_LFENCE:
@@ -1516,7 +1519,16 @@ static void __init spectre_v2_select_mitigation(void)
         * the CPU supports Enhanced IBRS, kernel might un-intentionally not
         * enable IBRS around firmware calls.
         */
-       if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+       if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
+           (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+            boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
+
+               if (retbleed_cmd != RETBLEED_CMD_IBPB) {
+                       setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
+                       pr_info("Enabling Speculation Barrier for firmware calls\n");
+               }
+
+       } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
index 143e372..e5fa335 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6029,6 +6029,11 @@ split_irqchip_unlock:
                r = 0;
                break;
        case KVM_CAP_X86_USER_SPACE_MSR:
+               r = -EINVAL;
+               if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
+                                    KVM_MSR_EXIT_REASON_UNKNOWN |
+                                    KVM_MSR_EXIT_REASON_FILTER))
+                       break;
                kvm->arch.user_space_msr_mask = cap->args[0];
                r = 0;
                break;
@@ -6183,6 +6188,9 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
        if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
                return -EFAULT;
 
+       if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
                empty &= !filter.ranges[i].nmsrs;
 
index 4767557..bf9b511 100644
--- a/certs/Kconfig
+++ b/certs/Kconfig
@@ -43,6 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
        bool "Provide system-wide ring of trusted keys"
        depends on KEYS
        depends on ASYMMETRIC_KEY_TYPE
+       depends on X509_CERTIFICATE_PARSER
        help
          Provide a system keyring to which trusted keys can be added.  Keys in
          the keyring are considered to be trusted.  Keys may be added at will
index 6ff1901..3c6d4ef 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -782,7 +782,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
 
                                        if (!osc_cpc_flexible_adr_space_confirmed) {
                                                pr_debug("Flexible address space capability not supported\n");
-                                               goto out_free;
+                                               if (!cpc_supported_by_cpu())
+                                                       goto out_free;
                                        }
 
                                        addr = ioremap(gas_t->address, gas_t->bit_width/8);
@@ -809,7 +810,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
                                }
                                if (!osc_cpc_flexible_adr_space_confirmed) {
                                        pr_debug("Flexible address space capability not supported\n");
-                                       goto out_free;
+                                       if (!cpc_supported_by_cpu())
+                                               goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
index d1535ac..81cb909 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
 
                hw_data->hws[i] =
                        devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
-                                                 "lan966x", 0, base,
+                                                 "lan966x", 0, gate_base,
                                                  clk_gate_desc[idx].bit_idx,
                                                  0, &clk_gate_lock);
 
index 08bc52c..ecd7d16 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = {
        .reg_bits = 8,
        .val_bits = 8,
 
+       .use_single_read = true,
+       .use_single_write = true,
+
        .readable_reg = pca953x_readable_register,
        .writeable_reg = pca953x_writeable_register,
        .volatile_reg = pca953x_volatile_register,
@@ -906,15 +909,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert)
 {
        DECLARE_BITMAP(val, MAX_LINE);
+       u8 regaddr;
        int ret;
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->output,
-                                  chip->regs->output + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr,
+                                  regaddr + NBANK(chip) - 1);
        if (ret)
                goto out;
 
-       ret = regcache_sync_region(chip->regmap, chip->regs->direction,
-                                  chip->regs->direction + NBANK(chip));
+       regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
+       ret = regcache_sync_region(chip->regmap, regaddr,
+                                  regaddr + NBANK(chip) - 1);
        if (ret)
                goto out;
 
@@ -1127,14 +1133,14 @@ static int pca953x_regcache_sync(struct device *dev)
         * sync these registers first and only then sync the rest.
         */
        regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
-       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
        if (ret) {
                dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
                return ret;
        }
 
        regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
-       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
+       ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1);
        if (ret) {
                dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
                return ret;
@@ -1144,7 +1150,7 @@ static int pca953x_regcache_sync(struct device *dev)
        if (chip->driver_data & PCA_PCAL) {
                regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
                ret = regcache_sync_region(chip->regmap, regaddr,
-                                          regaddr + NBANK(chip));
+                                          regaddr + NBANK(chip) - 1);
                if (ret) {
                        dev_err(dev, "Failed to sync INT latch registers: %d\n",
                                ret);
@@ -1153,7 +1159,7 @@ static int pca953x_regcache_sync(struct device *dev)
 
                regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
                ret = regcache_sync_region(chip->regmap, regaddr,
-                                          regaddr + NBANK(chip));
+                                          regaddr + NBANK(chip) - 1);
                if (ret) {
                        dev_err(dev, "Failed to sync INT mask registers: %d\n",
                                ret);
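
For context: regcache_sync_region() takes an inclusive [min, max]
register range, which is what every "- 1" added above corrects for. A
bank of n registers starting at base is synced as (sketch):

        ret = regcache_sync_region(map, base, base + n - 1); /* max is inclusive */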
index b6d3a57..7f8e2fe 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v)
        const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
 
        map[index] &= ~(0xFFFFFFFFul << offset);
-       map[index] |= v << offset;
+       map[index] |= (unsigned long)v << offset;
 }
 
 static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch)
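
The new cast matters because "v" is a u32 while "offset" can be 32 on a
64-bit kernel, and shifting a 32-bit value by its full width is
undefined behaviour in C. A minimal illustration (hypothetical values):

        u32 v = 1;
        unsigned long bad  = v << 32;                 /* UB: shift >= width of u32 */
        unsigned long good = (unsigned long)v << 32;  /* well-defined on 64-bit */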
index 6b6d46e..4608599 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1364,16 +1364,10 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
 {
        struct amdkfd_process_info *process_info = vm->process_info;
-       struct amdgpu_bo *pd = vm->root.bo;
 
        if (!process_info)
                return;
 
-       /* Release eviction fence from PD */
-       amdgpu_bo_reserve(pd, false);
-       amdgpu_bo_fence(pd, NULL, false);
-       amdgpu_bo_unreserve(pd);
-
        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
index 714178f..2168163 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
 {
        struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
                                                   rhead);
-
+       mutex_destroy(&list->bo_list_mutex);
        kvfree(list);
 }
 
@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
        trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 
+       mutex_init(&list->bo_list_mutex);
        *result = list;
        return 0;
 
index 529d52a..9caea16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
        struct amdgpu_bo *oa_obj;
        unsigned first_userptr;
        unsigned num_entries;
+
+       /* Protect access during command submission.
+        */
+       struct mutex bo_list_mutex;
 };
 
 int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
index b28af04..d8f1335 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                        return r;
        }
 
+       mutex_lock(&p->bo_list->bo_list_mutex);
+
        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ out_free_user_pages:
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                }
+               mutex_unlock(&p->bo_list->bo_list_mutex);
        }
        return r;
 }
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 {
        unsigned i;
 
-       if (error && backoff)
+       if (error && backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
+               mutex_unlock(&parser->bo_list->bo_list_mutex);
+       }
 
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        continue;
 
                r = amdgpu_vm_bo_update(adev, bo_va, false);
-               if (r)
+               if (r) {
+                       mutex_unlock(&p->bo_list->bo_list_mutex);
                        return r;
+               }
 
                r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
-               if (r)
+               if (r) {
+                       mutex_unlock(&p->bo_list->bo_list_mutex);
                        return r;
+               }
        }
 
        r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        mutex_unlock(&p->adev->notifier_lock);
+       mutex_unlock(&p->bo_list->bo_list_mutex);
 
        return 0;
 
index 93ac33a..3087dd1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1653,7 +1653,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
 #endif
-       if (dc_enable_dmub_notifications(adev->dm.dc)) {
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
                adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
                if (!adev->dm.dmub_notify) {
@@ -1689,6 +1689,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                goto error;
        }
 
+       /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+        * It is expected that DMUB will resend any pending notifications at this point, for
+        * example HPD from DPIA.
+        */
+       if (dc_is_dmub_outbox_supported(adev->dm.dc))
+               dc_enable_dmub_outbox(adev->dm.dc);
+
        /* create fake encoders for MST */
        dm_dp_create_fake_mst_encoders(adev);
 
@@ -2678,9 +2685,6 @@ static int dm_resume(void *handle)
                 */
                link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
 
-               if (dc_enable_dmub_notifications(adev->dm.dc))
-                       amdgpu_dm_outbox_init(adev);
-
                r = dm_dmub_hw_init(adev);
                if (r)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2698,6 +2702,11 @@ static int dm_resume(void *handle)
                        }
                }
 
+               if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+                       amdgpu_dm_outbox_init(adev);
+                       dc_enable_dmub_outbox(adev->dm.dc);
+               }
+
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
                dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -2719,13 +2728,15 @@ static int dm_resume(void *handle)
        /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
        dc_resource_state_construct(dm->dc, dm_state->context);
 
-       /* Re-enable outbox interrupts for DPIA. */
-       if (dc_enable_dmub_notifications(adev->dm.dc))
-               amdgpu_dm_outbox_init(adev);
-
        /* Before powering on DC we need to re-initialize DMUB. */
        dm_dmub_hw_resume(adev);
 
+       /* Re-enable outbox interrupts for DPIA. */
+       if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+               amdgpu_dm_outbox_init(adev);
+               dc_enable_dmub_outbox(adev->dm.dc);
+       }
+
        /* power on hardware */
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
 
index d5962a3..e5fc875 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
                     struct iosys_map *map)
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+       int ret;
+
+       dma_resv_lock(gem->resv, NULL);
+       ret = ttm_bo_vmap(bo, map);
+       dma_resv_unlock(gem->resv);
 
-       return ttm_bo_vmap(bo, map);
+       return ret;
 }
 EXPORT_SYMBOL(drm_gem_ttm_vmap);
 
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
 {
        struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
+       dma_resv_lock(gem->resv, NULL);
        ttm_bo_vunmap(bo, map);
+       dma_resv_unlock(gem->resv);
 }
 EXPORT_SYMBOL(drm_gem_ttm_vunmap);
 
index 09f8254..44e7339 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -273,10 +273,17 @@ struct intel_context {
                u8 child_index;
                /** @guc: GuC specific members for parallel submission */
                struct {
-                       /** @wqi_head: head pointer in work queue */
+                       /** @wqi_head: cached head pointer in work queue */
                        u16 wqi_head;
-                       /** @wqi_tail: tail pointer in work queue */
+                       /** @wqi_tail: cached tail pointer in work queue */
                        u16 wqi_tail;
+                       /** @wq_head: pointer to the actual head in work queue */
+                       u32 *wq_head;
+                       /** @wq_tail: pointer to the actual tail in work queue */
+                       u32 *wq_tail;
+                       /** @wq_status: pointer to the status in work queue */
+                       u32 *wq_status;
+
                        /**
                         * @parent_page: page in context state (ce->state) used
                         * by parent for work queue, process descriptor
index 86f7a9a..2b0266c 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -661,6 +661,16 @@ static inline void execlists_schedule_out(struct i915_request *rq)
        i915_request_put(rq);
 }
 
+static u32 map_i915_prio_to_lrc_desc_prio(int prio)
+{
+       if (prio > I915_PRIORITY_NORMAL)
+               return GEN12_CTX_PRIORITY_HIGH;
+       else if (prio < I915_PRIORITY_NORMAL)
+               return GEN12_CTX_PRIORITY_LOW;
+       else
+               return GEN12_CTX_PRIORITY_NORMAL;
+}
+
 static u64 execlists_update_context(struct i915_request *rq)
 {
        struct intel_context *ce = rq->context;
@@ -669,7 +679,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 
        desc = ce->lrc.desc;
        if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
-               desc |= lrc_desc_priority(rq_prio(rq));
+               desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
 
        /*
         * WaIdleLiteRestore:bdw,skl
index 31be734..a390f08 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -111,16 +111,6 @@ enum {
 #define XEHP_SW_COUNTER_SHIFT                  58
 #define XEHP_SW_COUNTER_WIDTH                  6
 
-static inline u32 lrc_desc_priority(int prio)
-{
-       if (prio > I915_PRIORITY_NORMAL)
-               return GEN12_CTX_PRIORITY_HIGH;
-       else if (prio < I915_PRIORITY_NORMAL)
-               return GEN12_CTX_PRIORITY_LOW;
-       else
-               return GEN12_CTX_PRIORITY_NORMAL;
-}
-
 static inline void lrc_runtime_start(struct intel_context *ce)
 {
        struct intel_context_stats *stats = &ce->stats;
index 4ef9990..29ef8af 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -122,6 +122,9 @@ enum intel_guc_action {
        INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
        INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
        INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY = 0x1005,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+       INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
        INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
        INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
        INTEL_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES = 0x100B,
index 966e69a..9feda10 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -170,6 +170,11 @@ struct intel_guc {
        /** @ads_engine_usage_size: size of engine usage in the ADS */
        u32 ads_engine_usage_size;
 
+       /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
+       struct i915_vma *lrc_desc_pool_v69;
+       /** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
+       void *lrc_desc_pool_vaddr_v69;
+
        /**
         * @context_lookup: used to resolve intel_context from guc_id, if a
         * context is present in this structure it is registered with the GuC
index 42cb7a9..89a7e5e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -203,6 +203,20 @@ struct guc_wq_item {
        u32 fence_id;
 } __packed;
 
+struct guc_process_desc_v69 {
+       u32 stage_id;
+       u64 db_base_addr;
+       u32 head;
+       u32 tail;
+       u32 error_offset;
+       u64 wq_base_addr;
+       u32 wq_size_bytes;
+       u32 wq_status;
+       u32 engine_presence;
+       u32 priority;
+       u32 reserved[36];
+} __packed;
+
 struct guc_sched_wq_desc {
        u32 head;
        u32 tail;
@@ -227,6 +241,37 @@ struct guc_ctxt_registration_info {
 };
 #define CONTEXT_REGISTRATION_FLAG_KMD  BIT(0)
 
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69        BIT(0)
+
+/*
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 mapping between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
+ */
+struct guc_lrc_desc_v69 {
+       u32 hw_context_desc;
+       u32 slpm_perf_mode_hint;        /* SPLC v1 only */
+       u32 slpm_freq_hint;
+       u32 engine_submit_mask;         /* In logical space */
+       u8 engine_class;
+       u8 reserved0[3];
+       u32 priority;
+       u32 process_desc;
+       u32 wq_addr;
+       u32 wq_size;
+       u32 context_flags;              /* CONTEXT_REGISTRATION_* */
+       /* Time for one workload to execute (in microseconds). */
+       u32 execution_quantum;
+       /* Time to wait for a preemption request to complete before issuing a
+        * reset (in microseconds).
+        */
+       u32 preemption_timeout;
+       u32 policy_flags;               /* CONTEXT_POLICY_* */
+       u32 reserved1[19];
+} __packed;
+
 /* 32-bit KLV structure as used by policy updates and others */
 struct guc_klv_generic_dw_t {
        u32 kl;
index 1726f0f..9ffb343 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -414,12 +414,15 @@ struct sync_semaphore {
 };
 
 struct parent_scratch {
-       struct guc_sched_wq_desc wq_desc;
+       union guc_descs {
+               struct guc_sched_wq_desc wq_desc;
+               struct guc_process_desc_v69 pdesc;
+       } descs;
 
        struct sync_semaphore go;
        struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
 
-       u8 unused[WQ_OFFSET - sizeof(struct guc_sched_wq_desc) -
+       u8 unused[WQ_OFFSET - sizeof(union guc_descs) -
                sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
 
        u32 wq[WQ_SIZE / sizeof(u32)];
@@ -456,17 +459,23 @@ __get_parent_scratch(struct intel_context *ce)
                   LRC_STATE_OFFSET) / sizeof(u32)));
 }
 
+static struct guc_process_desc_v69 *
+__get_process_desc_v69(struct intel_context *ce)
+{
+       struct parent_scratch *ps = __get_parent_scratch(ce);
+
+       return &ps->descs.pdesc;
+}
+
 static struct guc_sched_wq_desc *
-__get_wq_desc(struct intel_context *ce)
+__get_wq_desc_v70(struct intel_context *ce)
 {
        struct parent_scratch *ps = __get_parent_scratch(ce);
 
-       return &ps->wq_desc;
+       return &ps->descs.wq_desc;
 }
 
-static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
-                          struct intel_context *ce,
-                          u32 wqi_size)
+static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
 {
        /*
         * Check for space in work queue. Caching a value of head pointer in
         * intel_context structure in order to reduce the number of accesses
         * to HW or shared memory.
         */
@@ -476,7 +485,7 @@ static u32 *get_wq_pointer(struct guc_sched_wq_desc *wq_desc,
 #define AVAILABLE_SPACE        \
        CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
        if (wqi_size > AVAILABLE_SPACE) {
-               ce->parallel.guc.wqi_head = READ_ONCE(wq_desc->head);
+               ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
 
                if (wqi_size > AVAILABLE_SPACE)
                        return NULL;
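
The AVAILABLE_SPACE check builds on the standard ring helpers from
<linux/circ_buf.h>, where free space in a power-of-2 sized ring (one
slot is always left empty) expands, via CIRC_CNT(), to:

        #define CIRC_SPACE(head, tail, size) \
                (((tail) - ((head) + 1)) & ((size) - 1))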
@@ -495,11 +504,55 @@ static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
        return ce;
 }
 
+static struct guc_lrc_desc_v69 *__get_lrc_desc_v69(struct intel_guc *guc, u32 index)
+{
+       struct guc_lrc_desc_v69 *base = guc->lrc_desc_pool_vaddr_v69;
+
+       if (!base)
+               return NULL;
+
+       GEM_BUG_ON(index >= GUC_MAX_CONTEXT_ID);
+
+       return &base[index];
+}
+
+static int guc_lrc_desc_pool_create_v69(struct intel_guc *guc)
+{
+       u32 size;
+       int ret;
+
+       size = PAGE_ALIGN(sizeof(struct guc_lrc_desc_v69) *
+                         GUC_MAX_CONTEXT_ID);
+       ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool_v69,
+                                            (void **)&guc->lrc_desc_pool_vaddr_v69);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void guc_lrc_desc_pool_destroy_v69(struct intel_guc *guc)
+{
+       if (!guc->lrc_desc_pool_vaddr_v69)
+               return;
+
+       guc->lrc_desc_pool_vaddr_v69 = NULL;
+       i915_vma_unpin_and_release(&guc->lrc_desc_pool_v69, I915_VMA_RELEASE_MAP);
+}
+
 static inline bool guc_submission_initialized(struct intel_guc *guc)
 {
        return guc->submission_initialized;
 }
 
+static inline void _reset_lrc_desc_v69(struct intel_guc *guc, u32 id)
+{
+       struct guc_lrc_desc_v69 *desc = __get_lrc_desc_v69(guc, id);
+
+       if (desc)
+               memset(desc, 0, sizeof(*desc));
+}
+
 static inline bool ctx_id_mapped(struct intel_guc *guc, u32 id)
 {
        return __get_context(guc, id);
@@ -526,6 +579,8 @@ static inline void clr_ctx_id_mapping(struct intel_guc *guc, u32 id)
        if (unlikely(!guc_submission_initialized(guc)))
                return;
 
+       _reset_lrc_desc_v69(guc, id);
+
        /*
         * xarray API doesn't have xa_erase_irqsave wrapper, so calling
         * the lower level functions directly.
@@ -611,7 +666,7 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
                                              true, timeout);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop);
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
 static int try_context_registration(struct intel_context *ce, bool loop);
 
 static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
@@ -639,7 +694,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
        GEM_BUG_ON(context_guc_id_invalid(ce));
 
        if (context_policy_required(ce)) {
-               err = guc_context_policy_init(ce, false);
+               err = guc_context_policy_init_v70(ce, false);
                if (err)
                        return err;
        }
@@ -737,9 +792,7 @@ static u32 wq_space_until_wrap(struct intel_context *ce)
        return (WQ_SIZE - ce->parallel.guc.wqi_tail);
 }
 
-static void write_wqi(struct guc_sched_wq_desc *wq_desc,
-                     struct intel_context *ce,
-                     u32 wqi_size)
+static void write_wqi(struct intel_context *ce, u32 wqi_size)
 {
        BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
 
@@ -750,13 +803,12 @@ static void write_wqi(struct guc_sched_wq_desc *wq_desc,
 
        ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
                (WQ_SIZE - 1);
-       WRITE_ONCE(wq_desc->tail, ce->parallel.guc.wqi_tail);
+       WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
 }
 
 static int guc_wq_noop_append(struct intel_context *ce)
 {
-       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
-       u32 *wqi = get_wq_pointer(wq_desc, ce, wq_space_until_wrap(ce));
+       u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
        u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
 
        if (!wqi)
@@ -775,7 +827,6 @@ static int __guc_wq_item_append(struct i915_request *rq)
 {
        struct intel_context *ce = request_to_scheduling_context(rq);
        struct intel_context *child;
-       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
        unsigned int wqi_size = (ce->parallel.number_children + 4) *
                sizeof(u32);
        u32 *wqi;
@@ -795,7 +846,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
                        return ret;
        }
 
-       wqi = get_wq_pointer(wq_desc, ce, wqi_size);
+       wqi = get_wq_pointer(ce, wqi_size);
        if (!wqi)
                return -EBUSY;
 
@@ -810,7 +861,7 @@ static int __guc_wq_item_append(struct i915_request *rq)
        for_each_child(ce, child)
                *wqi++ = child->ring->tail / sizeof(u64);
 
-       write_wqi(wq_desc, ce, wqi_size);
+       write_wqi(ce, wqi_size);
 
        return 0;
 }
@@ -1868,20 +1919,34 @@ static void reset_fail_worker_func(struct work_struct *w);
 int intel_guc_submission_init(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
+       int ret;
 
        if (guc->submission_initialized)
                return 0;
 
+       if (guc->fw.major_ver_found < 70) {
+               ret = guc_lrc_desc_pool_create_v69(guc);
+               if (ret)
+                       return ret;
+       }
+
        guc->submission_state.guc_ids_bitmap =
                bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
-       if (!guc->submission_state.guc_ids_bitmap)
-               return -ENOMEM;
+       if (!guc->submission_state.guc_ids_bitmap) {
+               ret = -ENOMEM;
+               goto destroy_pool;
+       }
 
        guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
        guc->timestamp.shift = gpm_timestamp_shift(gt);
        guc->submission_initialized = true;
 
        return 0;
+
+destroy_pool:
+       guc_lrc_desc_pool_destroy_v69(guc);
+
+       return ret;
 }
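
The init path now allocates the v69 descriptor pool only when older firmware was detected and unwinds it if a later step fails, the usual goto ladder. A stand-alone sketch of that shape (the pool and bitmap here are stand-ins, not the driver's types; assumes *s starts zeroed):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct state { bool legacy_fw; void *pool; unsigned long *bitmap; };

    static int submission_init(struct state *s)
    {
        int ret;

        if (s->legacy_fw) {             /* only firmware < 70 needs the pool */
            s->pool = calloc(1, 4096);
            if (!s->pool)
                return -ENOMEM;
        }

        s->bitmap = calloc(64, sizeof(*s->bitmap));
        if (!s->bitmap) {
            ret = -ENOMEM;
            goto destroy_pool;          /* unwind in reverse allocation order */
        }

        return 0;

    destroy_pool:
        free(s->pool);                  /* free(NULL) is a harmless no-op */
        s->pool = NULL;
        return ret;
    }
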
 
 void intel_guc_submission_fini(struct intel_guc *guc)
@@ -1890,6 +1955,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
                return;
 
        guc_flush_destroyed_contexts(guc);
+       guc_lrc_desc_pool_destroy_v69(guc);
        i915_sched_engine_put(guc->sched_engine);
        bitmap_free(guc->submission_state.guc_ids_bitmap);
        guc->submission_initialized = false;
@@ -2147,10 +2213,34 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
        spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
-static int __guc_action_register_multi_lrc(struct intel_guc *guc,
-                                          struct intel_context *ce,
-                                          struct guc_ctxt_registration_info *info,
-                                          bool loop)
+static int __guc_action_register_multi_lrc_v69(struct intel_guc *guc,
+                                              struct intel_context *ce,
+                                              u32 guc_id,
+                                              u32 offset,
+                                              bool loop)
+{
+       struct intel_context *child;
+       u32 action[4 + MAX_ENGINE_INSTANCE];
+       int len = 0;
+
+       GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+       action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+       action[len++] = guc_id;
+       action[len++] = ce->parallel.number_children + 1;
+       action[len++] = offset;
+       for_each_child(ce, child) {
+               offset += sizeof(struct guc_lrc_desc_v69);
+               action[len++] = offset;
+       }
+
+       return guc_submission_send_busy_loop(guc, action, len, 0, loop);
+}
+
+static int __guc_action_register_multi_lrc_v70(struct intel_guc *guc,
+                                              struct intel_context *ce,
+                                              struct guc_ctxt_registration_info *info,
+                                              bool loop)
 {
        struct intel_context *child;
        u32 action[13 + (MAX_ENGINE_INSTANCE * 2)];
@@ -2190,9 +2280,24 @@ static int __guc_action_register_multi_lrc(struct intel_guc *guc,
        return guc_submission_send_busy_loop(guc, action, len, 0, loop);
 }
 
-static int __guc_action_register_context(struct intel_guc *guc,
-                                        struct guc_ctxt_registration_info *info,
-                                        bool loop)
+static int __guc_action_register_context_v69(struct intel_guc *guc,
+                                            u32 guc_id,
+                                            u32 offset,
+                                            bool loop)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_REGISTER_CONTEXT,
+               guc_id,
+               offset,
+       };
+
+       return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                            0, loop);
+}
+
+static int __guc_action_register_context_v70(struct intel_guc *guc,
+                                            struct guc_ctxt_registration_info *info,
+                                            bool loop)
 {
        u32 action[] = {
                INTEL_GUC_ACTION_REGISTER_CONTEXT,
@@ -2213,24 +2318,52 @@ static int __guc_action_register_context(struct intel_guc *guc,
                                             0, loop);
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-                                             struct guc_ctxt_registration_info *info);
+static void prepare_context_registration_info_v69(struct intel_context *ce);
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+                                                 struct guc_ctxt_registration_info *info);
 
-static int register_context(struct intel_context *ce, bool loop)
+static int
+register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
+{
+       u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool_v69) +
+               ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
+
+       prepare_context_registration_info_v69(ce);
+
+       if (intel_context_is_parent(ce))
+               return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
+                                                          offset, loop);
+       else
+               return __guc_action_register_context_v69(guc, ce->guc_id.id,
+                                                        offset, loop);
+}
+
+static int
+register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
 {
        struct guc_ctxt_registration_info info;
+
+       prepare_context_registration_info_v70(ce, &info);
+
+       if (intel_context_is_parent(ce))
+               return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
+       else
+               return __guc_action_register_context_v70(guc, &info, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
        struct intel_guc *guc = ce_to_guc(ce);
        int ret;
 
        GEM_BUG_ON(intel_context_is_child(ce));
        trace_intel_context_register(ce);
 
-       prepare_context_registration_info(ce, &info);
-
-       if (intel_context_is_parent(ce))
-               ret = __guc_action_register_multi_lrc(guc, ce, &info, loop);
+       if (guc->fw.major_ver_found >= 70)
+               ret = register_context_v70(guc, ce, loop);
        else
-               ret = __guc_action_register_context(guc, &info, loop);
+               ret = register_context_v69(guc, ce, loop);
+
        if (likely(!ret)) {
                unsigned long flags;
 
@@ -2238,7 +2371,8 @@ static int register_context(struct intel_context *ce, bool loop)
                set_context_registered(ce);
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-               guc_context_policy_init(ce, loop);
+               if (guc->fw.major_ver_found >= 70)
+                       guc_context_policy_init_v70(ce, loop);
        }
 
        return ret;
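
register_context() is now the single seam where the firmware ABI forks: one runtime check on the detected major version selects the v69 descriptor path or the v70 KLV path, keeping both ABIs alive in one binary. In miniature (a sketch, not the driver's signatures):

    struct ctx;

    static int register_ctx_v69(struct ctx *ce) { return 0; /* descriptor-pool path */ }
    static int register_ctx_v70(struct ctx *ce) { return 0; /* registration-info path */ }

    static int register_ctx(struct ctx *ce, unsigned int fw_major)
    {
        /* Dispatch once at the boundary; callees stay version-pure. */
        return fw_major >= 70 ? register_ctx_v70(ce)
                              : register_ctx_v69(ce);
    }
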
@@ -2335,7 +2469,7 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
                                        0, loop);
 }
 
-static int guc_context_policy_init(struct intel_context *ce, bool loop)
+static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2394,8 +2528,108 @@ static int guc_context_policy_init(struct intel_context *ce, bool loop)
        return ret;
 }
 
-static void prepare_context_registration_info(struct intel_context *ce,
-                                             struct guc_ctxt_registration_info *info)
+static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
+                                       struct guc_lrc_desc_v69 *desc)
+{
+       desc->policy_flags = 0;
+
+       if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+               desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
+
+       /* NB: For both of these, zero means disabled. */
+       desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+       desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
+{
+       /*
+        * this matches the mapping we do in map_i915_prio_to_guc_prio()
+        * (e.g. prio < I915_PRIORITY_NORMAL maps to GUC_CLIENT_PRIORITY_NORMAL)
+        */
+       switch (prio) {
+       default:
+               MISSING_CASE(prio);
+               fallthrough;
+       case GUC_CLIENT_PRIORITY_KMD_NORMAL:
+               return GEN12_CTX_PRIORITY_NORMAL;
+       case GUC_CLIENT_PRIORITY_NORMAL:
+               return GEN12_CTX_PRIORITY_LOW;
+       case GUC_CLIENT_PRIORITY_HIGH:
+       case GUC_CLIENT_PRIORITY_KMD_HIGH:
+               return GEN12_CTX_PRIORITY_HIGH;
+       }
+}
+
+static void prepare_context_registration_info_v69(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = ce->engine;
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       u32 ctx_id = ce->guc_id.id;
+       struct guc_lrc_desc_v69 *desc;
+       struct intel_context *child;
+
+       GEM_BUG_ON(!engine->mask);
+
+       /*
+        * Ensure the LRC and CT vmas are in the same region, as the write
+        * barrier is done based on the CT vma region.
+        */
+       GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+                  i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+       desc = __get_lrc_desc_v69(guc, ctx_id);
+       desc->engine_class = engine_class_to_guc_class(engine->class);
+       desc->engine_submit_mask = engine->logical_mask;
+       desc->hw_context_desc = ce->lrc.lrca;
+       desc->priority = ce->guc_state.prio;
+       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+       guc_context_policy_init_v69(engine, desc);
+
+       /*
+        * If context is a parent, we need to register a process descriptor
+        * describing a work queue and register all child contexts.
+        */
+       if (intel_context_is_parent(ce)) {
+               struct guc_process_desc_v69 *pdesc;
+
+               ce->parallel.guc.wqi_tail = 0;
+               ce->parallel.guc.wqi_head = 0;
+
+               desc->process_desc = i915_ggtt_offset(ce->state) +
+                       __get_parent_scratch_offset(ce);
+               desc->wq_addr = i915_ggtt_offset(ce->state) +
+                       __get_wq_offset(ce);
+               desc->wq_size = WQ_SIZE;
+
+               pdesc = __get_process_desc_v69(ce);
+               memset(pdesc, 0, sizeof(*(pdesc)));
+               pdesc->stage_id = ce->guc_id.id;
+               pdesc->wq_base_addr = desc->wq_addr;
+               pdesc->wq_size_bytes = desc->wq_size;
+               pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+               ce->parallel.guc.wq_head = &pdesc->head;
+               ce->parallel.guc.wq_tail = &pdesc->tail;
+               ce->parallel.guc.wq_status = &pdesc->wq_status;
+
+               for_each_child(ce, child) {
+                       desc = __get_lrc_desc_v69(guc, child->guc_id.id);
+
+                       desc->engine_class =
+                               engine_class_to_guc_class(engine->class);
+                       desc->hw_context_desc = child->lrc.lrca;
+                       desc->priority = ce->guc_state.prio;
+                       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+                       guc_context_policy_init_v69(engine, desc);
+               }
+
+               clear_children_join_go_memory(ce);
+       }
+}
+
+static void prepare_context_registration_info_v70(struct intel_context *ce,
+                                                 struct guc_ctxt_registration_info *info)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct intel_guc *guc = &engine->gt->uc.guc;
@@ -2420,6 +2654,8 @@ static void prepare_context_registration_info(struct intel_context *ce,
         */
        info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
        info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
+       if (engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
+               info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
        info->flags = CONTEXT_REGISTRATION_FLAG_KMD;
 
        /*
@@ -2443,10 +2679,14 @@ static void prepare_context_registration_info(struct intel_context *ce,
                info->wq_base_hi = upper_32_bits(wq_base_offset);
                info->wq_size = WQ_SIZE;
 
-               wq_desc = __get_wq_desc(ce);
+               wq_desc = __get_wq_desc_v70(ce);
                memset(wq_desc, 0, sizeof(*wq_desc));
                wq_desc->wq_status = WQ_STATUS_ACTIVE;
 
+               ce->parallel.guc.wq_head = &wq_desc->head;
+               ce->parallel.guc.wq_tail = &wq_desc->tail;
+               ce->parallel.guc.wq_status = &wq_desc->wq_status;
+
                clear_children_join_go_memory(ce);
        }
 }
@@ -2761,11 +3001,21 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
                                                 u16 guc_id,
                                                 u32 preemption_timeout)
 {
-       struct context_policy policy;
+       if (guc->fw.major_ver_found >= 70) {
+               struct context_policy policy;
 
-       __guc_context_policy_start_klv(&policy, guc_id);
-       __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
-       __guc_context_set_context_policies(guc, &policy, true);
+               __guc_context_policy_start_klv(&policy, guc_id);
+               __guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+               __guc_context_set_context_policies(guc, &policy, true);
+       } else {
+               u32 action[] = {
+                       INTEL_GUC_ACTION_V69_SET_CONTEXT_PREEMPTION_TIMEOUT,
+                       guc_id,
+                       preemption_timeout
+               };
+
+               intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+       }
 }
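
For pre-70 firmware, policy updates fall back to single-purpose H2G actions: an action id followed by its arguments in a flat u32 array, rather than a KLV blob. A sketch of that message layout (the action value and the transport stub are illustrative, not the real ABI):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ACTION_SET_PREEMPTION_TIMEOUT 0x1005u   /* illustrative id */

    /* Stand-in for the driver's busy-loop CT send. */
    static int send_busy_loop(const uint32_t *action, size_t len_dw)
    {
        printf("H2G action 0x%x, %zu dwords\n", (unsigned)action[0], len_dw);
        return 0;
    }

    static int set_preemption_timeout(uint32_t guc_id, uint32_t timeout_us)
    {
        uint32_t action[] = {
            ACTION_SET_PREEMPTION_TIMEOUT,  /* what to do */
            guc_id,                         /* which context */
            timeout_us,                     /* the one argument */
        };

        return send_busy_loop(action, sizeof(action) / sizeof(action[0]));
    }
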
 
 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
@@ -3013,11 +3263,21 @@ static int guc_context_alloc(struct intel_context *ce)
 static void __guc_context_set_prio(struct intel_guc *guc,
                                   struct intel_context *ce)
 {
-       struct context_policy policy;
+       if (guc->fw.major_ver_found >= 70) {
+               struct context_policy policy;
 
-       __guc_context_policy_start_klv(&policy, ce->guc_id.id);
-       __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
-       __guc_context_set_context_policies(guc, &policy, true);
+               __guc_context_policy_start_klv(&policy, ce->guc_id.id);
+               __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
+               __guc_context_set_context_policies(guc, &policy, true);
+       } else {
+               u32 action[] = {
+                       INTEL_GUC_ACTION_V69_SET_CONTEXT_PRIORITY,
+                       ce->guc_id.id,
+                       ce->guc_state.prio,
+               };
+
+               guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+       }
 }
 
 static void guc_context_set_prio(struct intel_guc *guc,
@@ -4527,17 +4787,19 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
                guc_log_context_priority(p, ce);
 
                if (intel_context_is_parent(ce)) {
-                       struct guc_sched_wq_desc *wq_desc = __get_wq_desc(ce);
                        struct intel_context *child;
 
                        drm_printf(p, "\t\tNumber children: %u\n",
                                   ce->parallel.number_children);
-                       drm_printf(p, "\t\tWQI Head: %u\n",
-                                  READ_ONCE(wq_desc->head));
-                       drm_printf(p, "\t\tWQI Tail: %u\n",
-                                  READ_ONCE(wq_desc->tail));
-                       drm_printf(p, "\t\tWQI Status: %u\n\n",
-                                  READ_ONCE(wq_desc->wq_status));
+
+                       if (ce->parallel.guc.wq_status) {
+                               drm_printf(p, "\t\tWQI Head: %u\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_head));
+                               drm_printf(p, "\t\tWQI Tail: %u\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_tail));
+                               drm_printf(p, "\t\tWQI Status: %u\n\n",
+                                          READ_ONCE(*ce->parallel.guc.wq_status));
+                       }
 
                        if (ce->engine->emit_bb_start ==
                            emit_bb_start_parent_no_preempt_mid_batch) {
index 2ff55b9..703f42b 100644 (file)
@@ -70,6 +70,10 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
        fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
 
+#define INTEL_GUC_FIRMWARE_DEFS_FALLBACK(fw_def, guc_def) \
+       fw_def(ALDERLAKE_P,  0, guc_def(adlp, 69, 0, 3)) \
+       fw_def(ALDERLAKE_S,  0, guc_def(tgl,  69, 0, 3))
+
 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
        fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
        fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
@@ -105,6 +109,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        MODULE_FIRMWARE(uc_);
 
 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
+INTEL_GUC_FIRMWARE_DEFS_FALLBACK(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
 
 /* The below structs and macros are used to iterate across the list of blobs */
@@ -149,6 +154,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
        static const struct uc_fw_platform_requirement blobs_guc[] = {
                INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
        };
+       static const struct uc_fw_platform_requirement blobs_guc_fallback[] = {
+               INTEL_GUC_FIRMWARE_DEFS_FALLBACK(MAKE_FW_LIST, GUC_FW_BLOB)
+       };
        static const struct uc_fw_platform_requirement blobs_huc[] = {
                INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
        };
@@ -179,12 +187,29 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
                if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
                        const struct uc_fw_blob *blob = &fw_blobs[i].blob;
                        uc_fw->path = blob->path;
+                       uc_fw->wanted_path = blob->path;
                        uc_fw->major_ver_wanted = blob->major;
                        uc_fw->minor_ver_wanted = blob->minor;
                        break;
                }
        }
 
+       if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+               const struct uc_fw_platform_requirement *blobs = blobs_guc_fallback;
+               u32 count = ARRAY_SIZE(blobs_guc_fallback);
+
+               for (i = 0; i < count && p <= blobs[i].p; i++) {
+                       if (p == blobs[i].p && rev >= blobs[i].rev) {
+                               const struct uc_fw_blob *blob = &blobs[i].blob;
+
+                               uc_fw->fallback.path = blob->path;
+                               uc_fw->fallback.major_ver = blob->major;
+                               uc_fw->fallback.minor_ver = blob->minor;
+                               break;
+                       }
+               }
+       }
+
        /* make sure the list is ordered as expected */
        if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
                for (i = 1; i < fw_count; i++) {
@@ -338,7 +363,24 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        __force_fw_fetch_failures(uc_fw, -EINVAL);
        __force_fw_fetch_failures(uc_fw, -ESTALE);
 
-       err = request_firmware(&fw, uc_fw->path, dev);
+       err = firmware_request_nowarn(&fw, uc_fw->path, dev);
+       if (err && !intel_uc_fw_is_overridden(uc_fw) && uc_fw->fallback.path) {
+               err = firmware_request_nowarn(&fw, uc_fw->fallback.path, dev);
+               if (!err) {
+                       drm_notice(&i915->drm,
+                                  "%s firmware %s is recommended, but only %s was found\n",
+                                  intel_uc_fw_type_repr(uc_fw->type),
+                                  uc_fw->wanted_path,
+                                  uc_fw->fallback.path);
+                       drm_info(&i915->drm,
+                                "Consider updating your linux-firmware pkg or downloading from %s\n",
+                                INTEL_UC_FIRMWARE_URL);
+
+                       uc_fw->path = uc_fw->fallback.path;
+                       uc_fw->major_ver_wanted = uc_fw->fallback.major_ver;
+                       uc_fw->minor_ver_wanted = uc_fw->fallback.minor_ver;
+               }
+       }
        if (err)
                goto fail;
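
The resulting fetch logic: request the preferred blob quietly, and only if it is absent, the user has not overridden the path, and a fallback is defined, retry with the fallback and warn. Reduced to a user-space sketch (request_fw() is a stub for firmware_request_nowarn(), hard-wired to "find" one blob):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stub: succeed only for the blob we pretend is installed. */
    static int request_fw(const char *path)
    {
        return strcmp(path, "i915/adlp_guc_69.0.3.bin") ? -ENOENT : 0;
    }

    static const char *fetch(const char *wanted, const char *fallback,
                             int user_overridden)
    {
        if (request_fw(wanted) == 0)
            return wanted;

        if (!user_overridden && fallback && request_fw(fallback) == 0) {
            fprintf(stderr, "%s is recommended, but only %s was found\n",
                    wanted, fallback);
            return fallback;        /* caller records the substitution */
        }

        return NULL;                /* both attempts failed */
    }
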
 
@@ -437,8 +479,8 @@ fail:
                                  INTEL_UC_FIRMWARE_MISSING :
                                  INTEL_UC_FIRMWARE_ERROR);
 
-       drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
-                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+       i915_probe_error(i915, "%s firmware %s: fetch failed with error %d\n",
+                        intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
        drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
                 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
 
@@ -796,7 +838,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
 {
        drm_printf(p, "%s firmware: %s\n",
-                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+                  intel_uc_fw_type_repr(uc_fw->type), uc_fw->wanted_path);
+       if (uc_fw->fallback.path) {
+               drm_printf(p, "%s firmware fallback: %s\n",
+                          intel_uc_fw_type_repr(uc_fw->type), uc_fw->fallback.path);
+               drm_printf(p, "fallback selected: %s\n",
+                          str_yes_no(uc_fw->path == uc_fw->fallback.path));
+       }
        drm_printf(p, "\tstatus: %s\n",
                   intel_uc_fw_status_repr(uc_fw->status));
        drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
index 3229018..562acdf 100644 (file)
@@ -74,6 +74,7 @@ struct intel_uc_fw {
                const enum intel_uc_fw_status status;
                enum intel_uc_fw_status __status; /* no accidental overwrites */
        };
+       const char *wanted_path;
        const char *path;
        bool user_overridden;
        size_t size;
@@ -98,6 +99,12 @@ struct intel_uc_fw {
        u16 major_ver_found;
        u16 minor_ver_found;
 
+       struct {
+               const char *path;
+               u16 major_ver;
+               u16 minor_ver;
+       } fallback;
+
        u32 rsa_size;
        u32 ucode_size;
 
index c849533..3f5750c 100644 (file)
@@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
 
        ret = dcss_submodules_init(dcss);
        if (ret) {
+               of_node_put(dcss->of_port);
                dev_err(dev, "submodules initialization failed\n");
                goto clks_err;
        }
@@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
                dcss_clocks_disable(dcss);
        }
 
+       of_node_put(dcss->of_port);
+
        pm_runtime_disable(dcss->dev);
 
        dcss_submodules_stop(dcss);
index c960144..a189982 100644 (file)
@@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
        of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
        desc->delay.hpd_reliable = reliable_ms;
        of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
-       desc->delay.hpd_reliable = absent_ms;
+       desc->delay.hpd_absent = absent_ms;
 
        /* Power the panel on so we can read the EDID */
        ret = pm_runtime_get_sync(dev);
index 191c560..6b25b2f 100644 (file)
@@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 }
 EXPORT_SYMBOL(drm_sched_entity_flush);
 
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk)
+static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
 {
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
 
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
 
-       init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work);
-       irq_work_queue(&job->work);
+       INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
+       schedule_work(&job->work);
 }
 
 static struct dma_fence *
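
The scheduler's swap from irq_work to a workqueue item matters because schedule_work() defers to a kernel thread, presumably so the kill-jobs cleanup can block, which hard-irq context forbids. A rough pthread analogue of deferring work to a schedulable context:

    #include <pthread.h>
    #include <stdio.h>

    struct work { void (*fn)(struct work *); };

    static void *worker(void *arg)          /* the kworker analogue */
    {
        struct work *w = arg;
        w->fn(w);                           /* may block freely here */
        return NULL;
    }

    static void kill_jobs_fn(struct work *w)
    {
        (void)w;
        puts("cleanup running in schedulable context");
    }

    int main(void)
    {
        struct work w = { .fn = kill_jobs_fn };
        pthread_t t;

        pthread_create(&t, NULL, worker, &w);   /* schedule_work() analogue */
        pthread_join(t, NULL);
        return 0;
    }
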
index 3d6f8ee..630cfa4 100644 (file)
@@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr)
  */
 static irqreturn_t cdns_i2c_master_isr(void *ptr)
 {
-       unsigned int isr_status, avail_bytes, updatetx;
+       unsigned int isr_status, avail_bytes;
        unsigned int bytes_to_send;
-       bool hold_quirk;
+       bool updatetx;
        struct cdns_i2c *id = ptr;
        /* Signal completion only after everything is updated */
        int done_flag = 0;
@@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
         * Check if transfer size register needs to be updated again for a
         * large data receive operation.
         */
-       updatetx = 0;
-       if (id->recv_count > id->curr_recv_count)
-               updatetx = 1;
-
-       hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx;
+       updatetx = id->recv_count > id->curr_recv_count;
 
        /* When receiving, handle data interrupt and completion interrupt */
        if (id->p_recv_buf &&
@@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                                break;
                        }
 
-                       if (cdns_is_holdquirk(id, hold_quirk))
+                       if (cdns_is_holdquirk(id, updatetx))
                                break;
                }
 
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                 * maintain transfer size non-zero while performing a large
                 * receive operation.
                 */
-               if (cdns_is_holdquirk(id, hold_quirk)) {
+               if (cdns_is_holdquirk(id, updatetx)) {
                        /* wait while fifo is full */
                        while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
                               (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
@@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
                                                  CDNS_I2C_XFER_SIZE_OFFSET);
                                id->curr_recv_count = id->recv_count;
                        }
-               } else if (id->recv_count && !hold_quirk &&
-                                               !id->curr_recv_count) {
-
-                       /* Set the slave address in address register*/
-                       cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK,
-                                               CDNS_I2C_ADDR_OFFSET);
-
-                       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
-                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
-                       } else {
-                               cdns_i2c_writereg(id->recv_count,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                               id->curr_recv_count = id->recv_count;
-                       }
                }
 
                /* Clear hold (if not repeated start) and signal completion */
index e9e2db6..78fb1a4 100644 (file)
@@ -66,7 +66,7 @@
 
 /* IMX I2C registers:
  * the I2C register offset is different between SoCs,
- * to provid support for all these chips, split the
+ * to provide support for all these chips, split the
  * register offset into a fixed base address and a
  * variable shift value, then the full register offset
  * will be calculated by
index 56aa424..815cc56 100644 (file)
@@ -49,7 +49,7 @@
 #define MLXCPLD_LPCI2C_NACK_IND                2
 
 #define MLXCPLD_I2C_FREQ_1000KHZ_SET   0x04
-#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0c
+#define MLXCPLD_I2C_FREQ_400KHZ_SET    0x0e
 #define MLXCPLD_I2C_FREQ_100KHZ_SET    0x42
 
 enum mlxcpld_i2c_frequency {
index 20e53b1..c8539d0 100644 (file)
@@ -7304,7 +7304,9 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                goto abort;
        conf->mddev = mddev;
 
-       if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
+       ret = -ENOMEM;
+       conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!conf->stripe_hashtbl)
                goto abort;
 
        /* We init hash_locks[0] separately so that it can be used
index 2e0aa74..95ef971 100644 (file)
@@ -13,10 +13,13 @@ lkdtm-$(CONFIG_LKDTM)               += cfi.o
 lkdtm-$(CONFIG_LKDTM)          += fortify.o
 lkdtm-$(CONFIG_PPC_64S_HASH_MMU)       += powerpc.o
 
-KASAN_SANITIZE_rodata.o                := n
 KASAN_SANITIZE_stackleak.o     := n
-KCOV_INSTRUMENT_rodata.o       := n
-CFLAGS_REMOVE_rodata.o         += $(CC_FLAGS_LTO)
+
+KASAN_SANITIZE_rodata.o                        := n
+KCSAN_SANITIZE_rodata.o                        := n
+KCOV_INSTRUMENT_rodata.o               := n
+OBJECT_FILES_NON_STANDARD_rodata.o     := y
+CFLAGS_REMOVE_rodata.o                 += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
 
 OBJCOPYFLAGS :=
 OBJCOPYFLAGS_rodata_objcopy.o  := \
index 86e867f..033be55 100644 (file)
@@ -1298,8 +1298,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        /*
         * omap_device_pm_domain has callbacks to enable the main
         * functional clock, interface clock and also configure the
-        * SYSCONFIG register of omap devices. The callback will be invoked
-        * as part of pm_runtime_get_sync.
+        * SYSCONFIG register to clear any boot loader set voltage
+        * capabilities before calling sdhci_setup_host(). The
+        * callback will be invoked as part of pm_runtime_get_sync.
         */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 50);
@@ -1441,7 +1442,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
 
-       sdhci_runtime_suspend_host(host);
+       if (omap_host->con != -EINVAL)
+               sdhci_runtime_suspend_host(host);
 
        sdhci_omap_context_save(omap_host);
 
@@ -1458,10 +1460,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
-       if (omap_host->con != -EINVAL)
+       if (omap_host->con != -EINVAL) {
                sdhci_omap_context_restore(omap_host);
-
-       sdhci_runtime_resume_host(host, 0);
+               sdhci_runtime_resume_host(host, 0);
+       }
 
        return 0;
 }
index 889e403..93da236 100644 (file)
@@ -850,9 +850,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
        unsigned int tRP_ps;
        bool use_half_period;
        int sample_delay_ps, sample_delay_factor;
-       u16 busy_timeout_cycles;
+       unsigned int busy_timeout_cycles;
        u8 wrn_dly_sel;
        unsigned long clk_rate, min_rate;
+       u64 busy_timeout_ps;
 
        if (sdr->tRC_min >= 30000) {
                /* ONFI non-EDO modes [0-3] */
@@ -885,7 +886,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
        addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
        data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
        data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
-       busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
+       busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
+       busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
 
        hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
                      BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
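
The widening here is the point of the fix: tBERS/tPROG are on the order of milliseconds, which expressed in controller cycles overflows the old u16. A quick check with 64-bit arithmetic (TO_CYCLES is assumed to be a round-up division; the timing figures are illustrative):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t to_cycles(uint64_t time_ps, uint64_t period_ps)
    {
        return (time_ps + period_ps - 1) / period_ps;   /* round up */
    }

    int main(void)
    {
        uint64_t tBERS_max = 700ULL * 1000 * 1000 * 1000;  /* 700 ms in ps */
        uint64_t tPROG_max = 600ULL * 1000 * 1000;         /* 600 us in ps */
        uint64_t period_ps = 10000;                        /* 100 MHz clock */
        uint64_t max_ps    = tBERS_max > tPROG_max ? tBERS_max : tPROG_max;

        /* 70,000,000 cycles: far more than the 65,535 a u16 can hold. */
        printf("busy_timeout_cycles = %" PRIu64 "\n",
               to_cycles(max_ps, period_ps));
        return 0;
    }
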
index 0f6a549..29a6c2e 100644 (file)
@@ -142,6 +142,7 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
                         int ref_ok, struct funeth_txq *xdp_q)
 {
        struct bpf_prog *xdp_prog;
+       struct xdp_frame *xdpf;
        struct xdp_buff xdp;
        u32 act;
 
@@ -163,7 +164,9 @@ static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
        case XDP_TX:
                if (unlikely(!ref_ok))
                        goto pass;
-               if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
+
+               xdpf = xdp_convert_buff_to_frame(&xdp);
+               if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
                        goto xdp_error;
                FUN_QSTAT_INC(q, xdp_tx);
                q->xdp_flush |= FUN_XDP_FLUSH_TX;
index a97e3af..54bdeb6 100644 (file)
@@ -487,7 +487,7 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
 
                do {
                        fun_xdp_unmap(q, reclaim_idx);
-                       page_frag_free(q->info[reclaim_idx].vaddr);
+                       xdp_return_frame(q->info[reclaim_idx].xdpf);
 
                        trace_funeth_tx_free(q, reclaim_idx, 1, head);
 
@@ -500,11 +500,11 @@ static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
        return npkts;
 }
 
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
 {
        struct fun_eth_tx_req *req;
        struct fun_dataop_gl *gle;
-       unsigned int idx;
+       unsigned int idx, len;
        dma_addr_t dma;
 
        if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
@@ -515,7 +515,8 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
                return false;
        }
 
-       dma = dma_map_single(q->dma_dev, data, len, DMA_TO_DEVICE);
+       len = xdpf->len;
+       dma = dma_map_single(q->dma_dev, xdpf->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(q->dma_dev, dma))) {
                FUN_QSTAT_INC(q, tx_map_err);
                return false;
@@ -535,7 +536,7 @@ bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len)
        gle = (struct fun_dataop_gl *)req->dataop.imm;
        fun_dataop_gl_init(gle, 0, 0, len, dma);
 
-       q->info[idx].vaddr = data;
+       q->info[idx].xdpf = xdpf;
 
        u64_stats_update_begin(&q->syncp);
        q->stats.tx_bytes += len;
@@ -566,12 +567,9 @@ int fun_xdp_xmit_frames(struct net_device *dev, int n,
        if (unlikely(q_idx >= fp->num_xdpqs))
                return -ENXIO;
 
-       for (q = xdpqs[q_idx], i = 0; i < n; i++) {
-               const struct xdp_frame *xdpf = frames[i];
-
-               if (!fun_xdp_tx(q, xdpf->data, xdpf->len))
+       for (q = xdpqs[q_idx], i = 0; i < n; i++)
+               if (!fun_xdp_tx(q, frames[i]))
                        break;
-       }
 
        if (unlikely(flags & XDP_XMIT_FLUSH))
                fun_txq_wr_db(q);
@@ -598,7 +596,7 @@ static void fun_xdpq_purge(struct funeth_txq *q)
                unsigned int idx = q->cons_cnt & q->mask;
 
                fun_xdp_unmap(q, idx);
-               page_frag_free(q->info[idx].vaddr);
+               xdp_return_frame(q->info[idx].xdpf);
                q->cons_cnt++;
        }
 }
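
Across funeth the XDP TX path now carries struct xdp_frame end to end, so the object handed to fun_xdp_tx() is the same one the cleanup paths release with xdp_return_frame(). The record/release pairing, as a toy sketch with a stand-in frame type:

    #include <stdlib.h>

    struct frame { void *data; unsigned int len; };   /* xdp_frame stand-in */
    struct tx_info { struct frame *f; };              /* per-descriptor state */

    static int xdp_tx(struct tx_info *slot, struct frame *f)
    {
        if (!f)
            return -1;          /* conversion failed: caller drops the packet */
        slot->f = f;            /* was: a raw buffer pointer (vaddr) */
        return 0;
    }

    static void tx_clean(struct tx_info *slot)
    {
        free(slot->f);          /* was: page_frag_free(vaddr); the frame now
                                 * goes back through its own release routine */
        slot->f = NULL;
    }
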
index 1711f82..53b7e95 100644 (file)
@@ -96,8 +96,8 @@ struct funeth_txq_stats {  /* per Tx queue SW counters */
 
 struct funeth_tx_info {      /* per Tx descriptor state */
        union {
-               struct sk_buff *skb; /* associated packet */
-               void *vaddr;         /* start address for XDP */
+               struct sk_buff *skb;    /* associated packet (sk_buff path) */
+               struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
        };
 };
 
@@ -246,7 +246,7 @@ static inline int fun_irq_node(const struct fun_irq *p)
 int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
 int fun_txq_napi_poll(struct napi_struct *napi, int budget);
 netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-bool fun_xdp_tx(struct funeth_txq *q, void *data, unsigned int len);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
 int fun_xdp_xmit_frames(struct net_device *dev, int n,
                        struct xdp_frame **frames, u32 flags);
 
index fb9f476..b36bf9c 100644 (file)
@@ -2033,11 +2033,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                 * non-zero req_queue_pairs says that user requested a new
                 * queue count via ethtool's set_channels, so use this
                 * value for queues distribution across traffic classes
+                * We need at least one queue pair for the interface
+                * to be usable, as the else branch below ensures.
                 */
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        vsi->num_queue_pairs = pf->num_lan_msix;
+               else
+                       vsi->num_queue_pairs = 1;
        }
 
        /* Number of queues per enabled TC */
index 70335f6..4efa5e5 100644 (file)
@@ -658,7 +658,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
                rx_desc = ICE_RX_DESC(rx_ring, i);
 
                if (!(rx_desc->wb.status_error0 &
-                   cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+                   (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+                    cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
                        continue;
 
                rx_buf = &rx_ring->rx_buf[i];
index 3137166..29914ee 100644 (file)
@@ -4656,6 +4656,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                ice_set_safe_mode_caps(hw);
        }
 
+       hw->ucast_shared = true;
+
        err = ice_init_pf(pf);
        if (err) {
                dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -6005,10 +6007,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
        if (vsi->netdev) {
                ice_set_rx_mode(vsi->netdev);
 
-               err = ice_vsi_vlan_setup(vsi);
+               if (vsi->type != ICE_VSI_LB) {
+                       err = ice_vsi_vlan_setup(vsi);
 
-               if (err)
-                       return err;
+                       if (err)
+                               return err;
+               }
        }
        ice_vsi_cfg_dcb_rings(vsi);
 
index 86093b2..3ba1408 100644 (file)
@@ -1309,39 +1309,6 @@ out_put_vf:
        return ret;
 }
 
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
-       struct ice_sw_recipe *mac_recipe_list =
-               &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
-       struct ice_fltr_mgmt_list_entry *list_itr;
-       struct list_head *rule_head;
-       struct mutex *rule_lock; /* protect MAC filter list access */
-
-       rule_head = &mac_recipe_list->filt_rules;
-       rule_lock = &mac_recipe_list->filt_rule_lock;
-
-       mutex_lock(rule_lock);
-       list_for_each_entry(list_itr, rule_head, list_entry) {
-               u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
-               if (ether_addr_equal(existing_mac, umac)) {
-                       mutex_unlock(rule_lock);
-                       return true;
-               }
-       }
-
-       mutex_unlock(rule_lock);
-
-       return false;
-}
-
 /**
  * ice_set_vf_mac
  * @netdev: network interface device structure
@@ -1376,13 +1343,6 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        if (ret)
                goto out_put_vf;
 
-       if (ice_unicast_mac_exists(pf, mac)) {
-               netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
-                          mac, vf_id, mac);
-               ret = -EINVAL;
-               goto out_put_vf;
-       }
-
        mutex_lock(&vf->cfg_lock);
 
        /* VF is notified of its new MAC via the PF's response to the
index 3f8b727..836dce8 100644 (file)
@@ -1751,11 +1751,13 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 
        protocol = vlan_get_protocol(skb);
 
-       if (eth_p_mpls(protocol))
+       if (eth_p_mpls(protocol)) {
                ip.hdr = skb_inner_network_header(skb);
-       else
+               l4.hdr = skb_checksum_start(skb);
+       } else {
                ip.hdr = skb_network_header(skb);
-       l4.hdr = skb_checksum_start(skb);
+               l4.hdr = skb_transport_header(skb);
+       }
 
        /* compute outer L2 header size */
        l2_len = ip.hdr - skb->data;
index b2b5d2e..4508308 100644 (file)
@@ -2971,7 +2971,8 @@ ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
                                     struct virtchnl_vlan_filtering_caps *vfc,
                                     struct virtchnl_vlan_filter_list_v2 *vfl)
 {
-       u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
+       u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+               vfl->num_elements;
 
        if (num_requested_filters > vfc->max_filters)
                return false;
index 28b1994..e64318c 100644 (file)
@@ -28,6 +28,9 @@
 #define MAX_RATE_EXPONENT              0x0FULL
 #define MAX_RATE_MANTISSA              0xFFULL
 
+#define CN10K_MAX_BURST_MANTISSA       0x7FFFULL
+#define CN10K_MAX_BURST_SIZE           8453888ULL
+
 /* Bitfields in NIX_TLX_PIR register */
 #define TLX_RATE_MANTISSA              GENMASK_ULL(8, 1)
 #define TLX_RATE_EXPONENT              GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
 #define TLX_BURST_MANTISSA             GENMASK_ULL(36, 29)
 #define TLX_BURST_EXPONENT             GENMASK_ULL(40, 37)
 
+#define CN10K_TLX_BURST_MANTISSA       GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT       GENMASK_ULL(47, 44)
+
 struct otx2_tc_flow_stats {
        u64 bytes;
        u64 pkts;
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
 }
 EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
 
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
-                                     u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+                                     u32 *burst_exp, u32 *burst_mantissa)
 {
+       int max_burst, max_mantissa;
        unsigned int tmp;
 
+       if (is_dev_otx2(nic->pdev)) {
+               max_burst = MAX_BURST_SIZE;
+               max_mantissa = MAX_BURST_MANTISSA;
+       } else {
+               max_burst = CN10K_MAX_BURST_SIZE;
+               max_mantissa = CN10K_MAX_BURST_MANTISSA;
+       }
+
        /* Burst is calculated as
         * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
         * Max supported burst size is 130,816 bytes.
         */
-       burst = min_t(u32, burst, MAX_BURST_SIZE);
+       burst = min_t(u32, burst, max_burst);
        if (burst) {
                *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
                tmp = burst - rounddown_pow_of_two(burst);
-               if (burst < MAX_BURST_MANTISSA)
+               if (burst < max_mantissa)
                        *burst_mantissa = tmp * 2;
                else
                        *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
        } else {
                *burst_exp = MAX_BURST_EXPONENT;
-               *burst_mantissa = MAX_BURST_MANTISSA;
+               *burst_mantissa = max_mantissa;
        }
 }
 
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
                                     u32 *mantissa, u32 *div_exp)
 {
-       unsigned int tmp;
+       u64 tmp;
 
        /* Rate calculation by hardware
         *
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
        }
 }
 
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+                                      u64 maxrate, u32 burst)
 {
-       struct otx2_hw *hw = &nic->hw;
-       struct nix_txschq_config *req;
        u32 burst_exp, burst_mantissa;
        u32 exp, mantissa, div_exp;
+       u64 regval = 0;
+
+       /* Get exponent and mantissa values from the desired rate */
+       otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+       otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+       if (is_dev_otx2(nic->pdev)) {
+               regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+                               FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+                               FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+                               FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+                               FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       } else {
+               regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+                               FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+                               FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+                               FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+                               FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       }
+
+       return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+                                        u32 burst, u64 maxrate)
+{
+       struct otx2_hw *hw = &nic->hw;
+       struct nix_txschq_config *req;
        int txschq, err;
 
        /* All SQs share the same TL4, so pick the first scheduler */
        txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
 
-       /* Get exponent and mantissa values from the desired rate */
-       otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
-       otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
        mutex_lock(&nic->mbox.lock);
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
        if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
        req->lvl = NIX_TXSCH_LVL_TL4;
        req->num_regs = 1;
        req->reg[0] = NIX_AF_TL4X_PIR(txschq);
-       req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
-                        FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
-                        FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
-                        FIELD_PREP(TLX_RATE_EXPONENT, exp) |
-                        FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+       req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
 
        err = otx2_sync_mbox_msg(&nic->mbox);
        mutex_unlock(&nic->mbox.lock);
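
With CN10K the burst fields simply occupy wider bit ranges in the same PIR register, so one helper assembles the value from generation-specific masks. A user-space rendering of that FIELD_PREP packing (masks shaped like the GENMASK definitions above, not the full register layout; __builtin_ctzll is a GCC/Clang builtin):

    #include <stdint.h>

    /* FIELD_PREP analogue: shift val into the contiguous field in mask. */
    static uint64_t field_prep(uint64_t mask, uint64_t val)
    {
        return (val << __builtin_ctzll(mask)) & mask;
    }

    #define RATE_MANTISSA         0x00000000000001feULL  /* bits  8:1        */
    #define RATE_EXPONENT         0x0000000000001e00ULL  /* bits 12:9        */
    #define BURST_MANTISSA        0x0000001fe0000000ULL  /* bits 36:29 otx2  */
    #define CN10K_BURST_MANTISSA  0x00000fffe0000000ULL  /* bits 43:29 cn10k */

    static uint64_t pir_regval(int is_cn10k, uint64_t exp, uint64_t mant,
                               uint64_t burst_mant)
    {
        uint64_t v = field_prep(RATE_EXPONENT, exp) |
                     field_prep(RATE_MANTISSA, mant) |
                     1ULL;                               /* enable bit 0 */

        v |= is_cn10k ? field_prep(CN10K_BURST_MANTISSA, burst_mant)
                      : field_prep(BURST_MANTISSA, burst_mant);
        return v;
    }
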
@@ -230,7 +264,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
        struct netlink_ext_ack *extack = cls->common.extack;
        struct flow_action *actions = &cls->rule->action;
        struct flow_action_entry *entry;
-       u32 rate;
+       u64 rate;
        int err;
 
        err = otx2_tc_validate_flow(nic, actions, extack);
@@ -256,7 +290,7 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
                }
                /* Convert bytes per second to Mbps */
                rate = entry->police.rate_bytes_ps * 8;
-               rate = max_t(u32, rate / 1000000, 1);
+               rate = max_t(u64, rate / 1000000, 1);
                err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
                if (err)
                        return err;
@@ -614,21 +648,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
 
                flow_spec->dport = match.key->dst;
                flow_mask->dport = match.mask->dst;
-               if (ip_proto == IPPROTO_UDP)
-                       req->features |= BIT_ULL(NPC_DPORT_UDP);
-               else if (ip_proto == IPPROTO_TCP)
-                       req->features |= BIT_ULL(NPC_DPORT_TCP);
-               else if (ip_proto == IPPROTO_SCTP)
-                       req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+               if (flow_mask->dport) {
+                       if (ip_proto == IPPROTO_UDP)
+                               req->features |= BIT_ULL(NPC_DPORT_UDP);
+                       else if (ip_proto == IPPROTO_TCP)
+                               req->features |= BIT_ULL(NPC_DPORT_TCP);
+                       else if (ip_proto == IPPROTO_SCTP)
+                               req->features |= BIT_ULL(NPC_DPORT_SCTP);
+               }
 
                flow_spec->sport = match.key->src;
                flow_mask->sport = match.mask->src;
-               if (ip_proto == IPPROTO_UDP)
-                       req->features |= BIT_ULL(NPC_SPORT_UDP);
-               else if (ip_proto == IPPROTO_TCP)
-                       req->features |= BIT_ULL(NPC_SPORT_TCP);
-               else if (ip_proto == IPPROTO_SCTP)
-                       req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+               if (flow_mask->sport) {
+                       if (ip_proto == IPPROTO_UDP)
+                               req->features |= BIT_ULL(NPC_SPORT_UDP);
+                       else if (ip_proto == IPPROTO_TCP)
+                               req->features |= BIT_ULL(NPC_SPORT_TCP);
+                       else if (ip_proto == IPPROTO_SCTP)
+                               req->features |= BIT_ULL(NPC_SPORT_SCTP);
+               }
        }
 
        return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
index e31f8fb..df2ab5c 100644 (file)
@@ -4233,7 +4233,7 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
                        }
 
                        /* If the chain is ended by an load/store pair then this
-                        * could serve as the new head of the the next chain.
+                        * could serve as the new head of the next chain.
                         */
                        if (curr_pair_is_memcpy(meta1, meta2)) {
                                head_ld_meta = meta1;
index 4625f85..10ad0b9 100644 (file)
@@ -1100,7 +1100,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
 
        tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
        if (tx_queue && tx_queue->timestamping) {
+               /* This code invokes normal driver TX code which is always
+                * protected from softirqs when called from generic TX code,
+                * which in turn disables preemption. Look at __dev_queue_xmit
+                * which uses rcu_read_lock_bh disabling preemption for RCU
+                * plus disabling softirqs. We do not need RCU reader
+                * protection here.
+                *
+                * Although it is theoretically safe for the current PTP TX/RX
+                * code to run without disabling softirqs, there are three good
+                * reasons for doing so:
+                *
+                *      1) The code invoked is mainly implemented for non-PTP
+                *         packets and it is always executed with softirqs
+                *         disabled.
+                *      2) This being a single PTP packet, better to not
+                *         interrupt its processing by softirqs which can lead
+                *         to high latencies.
+                *      3) netdev_xmit_more checks preemption is disabled and
+                *         triggers a BUG_ON if not.
+                */
+               local_bh_disable();
                efx_enqueue_skb(tx_queue, skb);
+               local_bh_enable();
        } else {
                WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
                dev_kfree_skb_any(skb);
index ca8ab29..d42e1af 100644 (file)
@@ -688,18 +688,19 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
 
        ret = mediatek_dwmac_clks_config(priv_plat, true);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
-       if (ret) {
-               stmmac_remove_config_dt(pdev, plat_dat);
+       if (ret)
                goto err_drv_probe;
-       }
 
        return 0;
 
 err_drv_probe:
        mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
        return ret;
 }
 
index 3233d14..495e85a 100644 (file)
@@ -214,7 +214,7 @@ struct ipa_init_modem_driver_req {
 
 /* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
  * QMI response, but contains other information as well.  Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
  * ignore any other data that might be returned.
  */
 struct ipa_init_modem_driver_rsp {
index c881e1b..f1683ce 100644 (file)
@@ -243,6 +243,7 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
 #define DEFAULT_SEND_SCI true
 #define DEFAULT_ENCRYPT false
 #define DEFAULT_ENCODING_SA 0
+#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))
 
 static bool send_sci(const struct macsec_secy *secy)
 {
@@ -1697,7 +1698,7 @@ static bool validate_add_rxsa(struct nlattr **attrs)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_PN] &&
-           *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+           nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -1753,7 +1754,8 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        }
 
        pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
-       if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
+       if (tb_sa[MACSEC_SA_ATTR_PN] &&
+           nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
                pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
                          nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
                rtnl_unlock();
@@ -1769,7 +1771,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -1842,7 +1844,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        return 0;
 
 cleanup:
-       kfree(rx_sa);
+       macsec_rxsa_put(rx_sa);
        rtnl_unlock();
        return err;
 }
@@ -1939,7 +1941,7 @@ static bool validate_add_txsa(struct nlattr **attrs)
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;
 
-       if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -2011,7 +2013,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
                if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
                        pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
                                  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
-                                 MACSEC_SA_ATTR_SALT);
+                                 MACSEC_SALT_LEN);
                        rtnl_unlock();
                        return -EINVAL;
                }
@@ -2085,7 +2087,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 
 cleanup:
        secy->operational = was_operational;
-       kfree(tx_sa);
+       macsec_txsa_put(tx_sa);
        rtnl_unlock();
        return err;
 }
@@ -2293,7 +2295,7 @@ static bool validate_upd_sa(struct nlattr **attrs)
        if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
                return false;
 
-       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
+       if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
                return false;
 
        if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
@@ -3745,9 +3747,6 @@ static int macsec_changelink_common(struct net_device *dev,
                secy->operational = tx_sa && tx_sa->active;
        }
 
-       if (data[IFLA_MACSEC_WINDOW])
-               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
-
        if (data[IFLA_MACSEC_ENCRYPT])
                tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
 
@@ -3793,6 +3792,16 @@ static int macsec_changelink_common(struct net_device *dev,
                }
        }
 
+       if (data[IFLA_MACSEC_WINDOW]) {
+               secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
+
+               /* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
+                * for XPN cipher suites */
+               if (secy->xpn &&
+                   secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
+                       return -EINVAL;
+       }
+
        return 0;
 }
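
For reference, the new bound works out as follows; the figures are plain arithmetic on the macro above, not additional driver code:

/* MACSEC_XPN_MAX_REPLAY_WINDOW = (1 << 30) - 1
 *   1 << 30       == 1073741824 (0x40000000)
 *   (1 << 30) - 1 == 1073741823 (0x3fffffff)
 * i.e. the IEEE 802.1AEbw-2013 10.7.8 ceiling on the replay window
 * for the extended-packet-number cipher suites. */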
 
@@ -3822,7 +3831,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 
        ret = macsec_changelink_common(dev, data);
        if (ret)
-               return ret;
+               goto cleanup;
 
        /* If h/w offloading is available, propagate to the device */
        if (macsec_is_offloaded(macsec)) {
index ab0af1d..70f88ea 100644
@@ -986,7 +986,7 @@ static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
         */
        ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_INTR_STS);
        if (ret < 0)
-               return false;
+               return ret;
 
        if (ret & DW_VR_MII_C37_ANSGM_SP_LNKSTS) {
                int speed_value;
index ff22b6b..36803d9 100644
@@ -450,6 +450,7 @@ static int bcm5421_init(struct mii_phy* phy)
                int can_low_power = 1;
                if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
                        can_low_power = 0;
+               of_node_put(np);
                if (can_low_power) {
                        /* Enable automatic low-power */
                        sungem_phy_write(phy, 0x1c, 0x9002);
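
The added of_node_put() balances the reference taken when the device-tree node was looked up; every of_find_*()/of_get_*() result must be dropped on all exit paths or the node's refcount leaks. A generic sketch of the pattern — "vendor,example" and the property name are hypothetical:

#include <linux/of.h>

static bool example_low_power_allowed(void)
{
	struct device_node *np;
	bool allowed;

	np = of_find_compatible_node(NULL, NULL, "vendor,example");
	allowed = np && !of_property_read_bool(np, "no-autolowpower");
	of_node_put(np);	/* NULL-safe; balances the lookup's reference */
	return allowed;
}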
index 356cf8d..ec8e1b3 100644
@@ -242,9 +242,15 @@ struct virtnet_info {
        /* Packet virtio header size */
        u8 hdr_len;
 
-       /* Work struct for refilling if we run low on memory. */
+       /* Work struct for delayed refilling if we run low on memory. */
        struct delayed_work refill;
 
+       /* Is delayed refill enabled? */
+       bool refill_enabled;
+
+       /* The lock to synchronize the access to refill_enabled */
+       spinlock_t refill_lock;
+
        /* Work struct for config space updates */
        struct work_struct config_work;
 
@@ -348,6 +354,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
        return p;
 }
 
+static void enable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = true;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
+static void disable_delayed_refill(struct virtnet_info *vi)
+{
+       spin_lock_bh(&vi->refill_lock);
+       vi->refill_enabled = false;
+       spin_unlock_bh(&vi->refill_lock);
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
 {
@@ -1527,8 +1547,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
        }
 
        if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
-               if (!try_fill_recv(vi, rq, GFP_ATOMIC))
-                       schedule_delayed_work(&vi->refill, 0);
+               if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
+                       spin_lock(&vi->refill_lock);
+                       if (vi->refill_enabled)
+                               schedule_delayed_work(&vi->refill, 0);
+                       spin_unlock(&vi->refill_lock);
+               }
        }
 
        u64_stats_update_begin(&rq->stats.syncp);
@@ -1651,6 +1675,8 @@ static int virtnet_open(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i, err;
 
+       enable_delayed_refill(vi);
+
        for (i = 0; i < vi->max_queue_pairs; i++) {
                if (i < vi->curr_queue_pairs)
                        /* Make sure we have some buffers: if oom use wq. */
@@ -2033,6 +2059,8 @@ static int virtnet_close(struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int i;
 
+       /* Make sure NAPI doesn't schedule refill work */
+       disable_delayed_refill(vi);
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
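
The refill_enabled flag and its spinlock close a race between NAPI and teardown: without them, the receive path could schedule the refill work after close/freeze had already cancelled it, and that work would then re-enable NAPI on a stopped queue. The ordering is the point — clear the flag first so nothing new is queued, then wait out anything already in flight. A stripped-down sketch of the pattern with generic names:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_ctx {
	spinlock_t lock;
	bool enabled;
	struct delayed_work refill;
};

static void example_teardown(struct example_ctx *c)
{
	spin_lock_bh(&c->lock);
	c->enabled = false;			/* step 1: forbid new scheduling */
	spin_unlock_bh(&c->lock);
	cancel_delayed_work_sync(&c->refill);	/* step 2: flush what's queued */
}

static void example_maybe_refill(struct example_ctx *c)
{
	spin_lock(&c->lock);			/* called from BH (NAPI) context */
	if (c->enabled)
		schedule_delayed_work(&c->refill, 0);
	spin_unlock(&c->lock);
}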
 
@@ -2792,6 +2820,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
+       enable_delayed_refill(vi);
+
        if (netif_running(vi->dev)) {
                err = virtnet_open(vi->dev);
                if (err)
@@ -3535,6 +3565,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        vdev->priv = vi;
 
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+       spin_lock_init(&vi->refill_lock);
 
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
index f52960d..bff144c 100644
@@ -32,7 +32,7 @@ config DEBUG_PINCTRL
          Say Y here to add some extra checks and diagnostics to PINCTRL calls.
 
 config PINCTRL_AMD
-       tristate "AMD GPIO pin control"
+       bool "AMD GPIO pin control"
        depends on HAS_IOMEM
        depends on ACPI || COMPILE_TEST
        select GPIOLIB
index a140b6b..bcde042 100644
@@ -102,7 +102,7 @@ struct armada_37xx_pinctrl {
        struct device                   *dev;
        struct gpio_chip                gpio_chip;
        struct irq_chip                 irq_chip;
-       spinlock_t                      irq_lock;
+       raw_spinlock_t                  irq_lock;
        struct pinctrl_desc             pctl;
        struct pinctrl_dev              *pctl_dev;
        struct armada_37xx_pin_group    *groups;
@@ -523,9 +523,9 @@ static void armada_37xx_irq_ack(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        writel(d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static void armada_37xx_irq_mask(struct irq_data *d)
@@ -536,10 +536,10 @@ static void armada_37xx_irq_mask(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        writel(val & ~d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static void armada_37xx_irq_unmask(struct irq_data *d)
@@ -550,10 +550,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        writel(val | d->mask, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 }
 
 static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -564,14 +564,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
        unsigned long flags;
 
        armada_37xx_irq_update_reg(&reg, d);
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        val = readl(info->base + reg);
        if (on)
                val |= (BIT(d->hwirq % GPIO_PER_REG));
        else
                val &= ~(BIT(d->hwirq % GPIO_PER_REG));
        writel(val, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 
        return 0;
 }
@@ -583,7 +583,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
        u32 val, reg = IRQ_POL;
        unsigned long flags;
 
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        armada_37xx_irq_update_reg(&reg, d);
        val = readl(info->base + reg);
        switch (type) {
@@ -607,11 +607,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
                break;
        }
        default:
-               spin_unlock_irqrestore(&info->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                return -EINVAL;
        }
        writel(val, info->base + reg);
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
 
        return 0;
 }
@@ -626,7 +626,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
 
        regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
 
-       spin_lock_irqsave(&info->irq_lock, flags);
+       raw_spin_lock_irqsave(&info->irq_lock, flags);
        p = readl(info->base + IRQ_POL + 4 * reg_idx);
        if ((p ^ l) & (1 << bit_num)) {
                /*
@@ -647,7 +647,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
                ret = -1;
        }
 
-       spin_unlock_irqrestore(&info->irq_lock, flags);
+       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
        return ret;
 }
 
@@ -664,11 +664,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
                u32 status;
                unsigned long flags;
 
-               spin_lock_irqsave(&info->irq_lock, flags);
+               raw_spin_lock_irqsave(&info->irq_lock, flags);
                status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
                /* Manage only the interrupt that was enabled */
                status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
-               spin_unlock_irqrestore(&info->irq_lock, flags);
+               raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                while (status) {
                        u32 hwirq = ffs(status) - 1;
                        u32 virq = irq_find_mapping(d, hwirq +
@@ -695,12 +695,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
 
 update_status:
                        /* Update status in case a new IRQ appears */
-                       spin_lock_irqsave(&info->irq_lock, flags);
+                       raw_spin_lock_irqsave(&info->irq_lock, flags);
                        status = readl_relaxed(info->base +
                                               IRQ_STATUS + 4 * i);
                        /* Manage only the interrupt that was enabled */
                        status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
-                       spin_unlock_irqrestore(&info->irq_lock, flags);
+                       raw_spin_unlock_irqrestore(&info->irq_lock, flags);
                }
        }
        chained_irq_exit(chip, desc);
@@ -731,7 +731,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
        struct device *dev = &pdev->dev;
        unsigned int i, nr_irq_parent;
 
-       spin_lock_init(&info->irq_lock);
+       raw_spin_lock_init(&info->irq_lock);
 
        nr_irq_parent = of_irq_count(np);
        if (!nr_irq_parent) {
@@ -1107,25 +1107,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = {
        { },
 };
 
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .use_raw_spinlock = true,
+};
+
 static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
 {
        struct armada_37xx_pinctrl *info;
        struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
        struct regmap *regmap;
+       void __iomem *base;
        int ret;
 
+       base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+       if (IS_ERR(base)) {
+               dev_err(dev, "failed to ioremap base address: %pe\n", base);
+               return PTR_ERR(base);
+       }
+
+       regmap = devm_regmap_init_mmio(dev, base,
+                                      &armada_37xx_pinctrl_regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(dev, "failed to create regmap: %pe\n", regmap);
+               return PTR_ERR(regmap);
+       }
+
        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
 
        info->dev = dev;
-
-       regmap = syscon_node_to_regmap(np);
-       if (IS_ERR(regmap))
-               return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
        info->regmap = regmap;
-
        info->data = of_device_get_match_data(dev);
 
        ret = armada_37xx_pinctrl_register(pdev, info);
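
Two related conversions here: the irqchip lock becomes a raw_spinlock_t because the mask/unmask/ack callbacks run in hard-interrupt context, where a sleeping spinlock_t is illegal on PREEMPT_RT; and the shared syscon regmap is replaced with a locally created MMIO regmap whose use_raw_spinlock flag keeps regmap's internal lock raw for the same reason. A condensed sketch of the regmap side (generic driver, not the armada-37xx code):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.use_raw_spinlock = true,	/* usable from hard-IRQ paths on RT */
};

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct regmap *map;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(base))
		return PTR_ERR(base);

	map = devm_regmap_init_mmio(&pdev->dev, base, &example_regmap_cfg);
	return PTR_ERR_OR_ZERO(map);
}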
index 5f4a8c5..dfc8ea9 100644
 #define ocelot_clrsetbits(addr, clear, set) \
        writel((readl(addr) & ~(clear)) | (set), (addr))
 
-/* PINCONFIG bits (sparx5 only) */
 enum {
        PINCONF_BIAS,
        PINCONF_SCHMITT,
        PINCONF_DRIVE_STRENGTH,
 };
 
-#define BIAS_PD_BIT BIT(4)
-#define BIAS_PU_BIT BIT(3)
-#define BIAS_BITS   (BIAS_PD_BIT|BIAS_PU_BIT)
-#define SCHMITT_BIT BIT(2)
-#define DRIVE_BITS  GENMASK(1, 0)
-
 /* GPIO standard registers */
 #define OCELOT_GPIO_OUT_SET    0x0
 #define OCELOT_GPIO_OUT_CLR    0x4
@@ -321,6 +314,13 @@ struct ocelot_pin_caps {
        unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
 };
 
+struct ocelot_pincfg_data {
+       u8 pd_bit;
+       u8 pu_bit;
+       u8 drive_bits;
+       u8 schmitt_bit;
+};
+
 struct ocelot_pinctrl {
        struct device *dev;
        struct pinctrl_dev *pctl;
@@ -328,10 +328,16 @@ struct ocelot_pinctrl {
        struct regmap *map;
        struct regmap *pincfg;
        struct pinctrl_desc *desc;
+       const struct ocelot_pincfg_data *pincfg_data;
        struct ocelot_pmx_func func[FUNC_MAX];
        u8 stride;
 };
 
+struct ocelot_match_data {
+       struct pinctrl_desc desc;
+       struct ocelot_pincfg_data pincfg_data;
+};
+
 #define LUTON_P(p, f0, f1)                                             \
 static struct ocelot_pin_caps luton_pin_##p = {                                \
        .pin = p,                                                       \
@@ -1325,24 +1331,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
        int ret = -EOPNOTSUPP;
 
        if (info->pincfg) {
+               const struct ocelot_pincfg_data *opd = info->pincfg_data;
                u32 regcfg;
 
-               ret = regmap_read(info->pincfg, pin, &regcfg);
+               ret = regmap_read(info->pincfg,
+                                 pin * regmap_get_reg_stride(info->pincfg),
+                                 &regcfg);
                if (ret)
                        return ret;
 
                ret = 0;
                switch (reg) {
                case PINCONF_BIAS:
-                       *val = regcfg & BIAS_BITS;
+                       *val = regcfg & (opd->pd_bit | opd->pu_bit);
                        break;
 
                case PINCONF_SCHMITT:
-                       *val = regcfg & SCHMITT_BIT;
+                       *val = regcfg & opd->schmitt_bit;
                        break;
 
                case PINCONF_DRIVE_STRENGTH:
-                       *val = regcfg & DRIVE_BITS;
+                       *val = regcfg & opd->drive_bits;
                        break;
 
                default:
@@ -1359,14 +1368,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
        u32 val;
        int ret;
 
-       ret = regmap_read(info->pincfg, regaddr, &val);
+       ret = regmap_read(info->pincfg,
+                         regaddr * regmap_get_reg_stride(info->pincfg),
+                         &val);
        if (ret)
                return ret;
 
        val &= ~clrbits;
        val |= setbits;
 
-       ret = regmap_write(info->pincfg, regaddr, val);
+       ret = regmap_write(info->pincfg,
+                          regaddr * regmap_get_reg_stride(info->pincfg),
+                          val);
 
        return ret;
 }
@@ -1379,23 +1392,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
        int ret = -EOPNOTSUPP;
 
        if (info->pincfg) {
+               const struct ocelot_pincfg_data *opd = info->pincfg_data;
 
                ret = 0;
                switch (reg) {
                case PINCONF_BIAS:
-                       ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+                       ret = ocelot_pincfg_clrsetbits(info, pin,
+                                                      opd->pd_bit | opd->pu_bit,
                                                       val);
                        break;
 
                case PINCONF_SCHMITT:
-                       ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+                       ret = ocelot_pincfg_clrsetbits(info, pin,
+                                                      opd->schmitt_bit,
                                                       val);
                        break;
 
                case PINCONF_DRIVE_STRENGTH:
                        if (val <= 3)
                                ret = ocelot_pincfg_clrsetbits(info, pin,
-                                                              DRIVE_BITS, val);
+                                                              opd->drive_bits,
+                                                              val);
                        else
                                ret = -EINVAL;
                        break;
@@ -1425,17 +1442,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev,
                if (param == PIN_CONFIG_BIAS_DISABLE)
                        val = (val == 0);
                else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
-                       val = (val & BIAS_PD_BIT ? true : false);
+                       val = !!(val & info->pincfg_data->pd_bit);
                else    /* PIN_CONFIG_BIAS_PULL_UP */
-                       val = (val & BIAS_PU_BIT ? true : false);
+                       val = !!(val & info->pincfg_data->pu_bit);
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+               if (!info->pincfg_data->schmitt_bit)
+                       return -EOPNOTSUPP;
+
                err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val);
                if (err)
                        return err;
 
-               val = (val & SCHMITT_BIT ? true : false);
+               val = !!(val & info->pincfg_data->schmitt_bit);
                break;
 
        case PIN_CONFIG_DRIVE_STRENGTH:
@@ -1479,6 +1499,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                              unsigned long *configs, unsigned int num_configs)
 {
        struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+       const struct ocelot_pincfg_data *opd = info->pincfg_data;
        u32 param, arg, p;
        int cfg, err = 0;
 
@@ -1491,8 +1512,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                case PIN_CONFIG_BIAS_PULL_UP:
                case PIN_CONFIG_BIAS_PULL_DOWN:
                        arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 :
-                       (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT :
-                       BIAS_PD_BIT;
+                             (param == PIN_CONFIG_BIAS_PULL_UP) ?
+                               opd->pu_bit : opd->pd_bit;
 
                        err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg);
                        if (err)
@@ -1501,7 +1522,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        break;
 
                case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-                       arg = arg ? SCHMITT_BIT : 0;
+                       if (!opd->schmitt_bit)
+                               return -EOPNOTSUPP;
+
+                       arg = arg ? opd->schmitt_bit : 0;
                        err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT,
                                                  arg);
                        if (err)
@@ -1562,69 +1586,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = {
        .dt_free_map = pinconf_generic_dt_free_map,
 };
 
-static struct pinctrl_desc luton_desc = {
-       .name = "luton-pinctrl",
-       .pins = luton_pins,
-       .npins = ARRAY_SIZE(luton_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data luton_desc = {
+       .desc = {
+               .name = "luton-pinctrl",
+               .pins = luton_pins,
+               .npins = ARRAY_SIZE(luton_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc serval_desc = {
-       .name = "serval-pinctrl",
-       .pins = serval_pins,
-       .npins = ARRAY_SIZE(serval_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data serval_desc = {
+       .desc = {
+               .name = "serval-pinctrl",
+               .pins = serval_pins,
+               .npins = ARRAY_SIZE(serval_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc ocelot_desc = {
-       .name = "ocelot-pinctrl",
-       .pins = ocelot_pins,
-       .npins = ARRAY_SIZE(ocelot_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data ocelot_desc = {
+       .desc = {
+               .name = "ocelot-pinctrl",
+               .pins = ocelot_pins,
+               .npins = ARRAY_SIZE(ocelot_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc jaguar2_desc = {
-       .name = "jaguar2-pinctrl",
-       .pins = jaguar2_pins,
-       .npins = ARRAY_SIZE(jaguar2_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data jaguar2_desc = {
+       .desc = {
+               .name = "jaguar2-pinctrl",
+               .pins = jaguar2_pins,
+               .npins = ARRAY_SIZE(jaguar2_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc servalt_desc = {
-       .name = "servalt-pinctrl",
-       .pins = servalt_pins,
-       .npins = ARRAY_SIZE(servalt_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data servalt_desc = {
+       .desc = {
+               .name = "servalt-pinctrl",
+               .pins = servalt_pins,
+               .npins = ARRAY_SIZE(servalt_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .owner = THIS_MODULE,
+       },
 };
 
-static struct pinctrl_desc sparx5_desc = {
-       .name = "sparx5-pinctrl",
-       .pins = sparx5_pins,
-       .npins = ARRAY_SIZE(sparx5_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &ocelot_pmx_ops,
-       .confops = &ocelot_confops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data sparx5_desc = {
+       .desc = {
+               .name = "sparx5-pinctrl",
+               .pins = sparx5_pins,
+               .npins = ARRAY_SIZE(sparx5_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &ocelot_pmx_ops,
+               .confops = &ocelot_confops,
+               .owner = THIS_MODULE,
+       },
+       .pincfg_data = {
+               .pd_bit = BIT(4),
+               .pu_bit = BIT(3),
+               .drive_bits = GENMASK(1, 0),
+               .schmitt_bit = BIT(2),
+       },
 };
 
-static struct pinctrl_desc lan966x_desc = {
-       .name = "lan966x-pinctrl",
-       .pins = lan966x_pins,
-       .npins = ARRAY_SIZE(lan966x_pins),
-       .pctlops = &ocelot_pctl_ops,
-       .pmxops = &lan966x_pmx_ops,
-       .confops = &ocelot_confops,
-       .owner = THIS_MODULE,
+static struct ocelot_match_data lan966x_desc = {
+       .desc = {
+               .name = "lan966x-pinctrl",
+               .pins = lan966x_pins,
+               .npins = ARRAY_SIZE(lan966x_pins),
+               .pctlops = &ocelot_pctl_ops,
+               .pmxops = &lan966x_pmx_ops,
+               .confops = &ocelot_confops,
+               .owner = THIS_MODULE,
+       },
+       .pincfg_data = {
+               .pd_bit = BIT(3),
+               .pu_bit = BIT(2),
+               .drive_bits = GENMASK(1, 0),
+       },
 };
 
 static int ocelot_create_group_func_map(struct device *dev,
@@ -1890,7 +1939,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
        {},
 };
 
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev,
+                                                  const struct ocelot_pinctrl *info)
 {
        void __iomem *base;
 
@@ -1898,7 +1948,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
                .reg_bits = 32,
                .val_bits = 32,
                .reg_stride = 4,
-               .max_register = 32,
+               .max_register = info->desc->npins * 4,
                .name = "pincfg",
        };
 
@@ -1913,6 +1963,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
 
 static int ocelot_pinctrl_probe(struct platform_device *pdev)
 {
+       const struct ocelot_match_data *data;
        struct device *dev = &pdev->dev;
        struct ocelot_pinctrl *info;
        struct reset_control *reset;
@@ -1929,7 +1980,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
        if (!info)
                return -ENOMEM;
 
-       info->desc = (struct pinctrl_desc *)device_get_match_data(dev);
+       data = device_get_match_data(dev);
+       if (!data)
+               return -EINVAL;
+
+       info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc),
+                                 GFP_KERNEL);
+       if (!info->desc)
+               return -ENOMEM;
+
+       info->pincfg_data = &data->pincfg_data;
 
        reset = devm_reset_control_get_optional_shared(dev, "switch");
        if (IS_ERR(reset))
@@ -1956,7 +2016,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
 
        /* Pinconf registers */
        if (info->desc->confops) {
-               pincfg = ocelot_pinctrl_create_pincfg(pdev);
+               pincfg = ocelot_pinctrl_create_pincfg(pdev, info);
                if (IS_ERR(pincfg))
                        dev_dbg(dev, "Failed to create pincfg regmap\n");
                else
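
The refactor replaces the global BIAS_*/SCHMITT_BIT/DRIVE_BITS #defines with per-SoC ocelot_pincfg_data carried by the OF match table, since sparx5 and lan966x place the pull-up/pull-down bits differently (and lan966x has no Schmitt bit, hence the new -EOPNOTSUPP checks). A generic sketch of the device_get_match_data() pattern — the compatible strings and struct are hypothetical:

#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_match_data {
	u8 pd_bit;
	u8 pu_bit;
};

static const struct example_match_data soc_a_data = {
	.pd_bit = BIT(4), .pu_bit = BIT(3),
};

static const struct example_match_data soc_b_data = {
	.pd_bit = BIT(3), .pu_bit = BIT(2),
};

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,soc-a", .data = &soc_a_data },
	{ .compatible = "vendor,soc-b", .data = &soc_b_data },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct example_match_data *data;

	data = device_get_match_data(&pdev->dev);
	if (!data)
		return -EINVAL;
	/* stash data; pinconf paths then test data->pd_bit / data->pu_bit */
	return 0;
}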
index 63429a2..770862f 100644
@@ -266,6 +266,8 @@ static int ralink_pinctrl_pins(struct ralink_priv *p)
                                                p->func[i]->pin_count,
                                                sizeof(int),
                                                GFP_KERNEL);
+               if (!p->func[i]->pins)
+                       return -ENOMEM;
                for (j = 0; j < p->func[i]->pin_count; j++)
                        p->func[i]->pins[j] = p->func[i]->pin_first + j;
 
index 3ba4704..2b3335a 100644
@@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node
        }
 
        *map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL);
+       if (*map == NULL)
+               return -ENOMEM;
+
        for (i = 0; i < (*num_maps); i++) {
                dt_pin = be32_to_cpu(list[i]);
                pin_num = FIELD_GET(GENMASK(31, 24), dt_pin);
index 458218f..fe4971b 100644
@@ -176,6 +176,7 @@ config PTP_1588_CLOCK_OCP
        depends on !S390
        depends on COMMON_CLK
        select NET_DEVLINK
+       select CRC16
        help
          This driver adds support for an OpenCompute time card.
 
index 9e54fe7..35d4b39 100644
@@ -3565,7 +3565,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
                        if (!atomic_read(&queue->set_pci_flags_count)) {
                                /*
                                 * there's no outstanding PCI any more, so we
-                                * have to request a PCI to be sure the the PCI
+                                * have to request a PCI to be sure the PCI
                                 * will wake at some time in the future then we
                                 * can flush packed buffers that might still be
                                 * hanging around, which can happen if no
index 775c0bf..0933948 100644
@@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
        struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 
        /* if an error occurred and we have an active dma, then terminate */
-       dmaengine_terminate_sync(ctlr->dma_tx);
-       bs->tx_dma_active = false;
-       dmaengine_terminate_sync(ctlr->dma_rx);
-       bs->rx_dma_active = false;
+       if (ctlr->dma_tx) {
+               dmaengine_terminate_sync(ctlr->dma_tx);
+               bs->tx_dma_active = false;
+       }
+       if (ctlr->dma_rx) {
+               dmaengine_terminate_sync(ctlr->dma_rx);
+               bs->rx_dma_active = false;
+       }
        bcm2835_spi_undo_prologue(bs);
 
        /* and reset */
index 31d778e..6a7f7df 100644
@@ -69,7 +69,7 @@
 #define CDNS_SPI_BAUD_DIV_SHIFT                3 /* Baud rate divisor shift in CR */
 #define CDNS_SPI_SS_SHIFT              10 /* Slave Select field shift in CR */
 #define CDNS_SPI_SS0                   0x1 /* Slave Select zero */
-#define CDNS_SPI_NOSS                  0x3C /* No Slave select */
+#define CDNS_SPI_NOSS                  0xF /* No Slave select */
 
 /*
  * SPI Interrupt Registers bit Masks
index 7a014ee..411b130 100644
@@ -613,6 +613,10 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
                                               rspi->dma_callbacked, HZ);
        if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
+               if (tx)
+                       dmaengine_synchronize(rspi->ctlr->dma_tx);
+               if (rx)
+                       dmaengine_synchronize(rspi->ctlr->dma_rx);
        } else {
                if (!ret) {
                        dev_err(&rspi->ctlr->dev, "DMA timeout\n");
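
The added dmaengine_synchronize() calls ensure the channel's completion processing has finished before the transfer buffers are reused. The same helper is documented as the second half of the standard teardown pairing, sketched here with a generic channel rather than the rspi flow:

/* Stop issuing descriptors, then wait until all of the channel's
 * callbacks have completed before touching their buffers. */
dmaengine_terminate_async(chan);
dmaengine_synchronize(chan);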
index 90ce16b..f422f9c 100644
@@ -632,16 +632,19 @@ static int __init sev_guest_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct snp_guest_dev *snp_dev;
        struct miscdevice *misc;
+       void __iomem *mapping;
        int ret;
 
        if (!dev->platform_data)
                return -ENODEV;
 
        data = (struct sev_guest_platform_data *)dev->platform_data;
-       layout = (__force void *)ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
-       if (!layout)
+       mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE);
+       if (!mapping)
                return -ENODEV;
 
+       layout = (__force void *)mapping;
+
        ret = -ENOMEM;
        snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL);
        if (!snp_dev)
@@ -706,7 +709,7 @@ e_free_response:
 e_free_request:
        free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg));
 e_unmap:
-       iounmap(layout);
+       iounmap(mapping);
        return ret;
 }
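
The fix keeps the void __iomem * cookie returned by ioremap_encrypted() in a dedicated variable: the (__force void *) cast is fine for CPU access to the decrypted layout, but iounmap() must be handed the original annotated cookie — passing the plain pointer back defeats sparse's address-space checking and is wrong wherever the cookie is not a plain virtual address. The general shape, sketched with plain ioremap():

#include <linux/io.h>

static int example_map_secrets(phys_addr_t pa)
{
	void __iomem *mapping;

	mapping = ioremap(pa, PAGE_SIZE);	/* keep the __iomem cookie */
	if (!mapping)
		return -ENODEV;

	/* cast away __iomem only at the point of CPU access, if justified */

	iounmap(mapping);			/* unmap with the same cookie */
	return 0;
}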
 
index a01ea49..e8e769b 100644
@@ -1737,6 +1737,14 @@ static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
            (req->flags & REQ_F_PARTIAL_IO))
                return;
 
+       /*
+        * READV uses fields in `struct io_rw` (len/addr) to stash the selected
+        * buffer data. However if that buffer is recycled the original request
+        * data stored in addr is lost. Therefore forbid recycling for now.
+        */
+       if (req->opcode == IORING_OP_READV)
+               return;
+
        /*
         * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
         * the flag and hence ensure that bl->head doesn't get incremented.
@@ -12931,7 +12939,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
 {
        struct io_uring_buf_ring *br;
        struct io_uring_buf_reg reg;
-       struct io_buffer_list *bl;
+       struct io_buffer_list *bl, *free_bl = NULL;
        struct page **pages;
        int nr_pages;
 
@@ -12963,7 +12971,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
                if (bl->buf_nr_pages || !list_empty(&bl->buf_list))
                        return -EEXIST;
        } else {
-               bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+               free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
                if (!bl)
                        return -ENOMEM;
        }
@@ -12972,7 +12980,7 @@ static int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
                             struct_size(br, bufs, reg.ring_entries),
                             &nr_pages);
        if (IS_ERR(pages)) {
-               kfree(bl);
+               kfree(free_bl);
                return PTR_ERR(pages);
        }
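
Here bl can point either at a buffer list that already existed (looked up by group ID) or at one freshly allocated a few lines up; on the error path, only the fresh allocation may be freed. Aliasing the allocation into free_bl, which stays NULL in the lookup case, lets a single NULL-safe kfree() do the right thing. The idiom in isolation — lookup_existing(), setup(), and struct example are placeholders:

static int example_register(u32 key)
{
	struct example *obj, *free_obj = NULL;

	obj = lookup_existing(key);	/* borrowed pointer, do not free */
	if (!obj) {
		free_obj = obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
	}

	if (setup(obj)) {
		kfree(free_obj);	/* NULL-safe: frees only our allocation */
		return -EFAULT;
	}
	return 0;
}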
 
index 4de597a..52615e6 100644
@@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
                a = (ATTR_RECORD*)((u8*)ctx->attr +
                                le32_to_cpu(ctx->attr->length));
        for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
-               if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
-                               le32_to_cpu(ctx->mrec->bytes_allocated))
+               u8 *mrec_end = (u8 *)ctx->mrec +
+                              le32_to_cpu(ctx->mrec->bytes_allocated);
+               u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
+                              a->name_length * sizeof(ntfschar);
+               if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
+                   name_end > mrec_end)
                        break;
                ctx->attr = a;
                if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
index 3375275..740b642 100644
@@ -277,7 +277,6 @@ enum ocfs2_mount_options
        OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
        OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
        OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
-       OCFS2_MOUNT_NOCLUSTER = 1 << 18, /* No cluster aware filesystem mount */
 };
 
 #define OCFS2_OSB_SOFT_RO      0x0001
@@ -673,8 +672,7 @@ static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
 
 static inline int ocfs2_mount_local(struct ocfs2_super *osb)
 {
-       return ((osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT)
-               || (osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER));
+       return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
 }
 
 static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
index 0b0ae3e..da7718c 100644
@@ -252,16 +252,14 @@ static int __ocfs2_find_empty_slot(struct ocfs2_slot_info *si,
        int i, ret = -ENOSPC;
 
        if ((preferred >= 0) && (preferred < si->si_num_slots)) {
-               if (!si->si_slots[preferred].sl_valid ||
-                   !si->si_slots[preferred].sl_node_num) {
+               if (!si->si_slots[preferred].sl_valid) {
                        ret = preferred;
                        goto out;
                }
        }
 
        for(i = 0; i < si->si_num_slots; i++) {
-               if (!si->si_slots[i].sl_valid ||
-                   !si->si_slots[i].sl_node_num) {
+               if (!si->si_slots[i].sl_valid) {
                        ret = i;
                        break;
                }
@@ -456,30 +454,24 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
        spin_lock(&osb->osb_lock);
        ocfs2_update_slot_info(si);
 
-       if (ocfs2_mount_local(osb))
-               /* use slot 0 directly in local mode */
-               slot = 0;
-       else {
-               /* search for ourselves first and take the slot if it already
-                * exists. Perhaps we need to mark this in a variable for our
-                * own journal recovery? Possibly not, though we certainly
-                * need to warn to the user */
-               slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+       /* search for ourselves first and take the slot if it already
+        * exists. Perhaps we need to mark this in a variable for our
+        * own journal recovery? Possibly not, though we certainly
+        * need to warn the user */
+       slot = __ocfs2_node_num_to_slot(si, osb->node_num);
+       if (slot < 0) {
+               /* if no slot yet, then just take 1st available
+                * one. */
+               slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
                if (slot < 0) {
-                       /* if no slot yet, then just take 1st available
-                        * one. */
-                       slot = __ocfs2_find_empty_slot(si, osb->preferred_slot);
-                       if (slot < 0) {
-                               spin_unlock(&osb->osb_lock);
-                               mlog(ML_ERROR, "no free slots available!\n");
-                               status = -EINVAL;
-                               goto bail;
-                       }
-               } else
-                       printk(KERN_INFO "ocfs2: Slot %d on device (%s) was "
-                              "already allocated to this node!\n",
-                              slot, osb->dev_str);
-       }
+                       spin_unlock(&osb->osb_lock);
+                       mlog(ML_ERROR, "no free slots available!\n");
+                       status = -EINVAL;
+                       goto bail;
+               }
+       } else
+               printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already "
+                      "allocated to this node!\n", slot, osb->dev_str);
 
        ocfs2_set_slot(si, slot, osb->node_num);
        osb->slot_num = slot;
index f729881..438be02 100644
@@ -172,7 +172,6 @@ enum {
        Opt_dir_resv_level,
        Opt_journal_async_commit,
        Opt_err_cont,
-       Opt_nocluster,
        Opt_err,
 };
 
@@ -206,7 +205,6 @@ static const match_table_t tokens = {
        {Opt_dir_resv_level, "dir_resv_level=%u"},
        {Opt_journal_async_commit, "journal_async_commit"},
        {Opt_err_cont, "errors=continue"},
-       {Opt_nocluster, "nocluster"},
        {Opt_err, NULL}
 };
 
@@ -618,13 +616,6 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
                goto out;
        }
 
-       tmp = OCFS2_MOUNT_NOCLUSTER;
-       if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
-               ret = -EINVAL;
-               mlog(ML_ERROR, "Cannot change nocluster option on remount\n");
-               goto out;
-       }
-
        tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
                OCFS2_MOUNT_HB_NONE;
        if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
@@ -865,7 +856,6 @@ static int ocfs2_verify_userspace_stack(struct ocfs2_super *osb,
        }
 
        if (ocfs2_userspace_stack(osb) &&
-           !(osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
            strncmp(osb->osb_cluster_stack, mopt->cluster_stack,
                    OCFS2_STACK_LABEL_LEN)) {
                mlog(ML_ERROR,
@@ -1137,11 +1127,6 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
               osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
               "ordered");
 
-       if ((osb->s_mount_opt & OCFS2_MOUNT_NOCLUSTER) &&
-          !(osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT))
-               printk(KERN_NOTICE "ocfs2: The shared device (%s) is mounted "
-                      "without cluster aware mode.\n", osb->dev_str);
-
        atomic_set(&osb->vol_state, VOLUME_MOUNTED);
        wake_up(&osb->osb_mount_event);
 
@@ -1452,9 +1437,6 @@ static int ocfs2_parse_options(struct super_block *sb,
                case Opt_journal_async_commit:
                        mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
                        break;
-               case Opt_nocluster:
-                       mopt->mount_opt |= OCFS2_MOUNT_NOCLUSTER;
-                       break;
                default:
                        mlog(ML_ERROR,
                             "Unrecognized mount option \"%s\" "
@@ -1566,9 +1548,6 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
        if (opts & OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT)
                seq_printf(s, ",journal_async_commit");
 
-       if (opts & OCFS2_MOUNT_NOCLUSTER)
-               seq_printf(s, ",nocluster");
-
        return 0;
 }
 
index e0777ee..397da02 100644
@@ -1263,6 +1263,9 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
                                          count, fl);
                file_end_write(out.file);
        } else {
+               if (out.file->f_flags & O_NONBLOCK)
+                       fl |= SPLICE_F_NONBLOCK;
+
                retval = splice_file_to_pipe(in.file, opipe, &pos, count, fl);
        }
 
index e943370..de86f5b 100644
@@ -192,17 +192,19 @@ static inline void msg_init(struct uffd_msg *msg)
 }
 
 static inline struct uffd_msg userfault_msg(unsigned long address,
+                                           unsigned long real_address,
                                            unsigned int flags,
                                            unsigned long reason,
                                            unsigned int features)
 {
        struct uffd_msg msg;
+
        msg_init(&msg);
        msg.event = UFFD_EVENT_PAGEFAULT;
 
-       if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
-               address &= PAGE_MASK;
-       msg.arg.pagefault.address = address;
+       msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
+                                   real_address : address;
+
        /*
         * These flags indicate why the userfault occurred:
         * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
@@ -488,8 +490,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 
        init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
        uwq.wq.private = current;
-       uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
-                       ctx->features);
+       uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
+                               reason, ctx->features);
        uwq.ctx = ctx;
        uwq.waken = false;
 
index 7ce93aa..98954dd 100644
@@ -1125,9 +1125,7 @@ static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
 }
 #endif
 
-#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
 extern int devmem_is_allowed(unsigned long pfn);
-#endif
 
 #endif /* __KERNEL__ */
 
index cb2167c..492dce4 100644
@@ -368,9 +368,6 @@ static inline void tlb_flush(struct mmu_gather *tlb)
                flush_tlb_mm(tlb->mm);
 }
 
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
 #else /* CONFIG_MMU_GATHER_NO_RANGE */
 
 #ifndef tlb_flush
index 0fca8f3..addb135 100644
@@ -28,7 +28,7 @@
 #include <linux/dma-fence.h>
 #include <linux/completion.h>
 #include <linux/xarray.h>
-#include <linux/irq_work.h>
+#include <linux/workqueue.h>
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
@@ -295,7 +295,7 @@ struct drm_sched_job {
         */
        union {
                struct dma_fence_cb             finish_cb;
-               struct irq_work                 work;
+               struct work_struct              work;
        };
 
        uint64_t                        id;
index cf3d0d6..7898e29 100644
@@ -1130,23 +1130,27 @@ static inline bool is_zone_movable_page(const struct page *page)
 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
 
-bool __put_devmap_managed_page(struct page *page);
-static inline bool put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs);
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
 {
        if (!static_branch_unlikely(&devmap_managed_key))
                return false;
        if (!is_zone_device_page(page))
                return false;
-       return __put_devmap_managed_page(page);
+       return __put_devmap_managed_page_refs(page, refs);
 }
-
 #else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool put_devmap_managed_page_refs(struct page *page, int refs)
 {
        return false;
 }
 #endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
 
+static inline bool put_devmap_managed_page(struct page *page)
+{
+       return put_devmap_managed_page_refs(page, 1);
+}
+
 /* 127: arbitrary random number, small enough to assemble well */
 #define folio_ref_zero_or_close_to_overflow(folio) \
        ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
index f7506f0..c04f359 100644
@@ -405,6 +405,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
 {
        const struct inet6_dev *idev = __in6_dev_get(dev);
 
+       if (unlikely(!idev))
+               return true;
+
        return !!idev->cnf.ignore_routes_with_linkdown;
 }
 
index 3c4f550..2f766e3 100644
@@ -847,6 +847,7 @@ enum {
 };
 
 void l2cap_chan_hold(struct l2cap_chan *c);
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
 void l2cap_chan_put(struct l2cap_chan *c);
 
 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
index 85cd695..ee88f0f 100644
@@ -321,7 +321,7 @@ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
 
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
-#define TCP_PINGPONG_THRESH    3
+#define TCP_PINGPONG_THRESH    1
 
 static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
 {
@@ -338,14 +338,6 @@ static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
        return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
 }
 
-static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (icsk->icsk_ack.pingpong < U8_MAX)
-               icsk->icsk_ack.pingpong++;
-}
-
 static inline bool inet_csk_has_ulp(struct sock *sk)
 {
        return inet_sk(sk)->is_icsk && !!inet_csk(sk)->icsk_ulp_ops;
index f7ad1a7..a7273b2 100644
@@ -2823,18 +2823,18 @@ static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_wmem ? */
        if (proto->sysctl_wmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
 
-       return *proto->sysctl_wmem;
+       return READ_ONCE(*proto->sysctl_wmem);
 }
 
 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
 {
        /* Does this proto have per netns sysctl_rmem ? */
        if (proto->sysctl_rmem_offset)
-               return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+               return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
 
-       return *proto->sysctl_rmem;
+       return READ_ONCE(*proto->sysctl_rmem);
 }
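
These sysctls are written without any lock the socket fast path holds, so the lockless readers are converted to READ_ONCE(): it prevents load tearing, pairs with WRITE_ONCE() on the store side, and documents the intentional data race for KCSAN. The pairing in outline (the write side is assumed to carry the matching annotation):

/* sysctl write side */
WRITE_ONCE(*proto->sysctl_wmem, new_val);

/* lockless read side, as in the hunk above */
int wmem = READ_ONCE(*proto->sysctl_wmem);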
 
 /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
index b8620be..d10962b 100644
@@ -1425,7 +1425,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
 
 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-       int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+       int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
 
        return tcp_adv_win_scale <= 0 ?
                (space>>(-tcp_adv_win_scale)) :
index f13d37b..1ecdb91 100644
@@ -192,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -216,5 +217,6 @@ struct flock64 {
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
index 811897d..860f867 100644
@@ -2084,7 +2084,7 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_SECONDS         (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES          (0x3 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_BOOLEAN         (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_BOOLEAN
 
 #define KVM_STATS_BASE_SHIFT           8
 #define KVM_STATS_BASE_MASK            (0xF << KVM_STATS_BASE_SHIFT)
index 50ba70f..1c304fe 100644
@@ -511,10 +511,52 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
        return sum;
 }
 
-#define SRCU_INTERVAL          1       // Base delay if no expedited GPs pending.
-#define SRCU_MAX_INTERVAL      10      // Maximum incremental delay from slow readers.
-#define SRCU_MAX_NODELAY_PHASE 1       // Maximum per-GP-phase consecutive no-delay instances.
-#define SRCU_MAX_NODELAY       100     // Maximum consecutive no-delay instances.
+/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below, boot time configurable) to allow SRCU readers to exit
+ * their read-side critical sections.  If there are still some readers
+ * after one jiffy, we repeatedly block for one jiffy time periods.
+ * The blocking time is increased as the grace-period age increases,
+ * with max blocking time capped at 10 jiffies.
+ */
+#define SRCU_DEFAULT_RETRY_CHECK_DELAY         5
+
+static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
+module_param(srcu_retry_check_delay, ulong, 0444);
+
+#define SRCU_INTERVAL          1               // Base delay if no expedited GPs pending.
+#define SRCU_MAX_INTERVAL      10              // Maximum incremental delay from slow readers.
+
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO      3UL     // Lowmark on default per-GP-phase
+                                                       // no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI      1000UL  // Highmark on default per-GP-phase
+                                                       // no-delay instances.
+
+#define SRCU_UL_CLAMP_LO(val, low)     ((val) > (low) ? (val) : (low))
+#define SRCU_UL_CLAMP_HI(val, high)    ((val) < (high) ? (val) : (high))
+#define SRCU_UL_CLAMP(val, low, high)  SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))
+// Per-GP-phase no-delay instances, adjusted to allow non-sleeping polling
+// for up to one jiffy. The multiply-by-2 factors in the srcu_get_delay()
+// call from process_srcu().
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED        \
+       (2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)
+
+// Maximum per-GP-phase consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY_PHASE \
+       SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,  \
+                     SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,        \
+                     SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)
+
+static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
+module_param(srcu_max_nodelay_phase, ulong, 0444);
+
+// Maximum consecutive no-delay instances.
+#define SRCU_DEFAULT_MAX_NODELAY       (SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ? \
+                                        SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)
+
+static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
+module_param(srcu_max_nodelay, ulong, 0444);
 
 /*
  * Return grace-period delay, zero if there are expedited grace
@@ -522,16 +564,22 @@ static bool srcu_readers_active(struct srcu_struct *ssp)
  */
 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 {
+       unsigned long gpstart;
+       unsigned long j;
        unsigned long jbase = SRCU_INTERVAL;
 
        if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
                jbase = 0;
-       if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)))
-               jbase += jiffies - READ_ONCE(ssp->srcu_gp_start);
-       if (!jbase) {
-               WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
-               if (READ_ONCE(ssp->srcu_n_exp_nodelay) > SRCU_MAX_NODELAY_PHASE)
-                       jbase = 1;
+       if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
+               j = jiffies - 1;
+               gpstart = READ_ONCE(ssp->srcu_gp_start);
+               if (time_after(j, gpstart))
+                       jbase += j - gpstart;
+               if (!jbase) {
+                       WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
+                       if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
+                               jbase = 1;
+               }
        }
        return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
 }
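
A worked instance of the clamp above, for a few assumed HZ configurations (illustrative arithmetic, not values from the patch):

/* SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED = 2 * USEC_PER_SEC / HZ / 5
 *   HZ = 1000:  2 * 1000000 / 1000 / 5 =  400 -> within [3, 1000]: 400
 *   HZ =  250:  2 * 1000000 /  250 / 5 = 1600 -> clamped to high mark 1000
 *   HZ =  100:  2 * 1000000 /  100 / 5 = 4000 -> clamped to high mark 1000
 */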
@@ -606,15 +654,6 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
-/*
- * We use an adaptive strategy for synchronize_srcu() and especially for
- * synchronize_srcu_expedited().  We spin for a fixed time period
- * (defined below) to allow SRCU readers to exit their read-side critical
- * sections.  If there are still some readers after a few microseconds,
- * we repeatedly block for 1-millisecond time periods.
- */
-#define SRCU_RETRY_CHECK_DELAY         5
-
 /*
  * Start an SRCU grace period.
  */
@@ -700,7 +739,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp
  */
 static void srcu_gp_end(struct srcu_struct *ssp)
 {
-       unsigned long cbdelay;
+       unsigned long cbdelay = 1;
        bool cbs;
        bool last_lvl;
        int cpu;
@@ -720,7 +759,9 @@ static void srcu_gp_end(struct srcu_struct *ssp)
        spin_lock_irq_rcu_node(ssp);
        idx = rcu_seq_state(ssp->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
-       cbdelay = !!srcu_get_delay(ssp);
+       if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
+               cbdelay = 0;
+
        WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
        rcu_seq_end(&ssp->srcu_gp_seq);
        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
@@ -921,12 +962,16 @@ static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
  */
 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 {
+       unsigned long curdelay;
+
+       curdelay = !srcu_get_delay(ssp);
+
        for (;;) {
                if (srcu_readers_active_idx_check(ssp, idx))
                        return true;
-               if (--trycount + !srcu_get_delay(ssp) <= 0)
+               if ((--trycount + curdelay) <= 0)
                        return false;
-               udelay(SRCU_RETRY_CHECK_DELAY);
+               udelay(srcu_retry_check_delay);
        }
 }
 
@@ -1582,7 +1627,7 @@ static void process_srcu(struct work_struct *work)
                j = jiffies;
                if (READ_ONCE(ssp->reschedule_jiffies) == j) {
                        WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
-                       if (READ_ONCE(ssp->reschedule_count) > SRCU_MAX_NODELAY)
+                       if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
                                curdelay = 1;
                } else {
                        WRITE_ONCE(ssp->reschedule_count, 1);
@@ -1674,6 +1719,11 @@ static int __init srcu_bootup_announce(void)
        pr_info("Hierarchical SRCU implementation.\n");
        if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
                pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
+       if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
+               pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
+       if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
+               pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
+       pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
        return 0;
 }
 early_initcall(srcu_bootup_announce);
index b515296..7bf5612 100644
@@ -1701,7 +1701,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
                 * the throttle.
                 */
                p->dl.dl_throttled = 0;
-               BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
+               if (!(flags & ENQUEUE_REPLENISH))
+                       printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
+                                            task_pid_nr(p));
+
                return;
        }
 
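
The deadline hunk above trades a fatal BUG_ON() for a one-shot diagnostic, so a bad-but-survivable state no longer crashes the box. A hypothetical userspace analogue of the warn-once idea (printk_deferred_once() is the in-kernel facility):

#include <stdio.h>

/* Emit a diagnostic the first time an "impossible" state is seen,
 * rather than aborting the way BUG_ON() would. */
#define warn_once(...)						\
	do {							\
		static int warned;				\
		if (!warned) {					\
			warned = 1;				\
			fprintf(stderr, __VA_ARGS__);		\
		}						\
	} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once("REPLENISH flag missing for pid %d\n", 42);
	return 0;	/* message printed exactly once */
}
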
index bb9962b..59ddb00 100644
@@ -454,6 +454,33 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
        rcu_assign_pointer(watch->queue, wqueue);
 }
 
+static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
+{
+       const struct cred *cred;
+       struct watch *w;
+
+       hlist_for_each_entry(w, &wlist->watchers, list_node) {
+               struct watch_queue *wq = rcu_access_pointer(w->queue);
+               if (wqueue == wq && watch->id == w->id)
+                       return -EBUSY;
+       }
+
+       cred = current_cred();
+       if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
+               atomic_dec(&cred->user->nr_watches);
+               return -EAGAIN;
+       }
+
+       watch->cred = get_cred(cred);
+       rcu_assign_pointer(watch->watch_list, wlist);
+
+       kref_get(&wqueue->usage);
+       kref_get(&watch->usage);
+       hlist_add_head(&watch->queue_node, &wqueue->watches);
+       hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
+       return 0;
+}
+
 /**
  * add_watch_to_object - Add a watch on an object to a watch list
  * @watch: The watch to add
@@ -468,34 +495,21 @@ void init_watch(struct watch *watch, struct watch_queue *wqueue)
  */
 int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
 {
-       struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
-       struct watch *w;
-
-       hlist_for_each_entry(w, &wlist->watchers, list_node) {
-               struct watch_queue *wq = rcu_access_pointer(w->queue);
-               if (wqueue == wq && watch->id == w->id)
-                       return -EBUSY;
-       }
-
-       watch->cred = get_current_cred();
-       rcu_assign_pointer(watch->watch_list, wlist);
+       struct watch_queue *wqueue;
+       int ret = -ENOENT;
 
-       if (atomic_inc_return(&watch->cred->user->nr_watches) >
-           task_rlimit(current, RLIMIT_NOFILE)) {
-               atomic_dec(&watch->cred->user->nr_watches);
-               put_cred(watch->cred);
-               return -EAGAIN;
-       }
+       rcu_read_lock();
 
+       wqueue = rcu_access_pointer(watch->queue);
        if (lock_wqueue(wqueue)) {
-               kref_get(&wqueue->usage);
-               kref_get(&watch->usage);
-               hlist_add_head(&watch->queue_node, &wqueue->watches);
+               spin_lock(&wlist->lock);
+               ret = add_one_watch(watch, wlist, wqueue);
+               spin_unlock(&wlist->lock);
                unlock_wqueue(wqueue);
        }
 
-       hlist_add_head(&watch->list_node, &wlist->watchers);
-       return 0;
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(add_watch_to_object);
 
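
The watch_queue refactor moves the duplicate scan, the per-user quota check, and the list insertions into add_one_watch() so that all of them run inside the wqueue and wlist locks, closing the races the old open-coded sequence allowed. A minimal pthread sketch of that check-then-insert-under-one-lock shape, with simplified stand-in types:

#include <errno.h>
#include <pthread.h>

struct list { pthread_mutex_t lock; int nr; int ids[64]; };

/* Duplicate check and insert must share one critical section; checking
 * before taking the lock leaves a window for a racing insert of the
 * same id. */
static int add_one(struct list *l, int id)
{
	int i, ret = 0;

	pthread_mutex_lock(&l->lock);
	for (i = 0; i < l->nr; i++)
		if (l->ids[i] == id) {
			ret = -EBUSY;
			goto out;
		}
	if (l->nr == 64) {
		ret = -EAGAIN;	/* quota exceeded */
		goto out;
	}
	l->ids[l->nr++] = id;
out:
	pthread_mutex_unlock(&l->lock);
	return ret;
}
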
index 5512644..e2a39e3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -87,7 +87,8 @@ retry:
         * belongs to this folio.
         */
        if (unlikely(page_folio(page) != folio)) {
-               folio_put_refs(folio, refs);
+               if (!put_devmap_managed_page_refs(&folio->page, refs))
+                       folio_put_refs(folio, refs);
                goto retry;
        }
 
@@ -176,7 +177,8 @@ static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
                        refs *= GUP_PIN_COUNTING_BIAS;
        }
 
-       folio_put_refs(folio, refs);
+       if (!put_devmap_managed_page_refs(&folio->page, refs))
+               folio_put_refs(folio, refs);
 }
 
 /**
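
Both gup.c call sites above now follow the same shape: offer the page to a type-specific release helper first, and fall back to the generic refcount drop only when it declines. A self-contained sketch of that pattern; the names below are invented stand-ins for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

struct page { bool is_fs_dax; int refs; };

/* Returns true only when it handled the drop itself. */
static bool put_special_page_refs(struct page *p, int refs)
{
	if (!p->is_fs_dax)
		return false;		/* not ours: use the generic path */
	p->refs -= refs;
	if (p->refs == 1)
		printf("page idle, wake waiters\n");
	return true;
}

static void put_page_refs(struct page *p, int refs)
{
	if (!put_special_page_refs(p, refs))
		p->refs -= refs;	/* generic drop */
}

int main(void)
{
	struct page dax = { true, 4 }, plain = { false, 4 };

	put_page_refs(&dax, 3);		/* special path, prints */
	put_page_refs(&plain, 3);	/* generic path */
	return 0;
}
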
index a57e1be..a18c071 100644
@@ -4788,8 +4788,13 @@ again:
                         * sharing with another vma.
                         */
                        ;
-               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
-                                   is_hugetlb_entry_hwpoisoned(entry))) {
+               } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
+                       bool uffd_wp = huge_pte_uffd_wp(entry);
+
+                       if (!userfaultfd_wp(dst_vma) && uffd_wp)
+                               entry = huge_pte_clear_uffd_wp(entry);
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else if (unlikely(is_hugetlb_entry_migration(entry))) {
                        swp_entry_t swp_entry = pte_to_swp_entry(entry);
                        bool uffd_wp = huge_pte_uffd_wp(entry);
 
@@ -5947,6 +5952,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                page = alloc_huge_page(dst_vma, dst_addr, 0);
                if (IS_ERR(page)) {
+                       put_page(*pagep);
                        ret = -ENOMEM;
                        *pagep = NULL;
                        goto out;
index 4b5e5a3..6aff49f 100644
@@ -603,14 +603,6 @@ static unsigned long kfence_init_pool(void)
                addr += 2 * PAGE_SIZE;
        }
 
-       /*
-        * The pool is live and will never be deallocated from this point on.
-        * Remove the pool object from the kmemleak object tree, as it would
-        * otherwise overlap with allocations returned by kfence_alloc(), which
-        * are registered with kmemleak through the slab post-alloc hook.
-        */
-       kmemleak_free(__kfence_pool);
-
        return 0;
 }
 
@@ -623,8 +615,16 @@ static bool __init kfence_init_pool_early(void)
 
        addr = kfence_init_pool();
 
-       if (!addr)
+       if (!addr) {
+               /*
+                * The pool is live and will never be deallocated from this point on.
+                * Ignore the pool object from the kmemleak phys object tree, as it would
+                * otherwise overlap with allocations returned by kfence_alloc(), which
+                * are registered with kmemleak through the slab post-alloc hook.
+                */
+               kmemleak_ignore_phys(__pa(__kfence_pool));
                return true;
+       }
 
        /*
         * Only release unprotected pages, and do not try to go back and change
index 4cf7d4b..1c6027a 100644
@@ -3043,7 +3043,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
        pte_t entry;
 
        VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
-       VM_BUG_ON(PageAnon(page) && !PageAnonExclusive(page));
+       VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
 
        /*
         * Clear the pages cpupid information as the existing
@@ -4369,9 +4369,12 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
                        return VM_FAULT_OOM;
        }
 
-       /* See comment in handle_pte_fault() */
+       /*
+        * See comment in handle_pte_fault() for how this scenario happens, we
+        * need to return NOPAGE so that we drop this page.
+        */
        if (pmd_devmap_trans_unstable(vmf->pmd))
-               return 0;
+               return VM_FAULT_NOPAGE;
 
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                      vmf->address, &vmf->ptl);
index b870a65..745eea0 100644
@@ -499,7 +499,7 @@ void free_zone_device_page(struct page *page)
 }
 
 #ifdef CONFIG_FS_DAX
-bool __put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page_refs(struct page *page, int refs)
 {
        if (page->pgmap->type != MEMORY_DEVICE_FS_DAX)
                return false;
@@ -509,9 +509,9 @@ bool __put_devmap_managed_page(struct page *page)
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
-       if (page_ref_dec_return(page) == 1)
+       if (page_ref_sub_return(page, refs) == 1)
                wake_up_var(&page->_refcount);
        return true;
 }
-EXPORT_SYMBOL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page_refs);
 #endif /* CONFIG_FS_DAX */
index 206ed6b..f06279d 100644
@@ -55,22 +55,28 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
        gfp_t gfp = vmf->gfp_mask;
        unsigned long addr;
        struct page *page;
+       vm_fault_t ret;
        int err;
 
        if (((loff_t)vmf->pgoff << PAGE_SHIFT) >= i_size_read(inode))
                return vmf_error(-EINVAL);
 
+       filemap_invalidate_lock_shared(mapping);
+
 retry:
        page = find_lock_page(mapping, offset);
        if (!page) {
                page = alloc_page(gfp | __GFP_ZERO);
-               if (!page)
-                       return VM_FAULT_OOM;
+               if (!page) {
+                       ret = VM_FAULT_OOM;
+                       goto out;
+               }
 
                err = set_direct_map_invalid_noflush(page);
                if (err) {
                        put_page(page);
-                       return vmf_error(err);
+                       ret = vmf_error(err);
+                       goto out;
                }
 
                __SetPageUptodate(page);
@@ -86,7 +92,8 @@ retry:
                        if (err == -EEXIST)
                                goto retry;
 
-                       return vmf_error(err);
+                       ret = vmf_error(err);
+                       goto out;
                }
 
                addr = (unsigned long)page_address(page);
@@ -94,7 +101,11 @@ retry:
        }
 
        vmf->page = page;
-       return VM_FAULT_LOCKED;
+       ret = VM_FAULT_LOCKED;
+
+out:
+       filemap_invalidate_unlock_shared(mapping);
+       return ret;
 }
 
 static const struct vm_operations_struct secretmem_vm_ops = {
@@ -162,12 +173,20 @@ static int secretmem_setattr(struct user_namespace *mnt_userns,
                             struct dentry *dentry, struct iattr *iattr)
 {
        struct inode *inode = d_inode(dentry);
+       struct address_space *mapping = inode->i_mapping;
        unsigned int ia_valid = iattr->ia_valid;
+       int ret;
+
+       filemap_invalidate_lock(mapping);
 
        if ((ia_valid & ATTR_SIZE) && inode->i_size)
-               return -EINVAL;
+               ret = -EINVAL;
+       else
+               ret = simple_setattr(mnt_userns, dentry, iattr);
 
-       return simple_setattr(mnt_userns, dentry, iattr);
+       filemap_invalidate_unlock(mapping);
+
+       return ret;
 }
 
 static const struct inode_operations secretmem_iops = {
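
Both secretmem hunks convert early returns into a single-exit flow so the newly taken invalidate lock is always released on one path. A minimal sketch of that discipline, assuming simplified stand-ins for the filemap locking API:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t invalidate_lock = PTHREAD_MUTEX_INITIALIZER;

static int setattr(int size_requested, int current_size)
{
	int ret;

	pthread_mutex_lock(&invalidate_lock);

	/* Every outcome funnels through the single unlock below, so no
	 * error path can leak the lock. */
	if (size_requested && current_size)
		ret = -EINVAL;
	else
		ret = 0;	/* apply the attribute change here */

	pthread_mutex_unlock(&invalidate_lock);
	return ret;
}

int main(void)
{
	return setattr(1, 0) == 0 ? 0 : 1;
}
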
index a6f5653..b7f2d4a 100644
@@ -3392,7 +3392,7 @@ static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
                break;
        case Opt_nr_blocks:
                ctx->blocks = memparse(param->string, &rest);
-               if (*rest)
+               if (*rest || ctx->blocks > S64_MAX)
                        goto bad_value;
                ctx->seen |= SHMEM_SEEN_BLOCKS;
                break;
@@ -3514,10 +3514,7 @@ static int shmem_reconfigure(struct fs_context *fc)
 
        raw_spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
-       if (ctx->blocks > S64_MAX) {
-               err = "Number of blocks too large";
-               goto out;
-       }
+
        if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
                if (!sbinfo->max_blocks) {
                        err = "Cannot retroactively limit size";
index 148ce62..e6d804b 100644
@@ -5297,6 +5297,9 @@ int hci_suspend_sync(struct hci_dev *hdev)
                return err;
        }
 
+       /* Update event mask so only the allowed event can wakeup the host */
+       hci_set_event_mask_sync(hdev);
+
        /* Only configure accept list if disconnect succeeded and wake
         * isn't being prevented.
         */
@@ -5308,9 +5311,6 @@ int hci_suspend_sync(struct hci_dev *hdev)
        /* Unpause to take care of updating scanning params */
        hdev->scanning_paused = false;
 
-       /* Update event mask so only the allowed event can wakeup the host */
-       hci_set_event_mask_sync(hdev);
-
        /* Enable event filter for paired devices */
        hci_update_event_filter_sync(hdev);
 
index 09ecaf5..77c0aac 100644
@@ -111,7 +111,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 }
 
 /* Find channel with given SCID.
- * Returns locked channel. */
+ * Returns a reference locked channel.
+ */
 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                 u16 cid)
 {
@@ -119,15 +120,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
 }
 
 /* Find channel with given DCID.
- * Returns locked channel.
+ * Returns a reference locked channel.
  */
 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                 u16 cid)
@@ -136,8 +141,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_dcid(conn, cid);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
@@ -162,8 +171,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
 
        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_ident(conn, ident);
-       if (c)
-               l2cap_chan_lock(c);
+       if (c) {
+               /* Only lock if chan reference is not 0 */
+               c = l2cap_chan_hold_unless_zero(c);
+               if (c)
+                       l2cap_chan_lock(c);
+       }
        mutex_unlock(&conn->chan_lock);
 
        return c;
@@ -497,6 +510,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
        kref_get(&c->kref);
 }
 
+struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
+{
+       BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
+
+       if (!kref_get_unless_zero(&c->kref))
+               return NULL;
+
+       return c;
+}
+
 void l2cap_chan_put(struct l2cap_chan *c)
 {
        BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
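
l2cap_chan_hold_unless_zero() exists because a lookup under conn->chan_lock can race with the final l2cap_chan_put(): blindly taking a reference would resurrect a channel already headed for kfree. A sketch of the get-unless-zero primitive in C11 atomics (the kernel's kref_get_unless_zero() is the real implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct chan { atomic_int kref; };

/* Increment the count only if it is still nonzero; a zero count means
 * the last reference is gone and teardown is in flight. */
static bool kref_get_unless_zero(atomic_int *kref)
{
	int v = atomic_load(kref);

	while (v != 0)
		if (atomic_compare_exchange_weak(kref, &v, v + 1))
			return true;
	return false;
}

static struct chan *chan_hold_unless_zero(struct chan *c)
{
	return kref_get_unless_zero(&c->kref) ? c : NULL;
}

int main(void)
{
	struct chan live = { 1 }, dying = { 0 };

	return (chan_hold_unless_zero(&live) != NULL &&
		chan_hold_unless_zero(&dying) == NULL) ? 0 : 1;
}
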
@@ -1969,7 +1992,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
                        src_match = !bacmp(&c->src, src);
                        dst_match = !bacmp(&c->dst, dst);
                        if (src_match && dst_match) {
-                               l2cap_chan_hold(c);
+                               c = l2cap_chan_hold_unless_zero(c);
+                               if (!c)
+                                       continue;
+
                                read_unlock(&chan_list_lock);
                                return c;
                        }
@@ -1984,7 +2010,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
        }
 
        if (c1)
-               l2cap_chan_hold(c1);
+               c1 = l2cap_chan_hold_unless_zero(c1);
 
        read_unlock(&chan_list_lock);
 
@@ -4464,6 +4490,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
 
 unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
 
@@ -4578,6 +4605,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
 
 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
        return err;
 }
 
@@ -5305,6 +5333,7 @@ send_move_response:
        l2cap_send_move_chan_rsp(chan, result);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5397,6 +5426,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
        }
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
@@ -5426,6 +5456,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
        l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
@@ -5489,6 +5520,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
        l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5524,6 +5556,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
        }
 
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -5896,12 +5929,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (credits > max_credits) {
                BT_ERR("LE credits overflow");
                l2cap_send_disconn_req(chan, ECONNRESET);
-               l2cap_chan_unlock(chan);
 
                /* Return 0 so that we don't trigger an unnecessary
                 * command reject packet.
                 */
-               return 0;
+               goto unlock;
        }
 
        chan->tx_credits += credits;
@@ -5912,7 +5944,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (chan->tx_credits)
                chan->ops->resume(chan);
 
+unlock:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 
        return 0;
 }
@@ -7598,6 +7632,7 @@ drop:
 
 done:
        l2cap_chan_unlock(chan);
+       l2cap_chan_put(chan);
 }
 
 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
@@ -8086,7 +8121,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
                if (src_type != c->src_type)
                        continue;
 
-               l2cap_chan_hold(c);
+               c = l2cap_chan_hold_unless_zero(c);
                read_unlock(&chan_list_lock);
                return c;
        }
index 8cfafd7..646d104 100644
@@ -4844,7 +4844,6 @@ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
                else
                        status = MGMT_STATUS_FAILED;
 
-               mgmt_pending_remove(cmd);
                goto unlock;
        }
 
index 1ef14a0..5aeb364 100644
@@ -589,9 +589,13 @@ static int br_fill_ifinfo(struct sk_buff *skb,
        }
 
 done:
+       if (af) {
+               if (nlmsg_get_pos(skb) - (void *)af > nla_attr_size(0))
+                       nla_nest_end(skb, af);
+               else
+                       nla_nest_cancel(skb, af);
+       }
 
-       if (af)
-               nla_nest_end(skb, af);
        nlmsg_end(skb, nlh);
        return 0;
 
index 251e666..748be72 100644
@@ -47,7 +47,7 @@ enum caif_states {
 struct caifsock {
        struct sock sk; /* must be first member */
        struct cflayer layer;
-       u32 flow_state;
+       unsigned long flow_state;
        struct caif_connect_request conn_req;
        struct mutex readlock;
        struct dentry *debugfs_socket_dir;
@@ -56,38 +56,32 @@ struct caifsock {
 
 static int rx_flow_is_on(struct caifsock *cf_sk)
 {
-       return test_bit(RX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       return test_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static int tx_flow_is_on(struct caifsock *cf_sk)
 {
-       return test_bit(TX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       return test_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_rx_flow_off(struct caifsock *cf_sk)
 {
-        clear_bit(RX_FLOW_ON_BIT,
-                (void *) &cf_sk->flow_state);
+       clear_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_rx_flow_on(struct caifsock *cf_sk)
 {
-        set_bit(RX_FLOW_ON_BIT,
-                       (void *) &cf_sk->flow_state);
+       set_bit(RX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_tx_flow_off(struct caifsock *cf_sk)
 {
-        clear_bit(TX_FLOW_ON_BIT,
-               (void *) &cf_sk->flow_state);
+       clear_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void set_tx_flow_on(struct caifsock *cf_sk)
 {
-        set_bit(TX_FLOW_ON_BIT,
-               (void *) &cf_sk->flow_state);
+       set_bit(TX_FLOW_ON_BIT, &cf_sk->flow_state);
 }
 
 static void caif_read_lock(struct sock *sk)
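
The caif change widens flow_state from u32 to unsigned long because the kernel bitops operate on unsigned long words; the old casts merely happened to work. Non-atomic userspace stand-ins make the operand type visible (the kernel versions are atomic):

#include <stdio.h>

enum { RX_FLOW_ON_BIT, TX_FLOW_ON_BIT };

/* Non-atomic analogues of set_bit()/clear_bit()/test_bit(); note the
 * operand really is an unsigned long *, which is why casting a u32's
 * address was never well-defined. */
static void set_bit(int nr, unsigned long *w)	{ *w |= 1UL << nr; }
static void clear_bit(int nr, unsigned long *w)	{ *w &= ~(1UL << nr); }
static int test_bit(int nr, const unsigned long *w) { return !!(*w & (1UL << nr)); }

int main(void)
{
	unsigned long flow_state = 0;

	set_bit(RX_FLOW_ON_BIT, &flow_state);
	clear_bit(TX_FLOW_ON_BIT, &flow_state);
	printf("rx=%d tx=%d\n", test_bit(RX_FLOW_ON_BIT, &flow_state),
	       test_bit(TX_FLOW_ON_BIT, &flow_state));
	return 0;
}
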
index aa4f43f..6582dfd 100644
@@ -484,8 +484,8 @@ static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gf
        sk->sk_family      = PF_DECnet;
        sk->sk_protocol    = 0;
        sk->sk_allocation  = gfp;
-       sk->sk_sndbuf      = sysctl_decnet_wmem[1];
-       sk->sk_rcvbuf      = sysctl_decnet_rmem[1];
+       sk->sk_sndbuf      = READ_ONCE(sysctl_decnet_wmem[1]);
+       sk->sk_rcvbuf      = READ_ONCE(sysctl_decnet_rmem[1]);
 
        /* Initialization of DECnet Session Control Port                */
        scp = DN_SK(sk);
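
This decnet hunk is the first of many in this merge wrapping lockless sysctl reads in READ_ONCE(): the values can be rewritten through /proc at any moment, and an unmarked read lets the compiler tear, fuse, or refetch it. A rough userspace analogue using C11 relaxed atomics (the kernel macro is a volatile access rather than an atomic):

#include <stdatomic.h>

#define READ_ONCE(x)	 atomic_load_explicit(&(x), memory_order_relaxed)
#define WRITE_ONCE(x, v) atomic_store_explicit(&(x), (v), memory_order_relaxed)

/* Written by a sysctl handler task, read locklessly at socket
 * creation; both sides must use the marked accessors. */
static _Atomic long sysctl_wmem_default = 16 * 1024;

long sock_default_sndbuf(void)
{
	return READ_ONCE(sysctl_wmem_default);
}

void sysctl_set_wmem_default(long v)
{
	WRITE_ONCE(sysctl_wmem_default, v);
}

int main(void)
{
	sysctl_set_wmem_default(32 * 1024);
	return sock_default_sndbuf() == 32 * 1024 ? 0 : 1;
}
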
index 2b56218..4dfd68c 100644
@@ -344,6 +344,7 @@ static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
 
        ether_addr_copy(a->addr, addr);
        a->vid = vid;
+       a->db = db;
        refcount_set(&a->refcount, 1);
        list_add_tail(&a->list, &lag->fdbs);
 
index 46e8a51..452ff17 100644
@@ -1042,6 +1042,7 @@ fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri)
 
 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
 {
+       u8 fib_notify_on_flag_change;
        struct fib_alias *fa_match;
        struct sk_buff *skb;
        int err;
@@ -1063,14 +1064,16 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
        WRITE_ONCE(fa_match->offload, fri->offload);
        WRITE_ONCE(fa_match->trap, fri->trap);
 
+       fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change);
+
        /* 2 means send notifications only if offload_failed was changed. */
-       if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
+       if (fib_notify_on_flag_change == 2 &&
            READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
                goto out;
 
        WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
 
-       if (!net->ipv4.sysctl_fib_notify_on_flag_change)
+       if (!fib_notify_on_flag_change)
                goto out;
 
        skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC);
index dc7cc3c..970e9a2 100644
@@ -454,8 +454,8 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
-       WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
+       WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
 
        sk_sockets_allocated_inc(sk);
 }
@@ -688,7 +688,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                int size_goal)
 {
        return skb->len < size_goal &&
-              sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
               !tcp_rtx_queue_empty(sk) &&
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
               tcp_skb_can_collapse_to(skb);
@@ -1842,7 +1842,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
        if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
                cap = sk->sk_rcvbuf >> 1;
        else
-               cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
+               cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
        val = min(val, cap);
        WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 
@@ -4573,9 +4573,18 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
                return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
        }
 
-       /* check the signature */
-       genhash = tp->af_specific->calc_md5_hash(newhash, hash_expected,
-                                                NULL, skb);
+       /* Check the signature.
+        * To support dual stack listeners, we need to handle
+        * IPv4-mapped case.
+        */
+       if (family == AF_INET)
+               genhash = tcp_v4_md5_hash_skb(newhash,
+                                             hash_expected,
+                                             NULL, skb);
+       else
+               genhash = tp->af_specific->calc_md5_hash(newhash,
+                                                        hash_expected,
+                                                        NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
index ae73b34..ab5f0ea 100644
@@ -426,7 +426,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 
        if (sk->sk_sndbuf < sndmem)
                WRITE_ONCE(sk->sk_sndbuf,
-                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
+                          min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -461,7 +461,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
        struct tcp_sock *tp = tcp_sk(sk);
        /* Optimize this! */
        int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
-       int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
+       int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
 
        while (tp->rcv_ssthresh <= window) {
                if (truesize <= skb->len)
@@ -534,7 +534,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
  */
 static void tcp_init_buffer_space(struct sock *sk)
 {
-       int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+       int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
        struct tcp_sock *tp = tcp_sk(sk);
        int maxwin;
 
@@ -574,16 +574,17 @@ static void tcp_clamp_window(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct net *net = sock_net(sk);
+       int rmem2;
 
        icsk->icsk_ack.quick = 0;
+       rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
 
-       if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
+       if (sk->sk_rcvbuf < rmem2 &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                WRITE_ONCE(sk->sk_rcvbuf,
-                          min(atomic_read(&sk->sk_rmem_alloc),
-                              net->ipv4.sysctl_tcp_rmem[2]));
+                          min(atomic_read(&sk->sk_rmem_alloc), rmem2));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -724,7 +725,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
         * <prev RTT . ><current RTT .. ><next RTT .... >
         */
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -745,7 +746,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
                do_div(rcvwin, tp->advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                if (rcvbuf > sk->sk_rcvbuf) {
                        WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
@@ -909,9 +910,9 @@ static void tcp_update_pacing_rate(struct sock *sk)
         *       end of slow start and should slow down.
         */
        if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
        else
-               rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
+               rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
 
        rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
 
@@ -2174,7 +2175,7 @@ void tcp_enter_loss(struct sock *sk)
         * loss recovery is underway except recurring timeout(s) on
         * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
         */
-       tp->frto = net->ipv4.sysctl_tcp_frto &&
+       tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
                   (new_recovery || icsk->icsk_retransmits) &&
                   !inet_csk(sk)->icsk_mtup.probe_size;
 }
@@ -3057,7 +3058,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
-       u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+       u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
        struct tcp_sock *tp = tcp_sk(sk);
 
        if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3580,7 +3581,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
        if (*last_oow_ack_time) {
                s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
 
-               if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+               if (0 <= elapsed &&
+                   elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
                        NET_INC_STATS(net, mib_idx);
                        return true;    /* rate-limited: don't send yet! */
                }
@@ -3628,7 +3630,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
        /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
-               u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+               u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
                u32 half = (ack_limit + 1) >> 1;
 
                challenge_timestamp = now;
@@ -4425,7 +4427,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+       if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                int mib_idx;
 
                if (before(seq, tp->rcv_nxt))
@@ -4472,7 +4474,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
                NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
                tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
-               if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+               if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
                        u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
                        tcp_rcv_spurious_retrans(sk, skb);
@@ -5516,7 +5518,7 @@ send_now:
        }
 
        if (!tcp_is_sack(tp) ||
-           tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
+           tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
                goto send_now;
 
        if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
@@ -5537,11 +5539,12 @@ send_now:
        if (tp->srtt_us && tp->srtt_us < rtt)
                rtt = tp->srtt_us;
 
-       delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
+       delay = min_t(unsigned long,
+                     READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
                      rtt * (NSEC_PER_USEC >> 3)/20);
        sock_hold(sk);
        hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
-                              sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns,
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
                               HRTIMER_MODE_REL_PINNED_SOFT);
 }
 
index c7e7101..0c83780 100644
@@ -1008,7 +1008,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
-               tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (inet_sk(sk)->tos & INET_ECN_MASK) :
                                inet_sk(sk)->tos;
@@ -1528,7 +1528,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
        if (!dst) {
index a501150..d58e672 100644
@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
        int m;
 
        sk_dst_confirm(sk);
-       if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+       if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
                return;
 
        rcu_read_lock();
@@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk)
 
        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tcp_snd_cwnd(tp) >> 1) > val)
@@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk)
        } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
@@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk)
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
-               if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+               if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
                    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
@@ -463,7 +463,7 @@ void tcp_init_metrics(struct sock *sk)
        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 
-       val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
+       val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
              0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
index 2b72ccd..78b654f 100644
@@ -167,16 +167,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        if (tcp_packets_in_flight(tp) == 0)
                tcp_ca_event(sk, CA_EVENT_TX_START);
 
-       /* If this is the first data packet sent in response to the
-        * previous received data,
-        * and it is a reply for ato after last received packet,
-        * increase pingpong count.
-        */
-       if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
-           (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
-               inet_csk_inc_pingpong_cnt(sk);
-
        tp->lsndtime = now;
+
+       /* If it is a reply for ato after last received
+        * packet, enter pingpong mode.
+        */
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               inet_csk_enter_pingpong_mode(sk);
 }
 
 /* Account for an ACK we sent. */
@@ -230,7 +227,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
         * which we interpret as a sign the remote TCP is not
         * misinterpreting the window field as a signed quantity.
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
        else
                (*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -241,7 +238,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
        *rcv_wscale = 0;
        if (wscale_ok) {
                /* Set window scaling on max possible window */
-               space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+               space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
                space = max_t(u32, space, sysctl_rmem_max);
                space = min_t(u32, space, *window_clamp);
                *rcv_wscale = clamp_t(int, ilog2(space) - 15,
@@ -285,7 +282,7 @@ static u16 tcp_select_window(struct sock *sk)
         * scaled window.
         */
        if (!tp->rx_opt.rcv_wscale &&
-           sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+           READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
                new_win = min(new_win, MAX_TCP_WINDOW);
        else
                new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -1976,7 +1973,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 
        bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
 
-       r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+       r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
        if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
                bytes += sk->sk_gso_max_size >> r;
 
@@ -1995,7 +1992,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 
        min_tso = ca_ops->min_tso_segs ?
                        ca_ops->min_tso_segs(sk) :
-                       sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+                       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
 
        tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
        return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2507,7 +2504,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
                      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
        if (sk->sk_pacing_status == SK_PACING_NONE)
                limit = min_t(unsigned long, limit,
-                             sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+                             READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
        limit <<= factor;
 
        if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
index 7f695c3..87c699d 100644
@@ -1522,7 +1522,6 @@ static void mld_query_work(struct work_struct *work)
 
                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_query_work, 0);
                        break;
                }
        }
@@ -1533,8 +1532,10 @@ static void mld_query_work(struct work_struct *work)
                __mld_query_work(skb);
        mutex_unlock(&idev->mc_lock);
 
-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }
 
 /* called with rcu_read_lock() */
@@ -1624,7 +1625,6 @@ static void mld_report_work(struct work_struct *work)
 
                if (++cnt >= MLD_MAX_QUEUE) {
                        rework = true;
-                       schedule_delayed_work(&idev->mc_report_work, 0);
                        break;
                }
        }
@@ -1635,8 +1635,10 @@ static void mld_report_work(struct work_struct *work)
                __mld_report_work(skb);
        mutex_unlock(&idev->mc_lock);
 
-       if (!rework)
-               in6_dev_put(idev);
+       if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
+               return;
+
+       in6_dev_put(idev);
 }
 
 static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
index b1179f6..91b8405 100644
 #include <linux/proc_fs.h>
 #include <net/ping.h>
 
+static void ping_v6_destroy(struct sock *sk)
+{
+       inet6_destroy_sock(sk);
+}
+
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                                 int *addr_len)
@@ -185,6 +190,7 @@ struct proto pingv6_prot = {
        .owner =        THIS_MODULE,
        .init =         ping_init_sock,
        .close =        ping_close,
+       .destroy =      ping_v6_destroy,
        .connect =      ip6_datagram_connect_v6_only,
        .disconnect =   __udp_disconnect,
        .setsockopt =   ipv6_setsockopt,
index 85b8b76..e54eee8 100644
@@ -546,7 +546,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
 
-               tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+               tclass = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (np->tclass & INET_ECN_MASK) :
                                np->tclass;
@@ -1317,7 +1317,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        /* Set ToS of the new socket based upon the value of incoming SYN.
         * ECT bits are set later in tcp_init_transfer().
         */
-       if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
                newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
 
        /* Clone native IPv6 options from listening socket (if any)
index bd8f0f4..30d2890 100644
@@ -1271,7 +1271,7 @@ raise_win:
                if (unlikely(th->syn))
                        new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
                if (!tp->rx_opt.rcv_wscale &&
-                   sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows)
+                   READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
                        new_win = min(new_win, MAX_TCP_WINDOW);
                else
                        new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
index 57f23f4..a3f1c14 100644
@@ -1873,7 +1873,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
        if (msk->rcvq_space.copied <= msk->rcvq_space.space)
                goto new_measure;
 
-       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+       if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;
@@ -1891,7 +1891,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 
                do_div(rcvwin, advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
-                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+                              READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 
                if (rcvbuf > sk->sk_rcvbuf) {
                        u32 window_clamp;
@@ -2634,8 +2634,8 @@ static int mptcp_init_sock(struct sock *sk)
        mptcp_ca_reset(sk);
 
        sk_sockets_allocated_inc(sk);
-       sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
-       sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
+       sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
+       sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 
        return 0;
 }
index d4b16d0..901c763 100644
@@ -1533,7 +1533,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
        mptcp_sock_graft(ssk, sk->sk_socket);
        iput(SOCK_INODE(sf));
        WRITE_ONCE(msk->allow_infinite_fallback, false);
-       return err;
+       return 0;
 
 failed_unlink:
        list_del(&subflow->node);
index 646d5fd..9f976b1 100644
@@ -3340,6 +3340,8 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
                        if (err < 0)
                                return err;
                }
+
+               cond_resched();
        }
 
        return 0;
@@ -9367,9 +9369,13 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                break;
                        }
                }
+
+               cond_resched();
        }
 
        list_for_each_entry(set, &ctx->table->sets, list) {
+               cond_resched();
+
                if (!nft_is_active_next(ctx->net, set))
                        continue;
                if (!(set->flags & NFT_SET_MAP) ||
index a364f8e..87a9009 100644
@@ -843,11 +843,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 }
 
 static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
+nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
 {
        struct sk_buff *nskb;
 
        if (diff < 0) {
+               unsigned int min_len = skb_transport_offset(e->skb);
+
+               if (data_len < min_len)
+                       return -EINVAL;
+
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
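
The nfqnl_mangle() fix makes the verdict payload length unsigned end-to-end and refuses to trim the skb below the transport header offset. A small demonstration of the mixed-sign comparison hazard behind the signedness half (values invented):

#include <stdio.h>

int main(void)
{
	int data_len = -1;	   /* formerly signed, attacker-influenced */
	unsigned int min_len = 20; /* e.g. transport header offset */

	/* The usual arithmetic conversions turn -1 into UINT_MAX, so a
	 * bogus length sails past an unsigned comparison. */
	if ((unsigned int)data_len >= min_len)
		printf("accepted bogus length %u\n", (unsigned int)data_len);

	/* Unsigned end-to-end plus an explicit lower bound, as in the
	 * hunk, closes the hole. */
	unsigned int ulen = 16;
	if (ulen < min_len)
		printf("underlength payload rejected\n");
	return 0;
}
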
index 15e4b76..da29e92 100644
@@ -68,6 +68,31 @@ static void nft_queue_sreg_eval(const struct nft_expr *expr,
        regs->verdict.code = ret;
 }
 
+static int nft_queue_validate(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nft_data **data)
+{
+       static const unsigned int supported_hooks = ((1 << NF_INET_PRE_ROUTING) |
+                                                    (1 << NF_INET_LOCAL_IN) |
+                                                    (1 << NF_INET_FORWARD) |
+                                                    (1 << NF_INET_LOCAL_OUT) |
+                                                    (1 << NF_INET_POST_ROUTING));
+
+       switch (ctx->family) {
+       case NFPROTO_IPV4:
+       case NFPROTO_IPV6:
+       case NFPROTO_INET:
+       case NFPROTO_BRIDGE:
+               break;
+       case NFPROTO_NETDEV: /* lacks okfn */
+               fallthrough;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return nft_chain_validate_hooks(ctx->chain, supported_hooks);
+}
+
 static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
        [NFTA_QUEUE_NUM]        = { .type = NLA_U16 },
        [NFTA_QUEUE_TOTAL]      = { .type = NLA_U16 },
@@ -164,6 +189,7 @@ static const struct nft_expr_ops nft_queue_ops = {
        .eval           = nft_queue_eval,
        .init           = nft_queue_init,
        .dump           = nft_queue_dump,
+       .validate       = nft_queue_validate,
        .reduce         = NFT_REDUCE_READONLY,
 };
 
@@ -173,6 +199,7 @@ static const struct nft_expr_ops nft_queue_sreg_ops = {
        .eval           = nft_queue_sreg_eval,
        .init           = nft_queue_sreg_init,
        .dump           = nft_queue_sreg_dump,
+       .validate       = nft_queue_validate,
        .reduce         = NFT_REDUCE_READONLY,
 };
 
index be29da0..3460abc 100644
@@ -229,9 +229,8 @@ static struct sctp_association *sctp_association_init(
        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;
 
-       if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
-                            0, gfp))
-               goto fail_init;
+       if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+               goto stream_free;
 
        /* Initialize default path MTU. */
        asoc->pathmtu = sp->pathmtu;
index 6dc95dc..ef9fcea 100644
@@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 
        ret = sctp_stream_alloc_out(stream, outcnt, gfp);
        if (ret)
-               goto out_err;
+               return ret;
 
        for (i = 0; i < stream->outcnt; i++)
                SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
@@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
 handle_in:
        sctp_stream_interleave_init(stream);
        if (!incnt)
-               goto out;
-
-       ret = sctp_stream_alloc_in(stream, incnt, gfp);
-       if (ret)
-               goto in_err;
-
-       goto out;
+               return 0;
 
-in_err:
-       sched->free(stream);
-       genradix_free(&stream->in);
-out_err:
-       genradix_free(&stream->out);
-       stream->outcnt = 0;
-out:
-       return ret;
+       return sctp_stream_alloc_in(stream, incnt, gfp);
 }
 
 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
index 518b1b9..1ad565e 100644
@@ -160,7 +160,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
                if (!SCTP_SO(&asoc->stream, i)->ext)
                        continue;
 
-               ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
+               ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
                if (ret)
                        goto err;
        }
index 43509c7..f1c3b8e 100644
@@ -517,7 +517,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
        sk->sk_shutdown = 0;
        sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
-       sk->sk_rcvbuf = sysctl_tipc_rmem[1];
+       sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        sk->sk_destruct = tipc_sock_destruct;
index fc513c1..b3e2a30 100644
@@ -1383,8 +1383,13 @@ static int tls_device_down(struct net_device *netdev)
                 * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
                 * Now release the ref taken above.
                 */
-               if (refcount_dec_and_test(&ctx->refcount))
+               if (refcount_dec_and_test(&ctx->refcount)) {
+                       /* sk_destruct ran after tls_device_down took a ref, and
+                        * it returned early. Complete the destruction here.
+                        */
+                       list_del(&ctx->list);
                        tls_device_free_ctx(ctx);
+               }
        }
 
        up_write(&device_offload_lock);
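
The tls_device hunk completes a teardown that sk_destruct abandoned when it found the refcount elevated: whichever caller drops the count to zero owns the whole destruction, list unlink included. The core primitive, sketched with C11 atomics (the kernel's refcount_dec_and_test() is a hardened version of this):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct ctx { atomic_int refcount; };

/* True only for the caller that moved the count from 1 to 0; that
 * caller, and nobody else, must finish destroying the object. */
static bool refcount_dec_and_test(atomic_int *r)
{
	return atomic_fetch_sub(r, 1) == 1;
}

static void ctx_put(struct ctx *c)
{
	if (refcount_dec_and_test(&c->refcount)) {
		/* list_del(&c->list) would go here, as in the hunk */
		free(c);
	}
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	atomic_init(&c->refcount, 2);
	ctx_put(c);	/* 2 -> 1: no free */
	ctx_put(c);	/* 1 -> 0: unlink + free */
	return 0;
}
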
index 99a128a..4ce5d25 100644
@@ -13,7 +13,6 @@
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
 #include <linux/clk.h>
-#include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/spinlock.h>
@@ -55,40 +54,8 @@ struct rk_i2s_dev {
        const struct rk_i2s_pins *pins;
        unsigned int bclk_ratio;
        spinlock_t lock; /* tx/rx lock */
-       struct pinctrl *pinctrl;
-       struct pinctrl_state *bclk_on;
-       struct pinctrl_state *bclk_off;
 };
 
-static int i2s_pinctrl_select_bclk_on(struct rk_i2s_dev *i2s)
-{
-       int ret = 0;
-
-       if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_on))
-               ret = pinctrl_select_state(i2s->pinctrl,
-                                    i2s->bclk_on);
-
-       if (ret)
-               dev_err(i2s->dev, "bclk enable failed %d\n", ret);
-
-       return ret;
-}
-
-static int i2s_pinctrl_select_bclk_off(struct rk_i2s_dev *i2s)
-{
-
-       int ret = 0;
-
-       if (!IS_ERR(i2s->pinctrl) && !IS_ERR_OR_NULL(i2s->bclk_off))
-               ret = pinctrl_select_state(i2s->pinctrl,
-                                    i2s->bclk_off);
-
-       if (ret)
-               dev_err(i2s->dev, "bclk disable failed %d\n", ret);
-
-       return ret;
-}
-
 static int i2s_runtime_suspend(struct device *dev)
 {
        struct rk_i2s_dev *i2s = dev_get_drvdata(dev);
@@ -125,49 +92,38 @@ static inline struct rk_i2s_dev *to_info(struct snd_soc_dai *dai)
        return snd_soc_dai_get_drvdata(dai);
 }
 
-static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
+static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
 {
        unsigned int val = 0;
        int retry = 10;
-       int ret = 0;
 
        spin_lock(&i2s->lock);
        if (on) {
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
-                               I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
-               if (ret < 0)
-                       goto end;
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
+                                  I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_ENABLE);
 
-               ret = regmap_update_bits(i2s->regmap, I2S_XFER,
-                               I2S_XFER_TXS_START | I2S_XFER_RXS_START,
-                               I2S_XFER_TXS_START | I2S_XFER_RXS_START);
-               if (ret < 0)
-                       goto end;
+               regmap_update_bits(i2s->regmap, I2S_XFER,
+                                  I2S_XFER_TXS_START | I2S_XFER_RXS_START,
+                                  I2S_XFER_TXS_START | I2S_XFER_RXS_START);
 
                i2s->tx_start = true;
        } else {
                i2s->tx_start = false;
 
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
-                               I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
-               if (ret < 0)
-                       goto end;
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
+                                  I2S_DMACR_TDE_ENABLE, I2S_DMACR_TDE_DISABLE);
 
                if (!i2s->rx_start) {
-                       ret = regmap_update_bits(i2s->regmap, I2S_XFER,
-                                       I2S_XFER_TXS_START |
-                                       I2S_XFER_RXS_START,
-                                       I2S_XFER_TXS_STOP |
-                                       I2S_XFER_RXS_STOP);
-                       if (ret < 0)
-                               goto end;
+                       regmap_update_bits(i2s->regmap, I2S_XFER,
+                                          I2S_XFER_TXS_START |
+                                          I2S_XFER_RXS_START,
+                                          I2S_XFER_TXS_STOP |
+                                          I2S_XFER_RXS_STOP);
 
                        udelay(150);
-                       ret = regmap_update_bits(i2s->regmap, I2S_CLR,
-                                       I2S_CLR_TXC | I2S_CLR_RXC,
-                                       I2S_CLR_TXC | I2S_CLR_RXC);
-                       if (ret < 0)
-                               goto end;
+                       regmap_update_bits(i2s->regmap, I2S_CLR,
+                                          I2S_CLR_TXC | I2S_CLR_RXC,
+                                          I2S_CLR_TXC | I2S_CLR_RXC);
 
                        regmap_read(i2s->regmap, I2S_CLR, &val);
 
@@ -182,57 +138,44 @@ static int rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
                        }
                }
        }
-end:
        spin_unlock(&i2s->lock);
-       if (ret < 0)
-               dev_err(i2s->dev, "lrclk update failed\n");
-
-       return ret;
 }
 
-static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
+static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
 {
        unsigned int val = 0;
        int retry = 10;
-       int ret = 0;
 
        spin_lock(&i2s->lock);
        if (on) {
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
                                   I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_ENABLE);
-               if (ret < 0)
-                       goto end;
 
-               ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+               regmap_update_bits(i2s->regmap, I2S_XFER,
                                   I2S_XFER_TXS_START | I2S_XFER_RXS_START,
                                   I2S_XFER_TXS_START | I2S_XFER_RXS_START);
-               if (ret < 0)
-                       goto end;
 
                i2s->rx_start = true;
        } else {
                i2s->rx_start = false;
 
-               ret = regmap_update_bits(i2s->regmap, I2S_DMACR,
+               regmap_update_bits(i2s->regmap, I2S_DMACR,
                                   I2S_DMACR_RDE_ENABLE, I2S_DMACR_RDE_DISABLE);
-               if (ret < 0)
-                       goto end;
 
                if (!i2s->tx_start) {
-                       ret = regmap_update_bits(i2s->regmap, I2S_XFER,
+                       regmap_update_bits(i2s->regmap, I2S_XFER,
                                           I2S_XFER_TXS_START |
                                           I2S_XFER_RXS_START,
                                           I2S_XFER_TXS_STOP |
                                           I2S_XFER_RXS_STOP);
-                       if (ret < 0)
-                               goto end;
+
                        udelay(150);
-                       ret = regmap_update_bits(i2s->regmap, I2S_CLR,
+                       regmap_update_bits(i2s->regmap, I2S_CLR,
                                           I2S_CLR_TXC | I2S_CLR_RXC,
                                           I2S_CLR_TXC | I2S_CLR_RXC);
-                       if (ret < 0)
-                               goto end;
+
                        regmap_read(i2s->regmap, I2S_CLR, &val);
+
                        /* Should wait for clear operation to finish */
                        while (val) {
                                regmap_read(i2s->regmap, I2S_CLR, &val);
@@ -244,12 +187,7 @@ static int rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
                        }
                }
        }
-end:
        spin_unlock(&i2s->lock);
-       if (ret < 0)
-               dev_err(i2s->dev, "lrclk update failed\n");
-
-       return ret;
 }
 
 static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
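
Both helpers share one interlock: each direction gates only its own DMA-request
bits, and the common I2S_XFER start bits are cleared just once both tx_start and
rx_start are false. A condensed sketch of that pattern (rk_i2s_stop_dir is a
hypothetical name; the register macros are the driver's):

    static void rk_i2s_stop_dir(struct rk_i2s_dev *i2s, bool is_tx)
    {
            spin_lock(&i2s->lock);
            if (is_tx)
                    i2s->tx_start = false;
            else
                    i2s->rx_start = false;

            /* the shared transfer engine stops only when both sides are idle */
            if (!i2s->tx_start && !i2s->rx_start)
                    regmap_update_bits(i2s->regmap, I2S_XFER,
                                       I2S_XFER_TXS_START | I2S_XFER_RXS_START,
                                       I2S_XFER_TXS_STOP | I2S_XFER_RXS_STOP);
            spin_unlock(&i2s->lock);
    }
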
@@ -487,26 +425,17 @@ static int rockchip_i2s_trigger(struct snd_pcm_substream *substream,
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-                       ret = rockchip_snd_rxctrl(i2s, 1);
+                       rockchip_snd_rxctrl(i2s, 1);
                else
-                       ret = rockchip_snd_txctrl(i2s, 1);
-               /* Do not turn on bclk if lrclk open fails. */
-               if (ret < 0)
-                       return ret;
-               i2s_pinctrl_select_bclk_on(i2s);
+                       rockchip_snd_txctrl(i2s, 1);
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
-                       if (!i2s->tx_start)
-                               i2s_pinctrl_select_bclk_off(i2s);
-                       ret = rockchip_snd_rxctrl(i2s, 0);
-               } else {
-                       if (!i2s->rx_start)
-                               i2s_pinctrl_select_bclk_off(i2s);
-                       ret = rockchip_snd_txctrl(i2s, 0);
-               }
+               if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+                       rockchip_snd_rxctrl(i2s, 0);
+               else
+                       rockchip_snd_txctrl(i2s, 0);
                break;
        default:
                ret = -EINVAL;
@@ -807,33 +736,6 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        }
 
        i2s->bclk_ratio = 64;
-       i2s->pinctrl = devm_pinctrl_get(&pdev->dev);
-       if (IS_ERR(i2s->pinctrl))
-               dev_err(&pdev->dev, "failed to find i2s pinctrl\n");
-
-       i2s->bclk_on = pinctrl_lookup_state(i2s->pinctrl,
-                                  "bclk_on");
-       if (IS_ERR_OR_NULL(i2s->bclk_on))
-               dev_err(&pdev->dev, "failed to find i2s default state\n");
-       else
-               dev_dbg(&pdev->dev, "find i2s bclk state\n");
-
-       i2s->bclk_off = pinctrl_lookup_state(i2s->pinctrl,
-                                 "bclk_off");
-       if (IS_ERR_OR_NULL(i2s->bclk_off))
-               dev_err(&pdev->dev, "failed to find i2s gpio state\n");
-       else
-               dev_dbg(&pdev->dev, "find i2s bclk_off state\n");
-
-       i2s_pinctrl_select_bclk_off(i2s);
-
-       i2s->playback_dma_data.addr = res->start + I2S_TXDR;
-       i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->playback_dma_data.maxburst = 4;
-
-       i2s->capture_dma_data.addr = res->start + I2S_RXDR;
-       i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       i2s->capture_dma_data.maxburst = 4;
 
        dev_set_drvdata(&pdev->dev, i2s);
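
The removed lookup also shows why this was fragile: pinctrl_lookup_state() was
still called on i2s->pinctrl after devm_pinctrl_get() had failed, with only a
dev_err() in between. A safer shape for optional pinctrl states would be (sketch
only; the revert simply drops the feature):

    i2s->pinctrl = devm_pinctrl_get(&pdev->dev);
    if (IS_ERR(i2s->pinctrl)) {
            i2s->pinctrl = NULL;    /* treat missing pinctrl as optional */
    } else {
            i2s->bclk_on = pinctrl_lookup_state(i2s->pinctrl, "bclk_on");
            if (IS_ERR(i2s->bclk_on))
                    i2s->bclk_on = NULL;    /* state absent: skip toggling */
    }
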
 
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 0197042..1ecdb91 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 #ifndef _ASM_GENERIC_FCNTL_H
 #define _ASM_GENERIC_FCNTL_H
 
@@ -90,7 +91,7 @@
 
 /* a horrid kludge trying to make sure that this will fail on old kernels */
 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
+#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
 
 #ifndef O_NDELAY
 #define O_NDELAY       O_NONBLOCK
 #define F_GETSIG       11      /* for sockets. */
 #endif
 
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
 #ifndef F_GETLK64
 #define F_GETLK64      12      /*  using 'struct flock64' */
 #define F_SETLK64      13
 #define F_SETLKW64     14
 #endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
 
 #ifndef F_SETOWN_EX
 #define F_SETOWN_EX    15
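
The new guard exposes the 64-bit lock commands to 32-bit userspace, which needs
them for large-file locking, and to the kernel itself, while hiding them from
64-bit userspace, where plain F_SETLK already takes 64-bit offsets. A quick probe
of the effect (assumes installed uapi headers):

    #include <linux/fcntl.h>   /* pulls in asm-generic/fcntl.h */
    #include <stdio.h>

    int main(void)
    {
    #ifdef F_SETLK64
            /* 32-bit userspace (or kernel code): the *64 commands exist */
            printf("F_SETLK64 = %d\n", F_SETLK64);
    #else
            /* 64-bit userspace: F_SETLK already takes 64-bit offsets */
            printf("no F_SETLK64; F_SETLK = %d\n", F_SETLK);
    #endif
            return 0;
    }
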
@@ -178,6 +181,10 @@ struct f_owner_ex {
                                   blocking */
 #define LOCK_UN                8       /* remove lock */
 
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
 #define LOCK_MAND      32      /* This is a mandatory flock ... */
 #define LOCK_READ      64      /* which allows concurrent read operations */
 #define LOCK_WRITE     128     /* which allows concurrent write operations */
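
The kernel dropped LOCK_MAND handling entirely, so the flags survive only for
source compatibility. A short demonstration; on kernels with the support removed
the call should fail, most likely with EINVAL:

    #define _GNU_SOURCE        /* glibc exposes LOCK_MAND under _GNU_SOURCE */
    #include <sys/file.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            if (flock(0, LOCK_MAND | LOCK_READ) == -1)
                    perror("flock(LOCK_MAND)");  /* expected: Invalid argument */
            return 0;
    }
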
@@ -185,6 +192,7 @@ struct f_owner_ex {
 
 #define F_LINUX_SPECIFIC_BASE  1024
 
+#ifndef HAVE_ARCH_STRUCT_FLOCK
 struct flock {
        short   l_type;
        short   l_whence;
@@ -209,5 +217,6 @@ struct flock64 {
        __ARCH_FLOCK64_PAD
 #endif
 };
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
 
 #endif /* _ASM_GENERIC_FCNTL_H */
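
HAVE_ARCH_STRUCT_FLOCK lets an architecture with an incompatible layout keep its
own definition while every other architecture inherits the generic structs. A
hedged sketch of how an arch uapi header could opt out (the arch and its padding
field are hypothetical):

    /* arch/foo/include/uapi/asm/fcntl.h */
    #include <linux/types.h>

    #define HAVE_ARCH_STRUCT_FLOCK

    struct flock {
            short           l_type;
            short           l_whence;
            __kernel_off_t  l_start;
            __kernel_off_t  l_len;
            __kernel_pid_t  l_pid;
            short           __arch_pad;     /* arch-specific extra field */
    };

    #include <asm-generic/fcntl.h>  /* generic struct flock is now skipped */
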
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 811897d..860f867 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -2084,7 +2084,7 @@ struct kvm_stats_header {
 #define KVM_STATS_UNIT_SECONDS         (0x2 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_CYCLES          (0x3 << KVM_STATS_UNIT_SHIFT)
 #define KVM_STATS_UNIT_BOOLEAN         (0x4 << KVM_STATS_UNIT_SHIFT)
-#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_CYCLES
+#define KVM_STATS_UNIT_MAX             KVM_STATS_UNIT_BOOLEAN
 
 #define KVM_STATS_BASE_SHIFT           8
 #define KVM_STATS_BASE_MASK            (0xF << KVM_STATS_BASE_SHIFT)
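
KVM_STATS_UNIT_MAX lets consumers sanity-check the unit field of a stats
descriptor, so it has to name the newest unit, now BOOLEAN. A sketch of the
userspace-side check this enables, using the mask defined alongside these values:

    #include <linux/kvm.h>

    /* Reject descriptors whose unit is newer than the ABI this code knows. */
    static int stats_unit_valid(__u32 flags)
    {
            return (flags & KVM_STATS_UNIT_MASK) <= KVM_STATS_UNIT_MAX;
    }
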
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 71b3066..616ed40 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,6 +3,6 @@
 TEST_PROGS := gpio-mockup.sh gpio-sim.sh
 TEST_FILES := gpio-mockup-sysfs.sh
 TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name
-CFLAGS += -O2 -g -Wall -I../../../../usr/include/
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
 
 include ../lib.mk
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 4158da0..2237d1a 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -82,8 +82,9 @@ static int next_cpu(int cpu)
        return cpu;
 }
 
-static void *migration_worker(void *ign)
+static void *migration_worker(void *__rseq_tid)
 {
+       pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
        cpu_set_t allowed_mask;
        int r, i, cpu;
 
@@ -106,7 +107,7 @@ static void *migration_worker(void *ign)
                 * stable, i.e. while changing affinity is in-progress.
                 */
                smp_wmb();
-               r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask);
+               r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
                TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
                            errno, strerror(errno));
                smp_wmb();
@@ -231,7 +232,8 @@ int main(int argc, char *argv[])
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        ucall_init(vm, NULL);
 
-       pthread_create(&migration_thread, NULL, migration_worker, 0);
+       pthread_create(&migration_thread, NULL, migration_worker,
+                      (void *)(unsigned long)gettid());
 
        for (i = 0; !done; i++) {
                vcpu_run(vm, VCPU_ID);
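
The fix passes the main thread's TID through pthread_create()'s void * argument
so the worker's sched_setaffinity() migrates the main (vCPU) thread instead of
the worker itself, since tid 0 means the calling thread. A standalone sketch of
the technique; gettid() needs glibc 2.30+, older systems can use
syscall(SYS_gettid):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
            pid_t target = (pid_t)(unsigned long)arg;  /* TID passed via void * */
            cpu_set_t mask;

            CPU_ZERO(&mask);
            CPU_SET(0, &mask);
            /* pin the *target* thread; 0 here would pin the worker instead */
            if (sched_setaffinity(target, sizeof(mask), &mask))
                    perror("sched_setaffinity");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, worker, (void *)(unsigned long)gettid());
            pthread_join(t, NULL);
            return 0;
    }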