Merge tag 'timers-urgent-2020-03-29' of git://git.kernel.org/pub/scm/linux/kernel...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 29 Mar 2020 17:36:29 +0000 (10:36 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 29 Mar 2020 17:36:29 +0000 (10:36 -0700)
Pull timer fix from Thomas Gleixner:
 "A single fix for the Hyper-V clocksource driver to make sched clock
  actually return nanoseconds and not the virtual clock value which
  increments at 10^7 Hz (one tick every 100 ns)"

* tag 'timers-urgent-2020-03-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource/drivers/hyper-v: Make sched clock return nanoseconds correctly
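
For context, the bug fixed here is a unit mismatch: the Hyper-V reference counter ticks
in 100 ns units (10^7 Hz), so a sched clock built on it has to scale the raw counter
value into nanoseconds before returning it. A minimal sketch of that conversion, using
an illustrative helper name rather than the driver's actual function:

    #include <linux/types.h>

    #define HV_TICK_NS	100ULL	/* one Hyper-V reference-counter tick = 100 ns */

    /* Illustrative only: scale a raw 100 ns-unit counter value to nanoseconds. */
    static inline u64 hv_counter_to_ns(u64 raw_count)
    {
            return raw_count * HV_TICK_NS;
    }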

316 files changed:
.mailmap
Documentation/devicetree/bindings/crypto/allwinner,sun4i-a10-crypto.yaml
Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt
Documentation/devicetree/bindings/vendor-prefixes.yaml
Documentation/driver-api/dmaengine/provider.rst
Documentation/filesystems/zonefs.txt
Documentation/virt/kvm/amd-memory-encryption.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/dm8148-evm.dts
arch/arm/boot/dts/dm8148-t410.dts
arch/arm/boot/dts/dra62x-j5eco-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/exynos4412-galaxy-s3.dtsi
arch/arm/boot/dts/exynos4412-n710x.dts
arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
arch/arm/boot/dts/motorola-mapphone-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/ox810se.dtsi
arch/arm/boot/dts/ox820.dtsi
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
arch/arm/boot/dts/sun8i-a83t.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a-rdb.dts
arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts
arch/arm64/boot/dts/sprd/sc9863a.dtsi
arch/arm64/crypto/chacha-neon-glue.c
arch/arm64/include/asm/alternative.h
arch/parisc/Kconfig
arch/parisc/Makefile
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/kasan/kasan_init_32.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/configs/defconfig
arch/riscv/configs/rv32_defconfig
arch/riscv/include/asm/clint.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/smp.c
arch/riscv/lib/Makefile
arch/x86/kvm/lapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/ioremap.c
arch/x86/net/bpf_jit_comp32.c
drivers/acpi/apei/ghes.c
drivers/bus/sunxi-rsb.c
drivers/bus/ti-sysc.c
drivers/clk/imx/clk-imx8mp.c
drivers/clk/imx/clk-scu.c
drivers/clk/ti/clk-43xx.c
drivers/dma/dmaengine.c
drivers/dma/idxd/cdev.c
drivers/dma/ti/k3-udma-glue.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-nvidia-gpu.c
drivers/i2c/busses/i2c-pca-platform.c
drivers/i2c/busses/i2c-st.c
drivers/infiniband/core/device.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/security.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/cq.c
drivers/input/input.c
drivers/input/keyboard/tm2-touchkey.c
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_f11.c
drivers/input/touchscreen/raydium_i2c_ts.c
drivers/net/Kconfig
drivers/net/caif/caif_spi.c
drivers/net/can/slcan.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/neterion/vxge/vxge-config.h
drivers/net/ethernet/neterion/vxge/vxge-main.h
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
drivers/net/ethernet/pensando/ionic/ionic_if.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_regs.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/geneve.c
drivers/net/ifb.c
drivers/net/macsec.c
drivers/net/netdevsim/ipsec.c
drivers/net/phy/dp83867.c
drivers/net/phy/mdio-bcm-unimac.c
drivers/net/phy/mdio-mux-bcm-iproc.c
drivers/net/phy/sfp-bus.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/net/wireguard/device.c
drivers/net/wireguard/netlink.c
drivers/net/wireguard/noise.c
drivers/net/wireguard/noise.h
drivers/net/wireguard/peer.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
drivers/net/wireless/ti/wlcore/main.c
drivers/nfc/fdp/fdp.c
drivers/nvme/host/rdma.c
drivers/nvme/target/tcp.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/sd.c
drivers/soc/fsl/dpio/dpio-driver.c
drivers/soc/samsung/exynos-chipid.c
drivers/tee/amdtee/core.c
fs/afs/cmservice.c
fs/afs/fs_probe.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/btrfs/block-group.c
fs/btrfs/inode.c
fs/ceph/file.c
fs/ceph/snap.c
fs/eventpoll.c
fs/file.c
fs/io_uring.c
fs/zonefs/super.c
include/linux/bpf.h
include/linux/ceph/messenger.h
include/linux/ceph/osdmap.h
include/linux/ceph/rados.h
include/linux/clk-provider.h
include/linux/dsa/8021q.h
include/linux/file.h
include/linux/i2c.h
include/linux/ieee80211.h
include/linux/netlink.h
include/linux/page-flags.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/vmalloc.h
include/net/af_rxrpc.h
include/net/sch_generic.h
include/trace/events/afs.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/serio.h
kernel/bpf/bpf_struct_ops.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/syscall.c
kernel/irq/manage.c
kernel/notifier.c
kernel/trace/bpf_trace.c
lib/crypto/chacha20poly1305-selftest.c
mm/madvise.c
mm/memcontrol.c
mm/mmu_notifier.c
mm/nommu.c
mm/slub.c
mm/sparse.c
mm/vmalloc.c
net/Kconfig
net/bpfilter/main.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/dev.c
net/core/pktgen.c
net/core/sock_map.c
net/dsa/tag_8021q.c
net/dsa/tag_brcm.c
net/dsa/tag_sja1105.c
net/ethtool/debug.c
net/ethtool/linkinfo.c
net/ethtool/linkmodes.c
net/ethtool/netlink.c
net/ethtool/wol.c
net/hsr/hsr_framereg.c
net/hsr/hsr_netlink.c
net/hsr/hsr_slave.c
net/ipv4/Kconfig
net/ipv4/bpf_tcp_ca.c
net/ipv4/fib_frontend.c
net/ipv4/ip_gre.c
net/ipv4/ip_vti.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/ipv6/ip6_vti.c
net/ipv6/xfrm6_tunnel.c
net/mac80211/debugfs_sta.c
net/mac80211/key.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/tx.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_ip.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_fwd_netdev.c
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/packet/internal.h
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/input.c
net/rxrpc/sendmsg.c
net/sched/act_ct.c
net/sched/act_mirred.c
net/sched/cls_route.c
net/sched/cls_tcindex.c
net/sched/sch_cbs.c
net/socket.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/dtc/dtc-lexer.l
scripts/parse-maintainers.pl
tools/include/uapi/linux/in.h
tools/perf/Makefile
tools/perf/util/map.c
tools/perf/util/parse-events.c
tools/perf/util/probe-file.c
tools/perf/util/probe-finder.c
tools/perf/util/setup.py
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.c
tools/scripts/Makefile.include
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_send_signal_kern.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/verifier/jmp32.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/forwarding/Makefile [new file with mode: 0644]
tools/testing/selftests/net/forwarding/ethtool_lib.sh [changed mode: 0755->0644]
tools/testing/selftests/net/reuseport_addr_any.c
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/config
tools/testing/selftests/netfilter/nf-queue.c [new file with mode: 0644]
tools/testing/selftests/netfilter/nft_queue.sh [new file with mode: 0755]
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/Makefile
tools/testing/selftests/wireguard/qemu/init.c
tools/testing/selftests/wireguard/qemu/kernel.config

index ffb8f28..a0dfce8 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -225,6 +225,7 @@ Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
+Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
 Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
 Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
 Rajesh Shah <rajesh.shah@intel.com>
index 33c7842..8b9a8f3 100644 (file)
@@ -23,6 +23,8 @@ properties:
       - items:
         - const: allwinner,sun7i-a20-crypto
         - const: allwinner,sun4i-a10-crypto
+      - items:
+        - const: allwinner,sun8i-a33-crypto
 
   reg:
     maxItems: 1
index ef2ae72..921172f 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
     * "cypress,tm2-touchkey" - for the touchkey found on the tm2 board
     * "cypress,midas-touchkey" - for the touchkey found on midas boards
     * "cypress,aries-touchkey" - for the touchkey found on aries boards
+    * "coreriver,tc360-touchkey" - for the Coreriver TouchCore 360 touchkey
 - reg: I2C address of the chip.
 - interrupts: interrupt to which the chip is connected (see interrupt
        binding[0]).
index 9e67944..b3c8c62 100644 (file)
@@ -205,6 +205,8 @@ patternProperties:
     description: Colorful GRP, Shenzhen Xueyushi Technology Ltd.
   "^compulab,.*":
     description: CompuLab Ltd.
+  "^coreriver,.*":
+    description: CORERIVER Semiconductor Co.,Ltd.
   "^corpro,.*":
     description: Chengdu Corpro Technology Co., Ltd.
   "^cortina,.*":
index 790a150..56e5833 100644 (file)
@@ -266,11 +266,15 @@ to use.
   attached (via the dmaengine_desc_attach_metadata() helper to the descriptor.
 
   From the DMA driver the following is expected for this mode:
+
   - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM
+
     The data from the provided metadata buffer should be prepared for the DMA
     controller to be sent alongside of the payload data. Either by copying to a
     hardware descriptor, or highly coupled packet.
+
   - DMA_DEV_TO_MEM
+
     On transfer completion the DMA driver must copy the metadata to the client
     provided metadata buffer before notifying the client about the completion.
     After the transfer completion, DMA drivers must not touch the metadata
@@ -284,10 +288,14 @@ to use.
   and dmaengine_desc_set_metadata_len() is provided as helper functions.
 
   From the DMA driver the following is expected for this mode:
-  - get_metadata_ptr
+
+  - get_metadata_ptr()
+
     Should return a pointer for the metadata buffer, the maximum size of the
     metadata buffer and the currently used / valid (if any) bytes in the buffer.
-  - set_metadata_len
+
+  - set_metadata_len()
+
     It is called by the clients after it have placed the metadata to the buffer
     to let the DMA driver know the number of valid bytes provided.
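
From the client side, the pointer-mode calls described above pair up roughly as in the
following sketch. This is a hedged illustration of the dmaengine client API only;
attach_tx_metadata, my_hdr and my_hdr_len are made-up names, and desc is assumed to be a
descriptor the client has already prepared for a MEM_TO_DEV transfer:

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    /* Illustrative only: place client metadata for a MEM_TO_DEV transfer. */
    static int attach_tx_metadata(struct dma_async_tx_descriptor *desc,
                                  const void *my_hdr, size_t my_hdr_len)
    {
            size_t payload_len, max_len;
            void *buf;

            buf = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
            if (IS_ERR(buf))
                    return PTR_ERR(buf);
            if (my_hdr_len > max_len)
                    return -EINVAL;

            memcpy(buf, my_hdr, my_hdr_len);
            /* Tell the DMA driver how many metadata bytes are now valid. */
            return dmaengine_desc_set_metadata_len(desc, my_hdr_len);
    }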
 
index d54fa98..78813c3 100644 (file)
@@ -258,11 +258,11 @@ conditions.
     |    option    | condition | size     read    write    read    write |
     +--------------+-----------+-----------------------------------------+
     |              | good      | fixed    yes     no       yes     yes   |
-    | remount-ro   | read-only | fixed    yes     no       yes     no    |
+    | remount-ro   | read-only | as is    yes     no       yes     no    |
     | (default)    | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
     |              | good      | fixed    yes     no       yes     yes   |
-    | zone-ro      | read-only | fixed    yes     no       yes     no    |
+    | zone-ro      | read-only | as is    yes     no       yes     no    |
     |              | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
     |              | good      |   0      no      no       yes     yes   |
@@ -270,7 +270,7 @@ conditions.
     |              | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
     |              | good      | fixed    yes     yes      yes     yes   |
-    | repair       | read-only | fixed    yes     no       yes     no    |
+    | repair       | read-only | as is    yes     no       yes     no    |
     |              | offline   |   0      no      no       no      no    |
     +--------------+-----------+-----------------------------------------+
 
@@ -307,8 +307,16 @@ condition changes. The defined behaviors are as follow:
 * zone-offline
 * repair
 
-The I/O error actions defined for each behavior are detailed in the previous
-section.
+The run-time I/O error actions defined for each behavior are detailed in the
+previous section. Mount time I/O errors will cause the mount operation to fail.
+The handling of read-only zones also differs between mount-time and run-time.
+If a read-only zone is found at mount time, the zone is always treated in the
+same manner as offline zones, that is, all accesses are disabled and the zone
+file size set to 0. This is necessary as the write pointer of read-only zones
+is defined as invalid by the ZBC and ZAC standards, making it impossible to
+discover the amount of data that has been written to the zone. In the case of a
+read-only zone discovered at run-time, as indicated in the previous section,
+the size of the zone file is left unchanged from its last updated value.
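
For context, the error behaviors discussed above are selected at mount time via the
errors= mount option (the behavior names are the ones listed in the table above; treat
the exact option syntax as an assumption here). A hedged userspace sketch, with
placeholder device and mount-point paths:

    #include <stdio.h>
    #include <sys/mount.h>

    /* Illustrative only: mount a zonefs volume keeping read-only zones readable. */
    int main(void)
    {
            /* /dev/nvme0n1 and /mnt/zonefs are placeholder paths. */
            if (mount("/dev/nvme0n1", "/mnt/zonefs", "zonefs", 0, "errors=zone-ro"))
                    perror("mount zonefs");
            return 0;
    }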
 
 Zonefs User Space Tools
 =======================
index d18c97b..c3129b9 100644 (file)
@@ -53,6 +53,29 @@ key management interface to perform common hypervisor activities such as
 encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
 information, see the SEV Key Management spec [api-spec]_
 
+The main ioctl to access SEV is KVM_MEM_ENCRYPT_OP.  If the argument
+to KVM_MEM_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
+and ``ENOTTY`` if it is disabled (on some older versions of Linux,
+the ioctl runs normally even with a NULL argument, and therefore will
+likely return ``EFAULT``).  If non-NULL, the argument to KVM_MEM_ENCRYPT_OP
+must be a struct kvm_sev_cmd::
+
+       struct kvm_sev_cmd {
+               __u32 id;
+               __u64 data;
+               __u32 error;
+               __u32 sev_fd;
+       };
+
+
+The ``id`` field contains the subcommand, and the ``data`` field points to
+another struct containing arguments specific to the command.  The ``sev_fd``
+should point to a file descriptor that is opened on the ``/dev/sev``
+device, if needed (see individual commands).
+
+On output, ``error`` is zero on success, or an error code.  Error codes
+are defined in ``<linux/psp-dev.h>``.
+
 KVM implements the following commands to support common lifecycle events of SEV
 guests, such as launching, running, snapshotting, migrating and decommissioning.
 
@@ -90,6 +113,8 @@ Returns: 0 on success, -negative on error
 
 On success, the 'handle' field contains a new handle and on error, a negative value.
 
+KVM_SEV_LAUNCH_START requires the ``sev_fd`` field to be valid.
+
 For more details, see SEV spec Section 6.2.
 
 3. KVM_SEV_LAUNCH_UPDATE_DATA
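
To make the ioctl interface documented above concrete, here is a hedged userspace sketch
that issues an SEV subcommand on a VM file descriptor. It assumes the uapi spelling
KVM_MEMORY_ENCRYPT_OP from <linux/kvm.h>, a vm_fd obtained from KVM_CREATE_VM and a
sev_fd from opening /dev/sev; sev_issue_cmd is an illustrative name, not a kernel or
library function:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative only: issue one SEV subcommand on an existing VM fd. */
    static int sev_issue_cmd(int vm_fd, int sev_fd, uint32_t id, uint64_t data)
    {
            struct kvm_sev_cmd cmd = {
                    .id = id,
                    .data = data,
                    .sev_fd = (uint32_t)sev_fd,
            };
            int ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);

            if (ret)
                    fprintf(stderr, "SEV cmd %u failed: ret=%d error=%u\n",
                            id, ret, cmd.error);
            return ret;
    }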
index cc1d18c..5a5332b 100644 (file)
@@ -7516,6 +7516,12 @@ F:       include/uapi/linux/if_hippi.h
 F:     net/802/hippi.c
 F:     drivers/net/hippi/
 
+HISILICON DMA DRIVER
+M:     Zhou Wang <wangzhou1@hisilicon.com>
+L:     dmaengine@vger.kernel.org
+S:     Maintained
+F:     drivers/dma/hisi_dma.c
+
 HISILICON SECURITY ENGINE V2 DRIVER (SEC2)
 M:     Zaibo Xu <xuzaibo@huawei.com>
 L:     linux-crypto@vger.kernel.org
@@ -7573,7 +7579,8 @@ F:        Documentation/admin-guide/perf/hisi-pmu.rst
 
 HISILICON ROCE DRIVER
 M:     Lijun Ou <oulijun@huawei.com>
-M:     Wei Hu(Xavier) <xavier.huwei@huawei.com>
+M:     Wei Hu(Xavier) <huwei87@hisilicon.com>
+M:     Weihang Li <liweihang@huawei.com>
 L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/hw/hns/
@@ -8476,7 +8483,6 @@ L:        dmaengine@vger.kernel.org
 S:     Supported
 F:     drivers/dma/idxd/*
 F:     include/uapi/linux/idxd.h
-F:     include/linux/idxd.h
 
 INTEL IDLE DRIVER
 M:     Jacob Pan <jacob.jun.pan@linux.intel.com>
@@ -8683,7 +8689,7 @@ M:        Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 M:     Luca Coelho <luciano.coelho@intel.com>
 M:     Intel Linux Wireless <linuxwifi@intel.com>
 L:     linux-wireless@vger.kernel.org
-W:     http://intellinuxwireless.org
+W:     https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
 S:     Supported
 F:     drivers/net/wireless/intel/iwlwifi/
@@ -15416,11 +15422,9 @@ F:     drivers/infiniband/sw/siw/
 F:     include/uapi/rdma/siw-abi.h
 
 SOFT-ROCE DRIVER (rxe)
-M:     Moni Shoua <monis@mellanox.com>
+M:     Zhu Yanjun <yanjunz@mellanox.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
-W:     https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
-Q:     http://patchwork.kernel.org/project/linux-rdma/list/
 F:     drivers/infiniband/sw/rxe/
 F:     include/uapi/rdma/rdma_user_rxe.h
 
@@ -16750,7 +16754,7 @@ Q:      http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/platform/ti-vpe/
 F:     Documentation/devicetree/bindings/media/ti,vpe.yaml
-       Documentation/devicetree/bindings/media/ti,cal.yaml
+F:     Documentation/devicetree/bindings/media/ti,cal.yaml
 
 TI WILINK WIRELESS DRIVERS
 L:     linux-wireless@vger.kernel.org
index 16d8271..e56bf7e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index b75af21..4c3f606 100644 (file)
 &sdhci {
        #address-cells = <1>;
        #size-cells = <0>;
+       pinctrl-names = "default";
        pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
        bus-width = <4>;
        mmc-pwrseq = <&wifi_pwrseq>;
index 394c8a7..fd2c766 100644 (file)
@@ -15,6 +15,7 @@
                firmware: firmware {
                        compatible = "raspberrypi,bcm2835-firmware", "simple-bus";
                        mboxes = <&mailbox>;
+                       dma-ranges;
                };
 
                power: power {
index 3931fb0..91d1018 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &davinci_mdio {
index 9e43d5e..79ccdd4 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &davinci_mdio {
index 861ab90..c16e183 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&ethphy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &cpsw_emac1 {
        phy-handle = <&ethphy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
 };
 
 &davinci_mdio {
index 4305051..5f5ee16 100644 (file)
                #address-cells = <1>;
                #size-cells = <1>;
                ranges = <0x0 0x0 0x0 0xc0000000>;
+               dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
                ti,hwmods = "l3_main_1", "l3_main_2";
                reg = <0x0 0x44000000 0x0 0x1000000>,
                      <0x0 0x45000000 0x0 0x1000>;
index 31719c0..44f9754 100644 (file)
@@ -33,7 +33,7 @@
                };
        };
 
-       lcd_vdd3_reg: voltage-regulator-6 {
+       lcd_vdd3_reg: voltage-regulator-7 {
                compatible = "regulator-fixed";
                regulator-name = "LCD_VDD_2.2V";
                regulator-min-microvolt = <2200000>;
@@ -42,7 +42,7 @@
                enable-active-high;
        };
 
-       ps_als_reg: voltage-regulator-7 {
+       ps_als_reg: voltage-regulator-8 {
                compatible = "regulator-fixed";
                regulator-name = "LED_A_3.0V";
                regulator-min-microvolt = <3000000>;
index 98cd128..4189e1f 100644 (file)
@@ -13,7 +13,7 @@
 
        /* bootargs are passed in by bootloader */
 
-       cam_vdda_reg: voltage-regulator-6 {
+       cam_vdda_reg: voltage-regulator-7 {
                compatible = "regulator-fixed";
                regulator-name = "CAM_SENSOR_CORE_1.2V";
                regulator-min-microvolt = <1200000>;
index 4d18952..77d8713 100644 (file)
                regulators {
                        vdd_arm: buck1 {
                                regulator-name = "vdd_arm";
-                               regulator-min-microvolt = <730000>;
+                               regulator-min-microvolt = <925000>;
                                regulator-max-microvolt = <1380000>;
                                regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
                                regulator-always-on;
 
                        vdd_soc: buck2 {
                                regulator-name = "vdd_soc";
-                               regulator-min-microvolt = <730000>;
+                               regulator-min-microvolt = <1150000>;
                                regulator-max-microvolt = <1380000>;
                                regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
                                regulator-always-on;
index b6e82b1..9067e0e 100644 (file)
                reset-gpios = <&gpio6 13 GPIO_ACTIVE_HIGH>; /* gpio173 */
 
                /* gpio_183 with sys_nirq2 pad as wakeup */
-               interrupts-extended = <&gpio6 23 IRQ_TYPE_EDGE_FALLING>,
+               interrupts-extended = <&gpio6 23 IRQ_TYPE_LEVEL_LOW>,
                                      <&omap4_pmx_core 0x160>;
                interrupt-names = "irq", "wakeup";
                wakeup-source;
index c3c6d7d..4089d97 100644 (file)
                compatible = "ti,omap2-onenand";
                reg = <0 0 0x20000>;    /* CS0, offset 0, IO size 128K */
 
+               /*
+                * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
+                * bootloader set values when booted with v5.1
+                * (OneNAND Manufacturer: Samsung):
+                *
+                *   cs0 GPMC_CS_CONFIG1: 0xfb001202
+                *   cs0 GPMC_CS_CONFIG2: 0x00111100
+                *   cs0 GPMC_CS_CONFIG3: 0x00020200
+                *   cs0 GPMC_CS_CONFIG4: 0x11001102
+                *   cs0 GPMC_CS_CONFIG5: 0x03101616
+                *   cs0 GPMC_CS_CONFIG6: 0x90060000
+                */
                gpmc,sync-read;
                gpmc,sync-write;
                gpmc,burst-length = <16>;
                gpmc,burst-read;
                gpmc,burst-wrap;
                gpmc,burst-write;
-               gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
-               gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
+               gpmc,device-width = <2>;
+               gpmc,mux-add-data = <2>;
                gpmc,cs-on-ns = <0>;
-               gpmc,cs-rd-off-ns = <87>;
-               gpmc,cs-wr-off-ns = <87>;
+               gpmc,cs-rd-off-ns = <102>;
+               gpmc,cs-wr-off-ns = <102>;
                gpmc,adv-on-ns = <0>;
-               gpmc,adv-rd-off-ns = <10>;
-               gpmc,adv-wr-off-ns = <10>;
-               gpmc,oe-on-ns = <15>;
-               gpmc,oe-off-ns = <87>;
+               gpmc,adv-rd-off-ns = <12>;
+               gpmc,adv-wr-off-ns = <12>;
+               gpmc,oe-on-ns = <12>;
+               gpmc,oe-off-ns = <102>;
                gpmc,we-on-ns = <0>;
-               gpmc,we-off-ns = <87>;
-               gpmc,rd-cycle-ns = <112>;
-               gpmc,wr-cycle-ns = <112>;
-               gpmc,access-ns = <81>;
-               gpmc,page-burst-access-ns = <15>;
+               gpmc,we-off-ns = <102>;
+               gpmc,rd-cycle-ns = <132>;
+               gpmc,wr-cycle-ns = <132>;
+               gpmc,access-ns = <96>;
+               gpmc,page-burst-access-ns = <18>;
                gpmc,bus-turnaround-ns = <0>;
                gpmc,cycle2cycle-delay-ns = <0>;
                gpmc,wait-monitoring-ns = <0>;
-               gpmc,clk-activation-ns = <5>;
-               gpmc,wr-data-mux-bus-ns = <30>;
-               gpmc,wr-access-ns = <81>;
+               gpmc,clk-activation-ns = <6>;
+               gpmc,wr-data-mux-bus-ns = <36>;
+               gpmc,wr-access-ns = <96>;
                gpmc,sync-clk-ps = <15000>;
 
                /*
index d0ecf54..a7562d3 100644 (file)
                #address-cells = <1>;
                #size-cells = <1>;
                ranges = <0 0 0 0xc0000000>;
+               dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
                ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
                reg = <0 0x44000000 0 0x2000>,
                      <0 0x44800000 0 0x3000>,
index 9f6c2b6..0755e58 100644 (file)
                                        interrupt-controller;
                                        reg = <0 0x200>;
                                        #interrupt-cells = <1>;
-                                       valid-mask = <0xFFFFFFFF>;
-                                       clear-mask = <0>;
+                                       valid-mask = <0xffffffff>;
+                                       clear-mask = <0xffffffff>;
                                };
 
                                timer0: timer@200 {
index c9b3277..90846a7 100644 (file)
                                        reg = <0 0x200>;
                                        interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                                        #interrupt-cells = <1>;
-                                       valid-mask = <0xFFFFFFFF>;
-                                       clear-mask = <0>;
+                                       valid-mask = <0xffffffff>;
+                                       clear-mask = <0xffffffff>;
                                };
 
                                timer0: timer@200 {
index 1532a0e..a2c37ad 100644 (file)
                };
 
                crypto: crypto-engine@1c15000 {
-                       compatible = "allwinner,sun4i-a10-crypto";
+                       compatible = "allwinner,sun8i-a33-crypto";
                        reg = <0x01c15000 0x1000>;
                        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_BUS_SS>, <&ccu CLK_SS>;
index 2fd31a0..e8b3669 100644 (file)
 };
 
 &reg_dldo3 {
-       regulator-min-microvolt = <2800000>;
-       regulator-max-microvolt = <2800000>;
+       regulator-min-microvolt = <1800000>;
+       regulator-max-microvolt = <1800000>;
        regulator-name = "vdd-csi";
 };
 
 };
 
 &usbphy {
-       usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
+       usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */
+       usb0_vbus_power-supply = <&usb_power_supply>;
        usb0_vbus-supply = <&reg_drivevbus>;
        usb1_vbus-supply = <&reg_vmain>;
        usb2_vbus-supply = <&reg_vmain>;
index 74ac7ee..e7b9bef 100644 (file)
                        reg = <0x01c30000 0x104>;
                        interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "macirq";
-                       resets = <&ccu CLK_BUS_EMAC>;
-                       reset-names = "stmmaceth";
-                       clocks = <&ccu RST_BUS_EMAC>;
+                       clocks = <&ccu CLK_BUS_EMAC>;
                        clock-names = "stmmaceth";
+                       resets = <&ccu RST_BUS_EMAC>;
+                       reset-names = "stmmaceth";
                        status = "disabled";
 
                        mdio: mdio {
index 8f09a24..a9d5d6d 100644 (file)
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
                };
 
+               spi0: spi@1c05000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c05000 0x1000>;
+                       interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI0>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
+               spi1: spi@1c06000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c06000 0x1000>;
+                       interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI1>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                csi0: csi@1c09000 {
                        compatible = "allwinner,sun8i-r40-csi0",
                                     "allwinner,sun7i-a20-csi0";
                        resets = <&ccu RST_BUS_CE>;
                };
 
+               spi2: spi@1c17000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c17000 0x1000>;
+                       interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI2>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
+               ahci: sata@1c18000 {
+                       compatible = "allwinner,sun8i-r40-ahci";
+                       reg = <0x01c18000 0x1000>;
+                       interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>;
+                       resets = <&ccu RST_BUS_SATA>;
+                       reset-names = "ahci";
+                       status = "disabled";
+               };
+
                ehci1: usb@1c19000 {
                        compatible = "allwinner,sun8i-r40-ehci", "generic-ehci";
                        reg = <0x01c19000 0x100>;
                        status = "disabled";
                };
 
+               spi3: spi@1c1f000 {
+                       compatible = "allwinner,sun8i-r40-spi",
+                                    "allwinner,sun8i-h3-spi";
+                       reg = <0x01c1f000 0x1000>;
+                       interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>;
+                       clock-names = "ahb", "mod";
+                       resets = <&ccu RST_BUS_SPI3>;
+                       status = "disabled";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+               };
+
                ccu: clock@1c20000 {
                        compatible = "allwinner,sun8i-r40-ccu";
                        reg = <0x01c20000 0x400>;
                        #size-cells = <0>;
                };
 
-               spi0: spi@1c05000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c05000 0x1000>;
-                       interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI0>, <&ccu CLK_SPI0>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI0>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               spi1: spi@1c06000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c06000 0x1000>;
-                       interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI1>, <&ccu CLK_SPI1>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI1>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               spi2: spi@1c07000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c07000 0x1000>;
-                       interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI2>, <&ccu CLK_SPI2>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI2>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               spi3: spi@1c0f000 {
-                       compatible = "allwinner,sun8i-r40-spi",
-                                    "allwinner,sun8i-h3-spi";
-                       reg = <0x01c0f000 0x1000>;
-                       interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SPI3>, <&ccu CLK_SPI3>;
-                       clock-names = "ahb", "mod";
-                       resets = <&ccu RST_BUS_SPI3>;
-                       status = "disabled";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-               };
-
-               ahci: sata@1c18000 {
-                       compatible = "allwinner,sun8i-r40-ahci";
-                       reg = <0x01c18000 0x1000>;
-                       interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_BUS_SATA>, <&ccu CLK_SATA>;
-                       resets = <&ccu RST_BUS_SATA>;
-                       reset-names = "ahci";
-                       status = "disabled";
-
-               };
-
                gmac: ethernet@1c50000 {
                        compatible = "allwinner,sun8i-r40-gmac";
                        syscon = <&ccu>;
index 0bf375e..55b71bb 100644 (file)
@@ -53,7 +53,7 @@
                 * PSCI node is not added default, U-boot will add missing
                 * parts if it determines to use PSCI.
                 */
-               entry-method = "arm,psci";
+               entry-method = "psci";
 
                CPU_PW20: cpu-pw20 {
                          compatible = "arm,idle-state";
index 4223a23..dde50c8 100644 (file)
 
        ethernet@e4000 {
                phy-handle = <&rgmii_phy1>;
-               phy-connection-type = "rgmii-txid";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e6000 {
                phy-handle = <&rgmii_phy2>;
-               phy-connection-type = "rgmii-txid";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e8000 {
index dbc23d6..d53ccc5 100644 (file)
 &fman0 {
        ethernet@e4000 {
                phy-handle = <&rgmii_phy1>;
-               phy-connection-type = "rgmii";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e6000 {
                phy-handle = <&rgmii_phy2>;
-               phy-connection-type = "rgmii";
+               phy-connection-type = "rgmii-id";
        };
 
        ethernet@e8000 {
index cd80756..2c590ca 100644 (file)
        };
 
        idle-states {
-               entry-method = "arm,psci";
+               entry-method = "psci";
                CORE_PD: core-pd {
                        compatible = "arm,idle-state";
                        entry-latency-us = <4000>;
index c1f9660..37ca3e8 100644 (file)
@@ -55,10 +55,10 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
                        break;
                }
                chacha_4block_xor_neon(state, dst, src, nrounds, l);
-               bytes -= CHACHA_BLOCK_SIZE * 5;
-               src += CHACHA_BLOCK_SIZE * 5;
-               dst += CHACHA_BLOCK_SIZE * 5;
-               state[12] += 5;
+               bytes -= l;
+               src += l;
+               dst += l;
+               state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE);
        }
 }
 
index 324e7d5..5e5dc05 100644 (file)
@@ -221,7 +221,7 @@ alternative_endif
 
 .macro user_alt, label, oldinstr, newinstr, cond
 9999:  alternative_insn "\oldinstr", "\newinstr", \cond
-       _ASM_EXTABLE 9999b, \label
+       _asm_extable 9999b, \label
 .endm
 
 /*
index 71034b5..3801a2e 100644 (file)
@@ -79,6 +79,11 @@ config MMU
 config STACK_GROWSUP
        def_bool y
 
+config ARCH_DEFCONFIG
+       string
+       default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
+       default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
+
 config GENERIC_LOCKBREAK
        bool
        default y
index dca8f2d..628cd8b 100644 (file)
@@ -34,6 +34,13 @@ CC_ARCHES    = hppa hppa2.0 hppa1.1
 LD_BFD         := elf32-hppa-linux
 endif
 
+# select defconfig based on actual architecture
+ifeq ($(shell uname -m),parisc64)
+       KBUILD_DEFCONFIG := generic-64bit_defconfig
+else
+       KBUILD_DEFCONFIG := generic-32bit_defconfig
+endif
+
 export LD_BFD
 
 ifneq ($(SUBARCH),$(UTS_MACHINE))
index 729a0f1..db3a873 100644 (file)
@@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
+       kvmppc_mmu_destroy_pr(vcpu);
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
index 1af96fb..302e9dc 100644 (file)
@@ -759,7 +759,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        return 0;
 
 out_vcpu_uninit:
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
        return err;
 }
@@ -792,7 +791,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
        kvmppc_core_vcpu_free(vcpu);
 
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
index db5664d..d2bed3f 100644 (file)
@@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
        unsigned long k_cur;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
 
-       if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-               int ret = kasan_init_shadow_page_tables(k_start, k_end);
-
-               if (ret)
-                       panic("kasan: kasan_init_shadow_page_tables() failed");
-       }
        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                pte_t *ptep = pte_offset_kernel(pmd, k_cur);
@@ -143,7 +137,8 @@ void __init kasan_mmu_init(void)
        int ret;
        struct memblock_region *reg;
 
-       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+           IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
                if (ret)
index 1a3b5a5..cd5db57 100644 (file)
@@ -50,7 +50,6 @@ config RISCV
        select PCI_DOMAINS_GENERIC if PCI
        select PCI_MSI if PCI
        select RISCV_TIMER
-       select UACCESS_MEMCPY if !MMU
        select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_ARCH_TOPOLOGY if SMP
        select ARCH_HAS_PTE_SPECIAL
index 3078b2d..a131174 100644 (file)
@@ -12,20 +12,6 @@ config SOC_SIFIVE
 
 config SOC_VIRT
        bool "QEMU Virt Machine"
-       select VIRTIO_PCI
-       select VIRTIO_BALLOON
-       select VIRTIO_MMIO
-       select VIRTIO_CONSOLE
-       select VIRTIO_NET
-       select NET_9P_VIRTIO
-       select VIRTIO_BLK
-       select SCSI_VIRTIO
-       select DRM_VIRTIO_GPU
-       select HW_RANDOM_VIRTIO
-       select RPMSG_CHAR
-       select RPMSG_VIRTIO
-       select CRYPTO_DEV_VIRTIO
-       select VIRTIO_INPUT
        select POWER_RESET_SYSCON
        select POWER_RESET_SYSCON_POWEROFF
        select GOLDFISH
index c8f0842..2557c53 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NETLINK_DIAG=y
 CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
@@ -38,12 +39,15 @@ CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
 CONFIG_MACB=y
 CONFIG_E1000E=y
 CONFIG_R8169=y
@@ -54,13 +58,16 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
@@ -74,6 +81,12 @@ CONFIG_USB_UAS=y
 CONFIG_MMC=y
 CONFIG_MMC_SPI=y
 CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_VIRTIO=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -88,16 +101,17 @@ CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_9P_FS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_VM=y
 CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
index a844920..0292879 100644 (file)
@@ -31,6 +31,7 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_IP_PNP_RARP=y
 CONFIG_NETLINK_DIAG=y
 CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
@@ -38,12 +39,15 @@ CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_VIRTIO=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
 CONFIG_MACB=y
 CONFIG_E1000E=y
 CONFIG_R8169=y
@@ -54,11 +58,14 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
+CONFIG_DRM_VIRTIO_GPU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
@@ -70,6 +77,12 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
 CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_VIRTIO=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -84,16 +97,17 @@ CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_9P_FS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_VM=y
 CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_WQ_WATCHDOG=y
-CONFIG_SCHED_STACK_END_CHECK=y
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
index 6eaa2ee..a279b17 100644 (file)
@@ -15,12 +15,12 @@ static inline void clint_send_ipi_single(unsigned long hartid)
        writel(1, clint_ipi_base + hartid);
 }
 
-static inline void clint_send_ipi_mask(const struct cpumask *hartid_mask)
+static inline void clint_send_ipi_mask(const struct cpumask *mask)
 {
-       int hartid;
+       int cpu;
 
-       for_each_cpu(hartid, hartid_mask)
-               clint_send_ipi_single(hartid);
+       for_each_cpu(cpu, mask)
+               clint_send_ipi_single(cpuid_to_hartid_map(cpu));
 }
 
 static inline void clint_clear_ipi(unsigned long hartid)
index e430415..393f201 100644 (file)
 #include <asm/tlbflush.h>
 #include <linux/mm_types.h>
 
+#ifdef CONFIG_MMU
+
+#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
+
+#define BPF_JIT_REGION_SIZE    (SZ_128M)
+#define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END     (VMALLOC_END)
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END    (VMALLOC_START - 1)
+#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap                ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE      SZ_16M
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP      PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+
+#endif
+
 #ifdef CONFIG_64BIT
 #include <asm/pgtable-64.h>
 #else
@@ -90,31 +131,6 @@ extern pgd_t swapper_pg_dir[];
 #define __S110 PAGE_SHARED_EXEC
 #define __S111 PAGE_SHARED_EXEC
 
-#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
-#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-#define BPF_JIT_REGION_SIZE    (SZ_128M)
-#define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
-#define BPF_JIT_REGION_END     (VMALLOC_END)
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-       (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE   BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END    (VMALLOC_START - 1)
-#define VMEMMAP_START  (VMALLOC_START - VMEMMAP_SIZE)
-
-/*
- * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
- * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
- */
-#define vmemmap                ((struct page *)VMEMMAP_START)
-
 static inline int pmd_present(pmd_t pmd)
 {
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -432,18 +448,6 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#define PCI_IO_SIZE      SZ_16M
-#define PCI_IO_END       VMEMMAP_START
-#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
-
-#define FIXADDR_TOP      PCI_IO_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
-#else
-#define FIXADDR_SIZE     PGDIR_SIZE
-#endif
-#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
-
 /*
  * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
index f462a18..8ce9d60 100644 (file)
 /*
  * User space memory access functions
  */
+
+extern unsigned long __must_check __asm_copy_to_user(void __user *to,
+       const void *from, unsigned long n);
+extern unsigned long __must_check __asm_copy_from_user(void *to,
+       const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       return __asm_copy_from_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       return __asm_copy_to_user(to, from, n);
+}
+
 #ifdef CONFIG_MMU
 #include <linux/errno.h>
 #include <linux/compiler.h>
@@ -367,24 +385,6 @@ do {                                                               \
                -EFAULT;                                        \
 })
 
-
-extern unsigned long __must_check __asm_copy_to_user(void __user *to,
-       const void *from, unsigned long n);
-extern unsigned long __must_check __asm_copy_from_user(void *to,
-       const void __user *from, unsigned long n);
-
-static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       return __asm_copy_from_user(to, from, n);
-}
-
-static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       return __asm_copy_to_user(to, from, n);
-}
-
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern long __must_check strlen_user(const char __user *str);
index eb878ab..e0a6293 100644 (file)
@@ -96,7 +96,7 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
        if (IS_ENABLED(CONFIG_RISCV_SBI))
                sbi_send_ipi(cpumask_bits(&hartid_mask));
        else
-               clint_send_ipi_mask(&hartid_mask);
+               clint_send_ipi_mask(mask);
 }
 
 static void send_ipi_single(int cpu, enum ipi_message_type op)
index 47e7a82..0d0db80 100644 (file)
@@ -2,5 +2,5 @@
 lib-y                  += delay.o
 lib-y                  += memcpy.o
 lib-y                  += memset.o
-lib-$(CONFIG_MMU)      += uaccess.o
+lib-y                  += uaccess.o
 lib-$(CONFIG_64BIT)    += tishift.o
index e3099c6..7356a56 100644 (file)
@@ -1445,6 +1445,8 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
        }
 }
 
+static void cancel_hv_timer(struct kvm_lapic *apic);
+
 static void apic_update_lvtt(struct kvm_lapic *apic)
 {
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1454,6 +1456,10 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
+                       preempt_disable();
+                       if (apic->lapic_timer.hv_timer_in_use)
+                               cancel_hv_timer(apic);
+                       preempt_enable();
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
@@ -1715,7 +1721,7 @@ static void start_sw_period(struct kvm_lapic *apic)
 
        hrtimer_start(&apic->lapic_timer.timer,
                apic->lapic_timer.target_expiration,
-               HRTIMER_MODE_ABS);
+               HRTIMER_MODE_ABS_HARD);
 }
 
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
index 9100050..50d1eba 100644 (file)
@@ -1933,14 +1933,6 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
 static void __unregister_enc_region_locked(struct kvm *kvm,
                                           struct enc_region *region)
 {
-       /*
-        * The guest may change the memory encryption attribute from C=0 -> C=1
-        * or vice versa for this memory range. Lets make sure caches are
-        * flushed to ensure that guest data gets written into memory with
-        * correct C-bit.
-        */
-       sev_clflush_pages(region->pages, region->npages);
-
        sev_unpin_memory(kvm, region->pages, region->npages);
        list_del(&region->list);
        kfree(region);
@@ -1970,6 +1962,13 @@ static void sev_vm_destroy(struct kvm *kvm)
 
        mutex_lock(&kvm->lock);
 
+       /*
+        * Ensure that all guest tagged cache entries are flushed before
+        * releasing the pages back to the system for use. CLFLUSH will
+        * not do this, so issue a WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
        /*
         * if userspace was terminated before unregistering the memory regions
         * then lets unpin all the registered memory.
@@ -7158,6 +7157,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
        if (!svm_sev_enabled())
                return -ENOTTY;
 
+       if (!argp)
+               return 0;
+
        if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
                return -EFAULT;
 
@@ -7285,6 +7287,13 @@ static int svm_unregister_enc_region(struct kvm *kvm,
                goto failed;
        }
 
+       /*
+        * Ensure that all guest tagged cache entries are flushed before
+        * releasing the pages back to the system for use. CLFLUSH will
+        * not do this, so issue a WBINVD.
+        */
+       wbinvd_on_all_cpus();
+
        __unregister_enc_region_locked(kvm, region);
 
        mutex_unlock(&kvm->lock);
index 26f8f31..079d9fb 100644 (file)
@@ -6287,7 +6287,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 #endif
                ASM_CALL_CONSTRAINT
                :
-               THUNK_TARGET(entry),
+               [thunk_target]"r"(entry),
                [ss]"i"(__KERNEL_DS),
                [cs]"i"(__KERNEL_CS)
        );
index 3156e25..cf95c36 100644 (file)
@@ -1554,7 +1554,10 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
  */
 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
 {
-       if (lapic_in_kernel(vcpu) && apic_x2apic_mode(vcpu->arch.apic) &&
+       if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
+               return 1;
+
+       if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
                ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
                ((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {
 
@@ -2444,7 +2447,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
        vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
        vcpu->last_guest_tsc = tsc_timestamp;
-       WARN_ON((s64)vcpu->hv_clock.system_time < 0);
 
        /* If the host uses TSC clocksource, then it is stable */
        pvclock_flags = 0;
index fa4ea09..629fdf1 100644 (file)
@@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        return pmd_k;
 }
 
-void vmalloc_sync_all(void)
+static void vmalloc_sync(void)
 {
        unsigned long address;
 
@@ -217,6 +217,16 @@ void vmalloc_sync_all(void)
        }
 }
 
+void vmalloc_sync_mappings(void)
+{
+       vmalloc_sync();
+}
+
+void vmalloc_sync_unmappings(void)
+{
+       vmalloc_sync();
+}
+
 /*
  * 32-bit:
  *
@@ -319,11 +329,23 @@ out:
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_all(void)
+void vmalloc_sync_mappings(void)
 {
+       /*
+        * 64-bit mappings might allocate new p4d/pud pages
+        * that need to be propagated to all tasks' PGDs.
+        */
        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
+void vmalloc_sync_unmappings(void)
+{
+       /*
+        * Unmappings never allocate or free p4d/pud pages.
+        * No work is required here.
+        */
+}
+
 /*
  * 64-bit:
  *
index 935a91e..18c637c 100644 (file)
@@ -115,6 +115,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
        if (!sev_active())
                return;
 
+       if (!IS_ENABLED(CONFIG_EFI))
+               return;
+
        if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
                desc->flags |= IORES_MAP_ENCRYPTED;
 }
index 393d251..4d2a7a7 100644 (file)
@@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        }
                        /* and dreg_lo,sreg_lo */
                        EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
-                       /* and dreg_hi,sreg_hi */
-                       EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
-                       /* or dreg_lo,dreg_hi */
-                       EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+                       if (is_jmp64) {
+                               /* and dreg_hi,sreg_hi */
+                               EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
+                               /* or dreg_lo,dreg_hi */
+                               EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
+                       }
                        goto emit_cond_jmp;
                }
                case BPF_JMP | BPF_JSET | BPF_K:
index 103acbb..24c9642 100644 (file)
@@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
         * New allocation must be visible in all pgd before it can be found by
         * an NMI allocating from the pool.
         */
-       vmalloc_sync_all();
+       vmalloc_sync_mappings();
 
        rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
        if (rc)
index be79d6c..1bb00a9 100644 (file)
@@ -345,7 +345,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
        if (ret)
                goto unlock;
 
-       *buf = readl(rsb->regs + RSB_DATA);
+       *buf = readl(rsb->regs + RSB_DATA) & GENMASK(len * 8 - 1, 0);
 
 unlock:
        mutex_unlock(&rsb->lock);
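
The one-line fix above masks the 32-bit RSB data register down to the number of
bytes the caller actually requested, so 1- and 2-byte reads no longer hand back
stale high bits. A minimal standalone sketch of the same masking, using a
simplified 64-bit stand-in for the kernel's GENMASK() (not the kernel header):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's GENMASK(h, l): bits h..l set. */
    #define GENMASK(h, l) ((~0ULL >> (63 - (h))) & (~0ULL << (l)))

    int main(void)
    {
        uint32_t reg = 0xdeadbeef;  /* raw value read from the 32-bit data register */
        unsigned int len = 1;       /* the caller asked for a 1-byte transfer */
        uint32_t val = reg & GENMASK(len * 8 - 1, 0);

        printf("raw 0x%08x -> masked 0x%08x\n", reg, val);  /* ... -> 0x000000ef */
        return 0;
    }
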
index 6113fc0..4400196 100644 (file)
@@ -1266,6 +1266,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
        SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
                   SYSC_MODULE_QUIRK_SGX),
+       SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff,
+                  SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
                   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
        SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
@@ -1294,7 +1296,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
        SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
        SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
        SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
-       SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0),
        SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
        SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
        SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0),
index f6c120c..cf19290 100644 (file)
@@ -560,7 +560,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
        hws[IMX8MP_CLK_MEDIA_AXI] = imx8m_clk_hw_composite("media_axi", imx8mp_media_axi_sels, ccm_base + 0x8a00);
        hws[IMX8MP_CLK_MEDIA_APB] = imx8m_clk_hw_composite("media_apb", imx8mp_media_apb_sels, ccm_base + 0x8a80);
        hws[IMX8MP_CLK_HDMI_APB] = imx8m_clk_hw_composite("hdmi_apb", imx8mp_media_apb_sels, ccm_base + 0x8b00);
-       hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_apb_sels, ccm_base + 0x8b80);
+       hws[IMX8MP_CLK_HDMI_AXI] = imx8m_clk_hw_composite("hdmi_axi", imx8mp_media_axi_sels, ccm_base + 0x8b80);
        hws[IMX8MP_CLK_GPU_AXI] = imx8m_clk_hw_composite("gpu_axi", imx8mp_gpu_axi_sels, ccm_base + 0x8c00);
        hws[IMX8MP_CLK_GPU_AHB] = imx8m_clk_hw_composite("gpu_ahb", imx8mp_gpu_ahb_sels, ccm_base + 0x8c80);
        hws[IMX8MP_CLK_NOC] = imx8m_clk_hw_composite_critical("noc", imx8mp_noc_sels, ccm_base + 0x8d00);
@@ -686,7 +686,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
        hws[IMX8MP_CLK_CAN1_ROOT] = imx_clk_hw_gate2("can1_root_clk", "can1", ccm_base + 0x4350, 0);
        hws[IMX8MP_CLK_CAN2_ROOT] = imx_clk_hw_gate2("can2_root_clk", "can2", ccm_base + 0x4360, 0);
        hws[IMX8MP_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_root_clk", "ipg_root", ccm_base + 0x43a0, 0);
-       hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "enet_axi", ccm_base + 0x43b0, 0);
+       hws[IMX8MP_CLK_ENET_QOS_ROOT] = imx_clk_hw_gate4("enet_qos_root_clk", "sim_enet_root_clk", ccm_base + 0x43b0, 0);
        hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0);
        hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", ccm_base + 0x4450, 0);
        hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core_div", ccm_base + 0x4460, 0);
index fbef740..b8b2072 100644 (file)
@@ -43,12 +43,12 @@ struct imx_sc_msg_req_set_clock_rate {
        __le32 rate;
        __le16 resource;
        u8 clk;
-} __packed;
+} __packed __aligned(4);
 
 struct req_get_clock_rate {
        __le16 resource;
        u8 clk;
-} __packed;
+} __packed __aligned(4);
 
 struct resp_get_clock_rate {
        __le32 rate;
@@ -84,7 +84,7 @@ struct imx_sc_msg_get_clock_parent {
                struct req_get_clock_parent {
                        __le16 resource;
                        u8 clk;
-               } __packed req;
+               } __packed __aligned(4) req;
                struct resp_get_clock_parent {
                        u8 parent;
                } resp;
@@ -121,7 +121,7 @@ struct imx_sc_msg_req_clock_enable {
        u8 clk;
        u8 enable;
        u8 autog;
-} __packed;
+} __packed __aligned(4);
 
 static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
 {
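
Adding __aligned(4) on top of __packed keeps the byte-exact field layout while
restoring the 4-byte alignment (and a size padded to a multiple of 4) that plain
__packed gives up. A standalone sketch of the effect, using GCC/Clang attribute
syntax and field names that simply mirror the request struct above:

    #include <stdalign.h>
    #include <stdint.h>
    #include <stdio.h>

    struct msg_plain {
        uint32_t rate;
        uint16_t resource;
        uint8_t  clk;
    };                                      /* natural padding: size 8, align 4 */

    struct msg_packed {
        uint32_t rate;
        uint16_t resource;
        uint8_t  clk;
    } __attribute__((packed));              /* size 7, but only 1-byte aligned */

    struct msg_packed_aligned {
        uint32_t rate;
        uint16_t resource;
        uint8_t  clk;
    } __attribute__((packed, aligned(4)));  /* no internal holes, size 8, align 4 */

    int main(void)
    {
        printf("plain          : size %zu align %zu\n",
               sizeof(struct msg_plain), alignof(struct msg_plain));
        printf("packed         : size %zu align %zu\n",
               sizeof(struct msg_packed), alignof(struct msg_packed));
        printf("packed+aligned : size %zu align %zu\n",
               sizeof(struct msg_packed_aligned), alignof(struct msg_packed_aligned));
        return 0;
    }
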
index af3e780..e5538d5 100644 (file)
@@ -78,7 +78,7 @@ static const struct omap_clkctrl_reg_data am4_gfx_l3_clkctrl_regs[] __initconst
 };
 
 static const struct omap_clkctrl_reg_data am4_l4_rtc_clkctrl_regs[] __initconst = {
-       { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clk_32768_ck" },
+       { AM4_L4_RTC_RTC_CLKCTRL, NULL, CLKF_SW_SUP, "clkdiv32k_ick" },
        { 0 },
 };
 
index c3b1283..17909fd 100644 (file)
@@ -1151,7 +1151,7 @@ int dma_async_device_register(struct dma_device *device)
        }
 
        if (!device->device_release)
-               dev_warn(device->dev,
+               dev_dbg(device->dev,
                         "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
 
        kref_init(&device->ref);
index df47be6..989b7a2 100644 (file)
@@ -81,9 +81,9 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
        dev = &idxd->pdev->dev;
        idxd_cdev = &wq->idxd_cdev;
 
-       dev_dbg(dev, "%s called\n", __func__);
+       dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
 
-       if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq))
+       if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
                return -EBUSY;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
index c151129..4d7561a 100644 (file)
@@ -564,12 +564,12 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (IS_ERR(flow->udma_rflow)) {
                ret = PTR_ERR(flow->udma_rflow);
                dev_err(dev, "UDMAX rflow get err %d\n", ret);
-               goto err;
+               return ret;
        }
 
        if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
-               xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_rflow_put;
        }
 
        /* request and cfg rings */
@@ -578,7 +578,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (!flow->ringrx) {
                ret = -ENODEV;
                dev_err(dev, "Failed to get RX ring\n");
-               goto err;
+               goto err_rflow_put;
        }
 
        flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc,
@@ -586,19 +586,19 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (!flow->ringrxfdq) {
                ret = -ENODEV;
                dev_err(dev, "Failed to get RXFDQ ring\n");
-               goto err;
+               goto err_ringrx_free;
        }
 
        ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrx %d\n", ret);
-               goto err;
+               goto err_ringrxfdq_free;
        }
 
        ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
        if (ret) {
                dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
-               goto err;
+               goto err_ringrxfdq_free;
        }
 
        if (rx_chn->remote) {
@@ -648,7 +648,7 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
        if (ret) {
                dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
                        ret);
-               goto err;
+               goto err_ringrxfdq_free;
        }
 
        rx_chn->flows_ready++;
@@ -656,8 +656,17 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
                flow->udma_rflow_id, rx_chn->flows_ready);
 
        return 0;
-err:
-       k3_udma_glue_release_rx_flow(rx_chn, flow_idx);
+
+err_ringrxfdq_free:
+       k3_ringacc_ring_free(flow->ringrxfdq);
+
+err_ringrx_free:
+       k3_ringacc_ring_free(flow->ringrx);
+
+err_rflow_put:
+       xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
+       flow->udma_rflow = NULL;
+
        return ret;
 }
 
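
The rework above replaces the single catch-all err: label, which released
resources regardless of how far setup had progressed, with one unwind label per
acquired resource, taken in reverse order. A standalone sketch of the same
goto-unwind pattern, with hypothetical acquire()/release()/configure() helpers:

    #include <stdio.h>

    /* Hypothetical resources standing in for the rflow and the two rings. */
    static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
    static void release(const char *name) { printf("release %s\n", name); }
    static int configure(void) { return -1; }   /* force the error path for the demo */

    static int setup_flow(void)
    {
        int ret;

        ret = acquire("rflow");
        if (ret)
            return ret;                 /* nothing to undo yet */

        ret = acquire("ringrx");
        if (ret)
            goto err_rflow_put;

        ret = acquire("ringrxfdq");
        if (ret)
            goto err_ringrx_free;

        ret = configure();
        if (ret)
            goto err_ringrxfdq_free;

        return 0;

    err_ringrxfdq_free:
        release("ringrxfdq");
    err_ringrx_free:
        release("ringrx");
    err_rflow_put:
        release("rflow");
        return ret;
    }

    int main(void)
    {
        return setup_flow() ? 1 : 0;
    }
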
index 31fee5e..0017367 100644 (file)
 #include "gpiolib.h"
 #include "gpiolib-acpi.h"
 
-#define QUIRK_NO_EDGE_EVENTS_ON_BOOT           0x01l
-#define QUIRK_NO_WAKEUP                                0x02l
-
 static int run_edge_events_on_boot = -1;
 module_param(run_edge_events_on_boot, int, 0444);
 MODULE_PARM_DESC(run_edge_events_on_boot,
                 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
 
-static int honor_wakeup = -1;
-module_param(honor_wakeup, int, 0444);
-MODULE_PARM_DESC(honor_wakeup,
-                "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
+static char *ignore_wake;
+module_param(ignore_wake, charp, 0444);
+MODULE_PARM_DESC(ignore_wake,
+                "controller@pin combos on which to ignore the ACPI wake flag "
+                "ignore_wake=controller@pin[,controller@pin[,...]]");
+
+struct acpi_gpiolib_dmi_quirk {
+       bool no_edge_events_on_boot;
+       char *ignore_wake;
+};
 
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
@@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
                acpi_gpiochip_request_irq(acpi_gpio, event);
 }
 
+static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+{
+       const char *controller, *pin_str;
+       int len, pin;
+       char *endp;
+
+       controller = ignore_wake;
+       while (controller) {
+               pin_str = strchr(controller, '@');
+               if (!pin_str)
+                       goto err;
+
+               len = pin_str - controller;
+               if (len == strlen(controller_in) &&
+                   strncmp(controller, controller_in, len) == 0) {
+                       pin = simple_strtoul(pin_str + 1, &endp, 10);
+                       if (*endp != 0 && *endp != ',')
+                               goto err;
+
+                       if (pin == pin_in)
+                               return true;
+               }
+
+               controller = strchr(controller, ',');
+               if (controller)
+                       controller++;
+       }
+
+       return false;
+err:
+       pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
+                   ignore_wake);
+       return false;
+}
+
+static bool acpi_gpio_irq_is_wake(struct device *parent,
+                                 struct acpi_resource_gpio *agpio)
+{
+       int pin = agpio->pin_table[0];
+
+       if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
+               return false;
+
+       if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
+               dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+               return false;
+       }
+
+       return true;
+}
+
 /* Always returns AE_OK so that we keep looping over the resources */
 static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
                                             void *context)
@@ -289,7 +343,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        event->handle = evt_handle;
        event->handler = handler;
        event->irq = irq;
-       event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
+       event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
        event->pin = pin;
        event->desc = desc;
 
@@ -1328,7 +1382,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
                },
-               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .no_edge_events_on_boot = true,
+               },
        },
        {
                /*
@@ -1341,16 +1397,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
                },
-               .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .no_edge_events_on_boot = true,
+               },
        },
        {
                /*
-                * Various HP X2 10 Cherry Trail models use an external
-                * embedded-controller connected via I2C + an ACPI GPIO
-                * event handler. The embedded controller generates various
-                * spurious wakeup events when suspended. So disable wakeup
-                * for its handler (it uses the only ACPI GPIO event handler).
-                * This breaks wakeup when opening the lid, the user needs
+                * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
+                * external embedded-controller connected via I2C + an ACPI GPIO
+                * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+                * When suspending by closing the LID, the power to the USB
+                * keyboard is turned off, causing INT0002 ACPI events to
+                * trigger once the XHCI controller notices the keyboard is
+                * gone. So INT0002 events cause spurious wakeups too. Ignoring
+                * EC wakes breaks wakeup when opening the lid, the user needs
                 * to press the power-button to wakeup the system. The
                 * alternative is suspend simply not working, which is worse.
                 */
@@ -1358,33 +1418,61 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
                },
-               .driver_data = (void *)QUIRK_NO_WAKEUP,
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "INT33FF:01@0,INT0002:00@2",
+               },
+       },
+       {
+               /*
+                * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
+                * external embedded-controller connected via I2C + an ACPI GPIO
+                * event handler on INT33FC:02 pin 28, causing spurious wakeups.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+                       DMI_MATCH(DMI_BOARD_NAME, "815D"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "INT33FC:02@28",
+               },
+       },
+       {
+               /*
+                * HP X2 10 models with Cherry Trail SoC + AXP288 PMIC use an
+                * external embedded-controller connected via I2C + an ACPI GPIO
+                * event handler on INT33FF:01 pin 0, causing spurious wakeups.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+                       DMI_MATCH(DMI_BOARD_NAME, "813E"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "INT33FF:01@0",
+               },
        },
        {} /* Terminating entry */
 };
 
 static int acpi_gpio_setup_params(void)
 {
+       const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
        const struct dmi_system_id *id;
-       long quirks = 0;
 
        id = dmi_first_match(gpiolib_acpi_quirks);
        if (id)
-               quirks = (long)id->driver_data;
+               quirk = id->driver_data;
 
        if (run_edge_events_on_boot < 0) {
-               if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
+               if (quirk && quirk->no_edge_events_on_boot)
                        run_edge_events_on_boot = 0;
                else
                        run_edge_events_on_boot = 1;
        }
 
-       if (honor_wakeup < 0) {
-               if (quirks & QUIRK_NO_WAKEUP)
-                       honor_wakeup = 0;
-               else
-                       honor_wakeup = 1;
-       }
+       if (ignore_wake == NULL && quirk && quirk->ignore_wake)
+               ignore_wake = quirk->ignore_wake;
 
        return 0;
 }
index 4d0106c..00fb91f 100644 (file)
@@ -2306,9 +2306,16 @@ static void gpiochip_irq_disable(struct irq_data *d)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
+       /*
+        * Since we override .irq_disable() we need to mimic the
+        * behaviour of __irq_disable() in irq/chip.c.
+        * First call .irq_disable() if it exists, else mimic the
+        * behaviour of mask_irq() which calls .irq_mask() if
+        * it exists.
+        */
        if (chip->irq.irq_disable)
                chip->irq.irq_disable(d);
-       else
+       else if (chip->irq.chip->irq_mask)
                chip->irq.chip->irq_mask(d);
        gpiochip_disable_irq(chip, d->hwirq);
 }
index dee4462..c6e9885 100644 (file)
@@ -974,7 +974,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
        /* Map SG to device */
        r = -ENOMEM;
        nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-       if (nents != ttm->sg->nents)
+       if (nents == 0)
                goto release_sg;
 
        /* convert SG to linear array of pages and dma addresses */
index 86d9b0e..1de2cde 100644 (file)
@@ -967,7 +967,7 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 
        index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-               len = sg->length;
+               len = sg_dma_len(sg);
                page = sg_page(sg);
                addr = sg_dma_address(sg);
 
index 3b92311..b3380ff 100644 (file)
@@ -528,7 +528,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 
        r = -ENOMEM;
        nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-       if (nents != ttm->sg->nents)
+       if (nents == 0)
                goto release_sg;
 
        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
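
The amdgpu and radeon hunks above change the failure test after dma_map_sg():
the mapping layer may legitimately coalesce adjacent entries and return fewer
than it was handed, so only a return value of 0 means the mapping failed. A
standalone sketch of that contract with a hypothetical coalescing mapper (not
the kernel DMA API):

    #include <stddef.h>
    #include <stdio.h>

    struct entry { unsigned long addr; unsigned long len; };

    /* Hypothetical mapper: merges adjacent entries in place and returns how
     * many mapped entries remain; 0 would mean the mapping failed outright. */
    static size_t map_entries(struct entry *e, size_t n)
    {
        size_t out = 0;

        for (size_t i = 0; i < n; i++) {
            if (out && e[out - 1].addr + e[out - 1].len == e[i].addr)
                e[out - 1].len += e[i].len;     /* coalesce */
            else
                e[out++] = e[i];
        }
        return out;
    }

    int main(void)
    {
        struct entry sg[] = {
            { 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x8000, 0x1000 },
        };
        size_t nents = map_entries(sg, 3);

        /* Wrong test: "nents != 3" would flag this successful mapping as an error. */
        if (nents == 0) {
            fprintf(stderr, "mapping failed\n");
            return 1;
        }
        printf("mapped %zu of 3 entries\n", nents);     /* prints 2 of 3 */
        return 0;
    }
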
index 71ce621..60c4c6a 100644 (file)
@@ -661,7 +661,9 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 
        trace_drm_sched_process_job(s_fence);
 
+       dma_fence_get(&s_fence->finished);
        drm_sched_fence_finished(s_fence);
+       dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
index 8497c7a..224f830 100644 (file)
@@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev)
        i2c_del_adapter(&priv->adap);
        pm_runtime_disable(priv->dev);
        pm_runtime_set_suspended(priv->dev);
+       clk_disable_unprepare(priv->clk);
 
        return 0;
 }
index 62e18b4..f5d25ce 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
@@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
 
 static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
 {
-       unsigned long target = jiffies + msecs_to_jiffies(1000);
        u32 val;
+       int ret;
 
-       do {
-               val = readl(i2cd->regs + I2C_MST_CNTL);
-               if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
-                       break;
-               if ((val & I2C_MST_CNTL_STATUS) !=
-                               I2C_MST_CNTL_STATUS_BUS_BUSY)
-                       break;
-               usleep_range(500, 600);
-       } while (time_is_after_jiffies(target));
-
-       if (time_is_before_jiffies(target)) {
+       ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val,
+                                !(val & I2C_MST_CNTL_CYCLE_TRIGGER) ||
+                                (val & I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY,
+                                500, 1000 * USEC_PER_MSEC);
+
+       if (ret) {
                dev_err(i2cd->dev, "i2c timeout error %x\n", val);
                return -ETIMEDOUT;
        }
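
Replacing the open-coded jiffies loop with readl_poll_timeout() folds the
register read, the exit condition, the sleep interval and the overall timeout
into one helper that returns -ETIMEDOUT by itself. A rough userspace sketch of
the same shape, with a hypothetical read_status() standing in for the polled
register:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint32_t read_status(void)
    {
        /* Hypothetical register read: reports "busy" a few times, then idle. */
        static int countdown = 5;
        return countdown-- > 0 ? 0x1 : 0x0;     /* bit 0 = busy */
    }

    static int not_busy(uint32_t val) { return !(val & 0x1); }

    /* Poll readfn() every sleep_us until done() is true or timeout_us elapses. */
    static int poll_timeout(uint32_t (*readfn)(void), int (*done)(uint32_t),
                            unsigned int sleep_us, unsigned int timeout_us,
                            uint32_t *val)
    {
        struct timespec start, now, nap = { 0, (long)sleep_us * 1000L };

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            *val = readfn();
            if (done(*val))
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000000L +
                (now.tv_nsec - start.tv_nsec) / 1000L >= (long)timeout_us)
                return -ETIMEDOUT;
            nanosleep(&nap, NULL);
        }
    }

    int main(void)
    {
        uint32_t val;
        int ret = poll_timeout(read_status, not_busy, 500, 1000 * 1000, &val);

        printf("ret=%d val=0x%x\n", ret, val);
        return ret ? 1 : 0;
    }
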
index a7a8184..635dd69 100644 (file)
@@ -140,7 +140,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
        int ret = 0;
        int irq;
 
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        /* If irq is 0, we do polling. */
        if (irq < 0)
                irq = 0;
index 54e1fc8..f7f7b5b 100644 (file)
@@ -434,6 +434,7 @@ static void st_i2c_wr_fill_tx_fifo(struct st_i2c_dev *i2c_dev)
 /**
  * st_i2c_rd_fill_tx_fifo() - Fill the Tx FIFO in read mode
  * @i2c_dev: Controller's private data
+ * @max: Maximum amount of data to fill into the Tx FIFO
  *
  * This function fills the Tx FIFO with a fixed pattern when in read mode,
  * in order to trigger the clock.
index f6c2552..d0b3d35 100644 (file)
@@ -896,7 +896,9 @@ static int add_one_compat_dev(struct ib_device *device,
        cdev->dev.parent = device->dev.parent;
        rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
        cdev->dev.release = compatdev_release;
-       dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
+       ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
+       if (ret)
+               goto add_err;
 
        ret = device_add(&cdev->dev);
        if (ret)
index e0b0a91..9eec26d 100644 (file)
@@ -918,6 +918,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
                nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                            IB_DEVICE_NAME_MAX);
+               if (strlen(name) == 0) {
+                       err = -EINVAL;
+                       goto done;
+               }
                err = ib_device_rename(device, name);
                goto done;
        }
@@ -1514,7 +1518,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                    sizeof(ibdev_name));
-       if (strchr(ibdev_name, '%'))
+       if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
                return -EINVAL;
 
        nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
index 2d56083..75e7ec0 100644 (file)
@@ -349,16 +349,11 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
        else if (qp_pps)
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
 
-       if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
+       if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
+            (qp_attr_mask & IB_QP_PORT)) ||
+           (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
                new_pps->main.state = IB_PORT_PKEY_VALID;
 
-       if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
-               new_pps->main.port_num = qp_pps->main.port_num;
-               new_pps->main.pkey_index = qp_pps->main.pkey_index;
-               if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
-                       new_pps->main.state = IB_PORT_PKEY_VALID;
-       }
-
        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
index cd656ad..3b1e627 100644 (file)
@@ -275,8 +275,8 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
                mmu_interval_notifier_remove(&umem_odp->notifier);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->page_list);
-               put_pid(umem_odp->tgid);
        }
+       put_pid(umem_odp->tgid);
        kfree(umem_odp);
 }
 EXPORT_SYMBOL(ib_umem_odp_release);
index 1235ffb..da229ea 100644 (file)
@@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = {
        .llseek  = no_llseek,
 };
 
+static struct ib_umad_port *get_port(struct ib_device *ibdev,
+                                    struct ib_umad_device *umad_dev,
+                                    unsigned int port)
+{
+       if (!umad_dev)
+               return ERR_PTR(-EOPNOTSUPP);
+       if (!rdma_is_port_valid(ibdev, port))
+               return ERR_PTR(-EINVAL);
+       if (!rdma_cap_ib_mad(ibdev, port))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       return &umad_dev->ports[port - rdma_start_port(ibdev)];
+}
+
 static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
                               struct ib_client_nl_info *res)
 {
-       struct ib_umad_device *umad_dev = client_data;
+       struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
 
-       if (!rdma_is_port_valid(ibdev, res->port))
-               return -EINVAL;
+       if (IS_ERR(port))
+               return PTR_ERR(port);
 
        res->abi = IB_USER_MAD_ABI_VERSION;
-       res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev;
-
+       res->cdev = &port->dev;
        return 0;
 }
 
@@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad");
 static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
                               struct ib_client_nl_info *res)
 {
-       struct ib_umad_device *umad_dev =
-               ib_get_client_data(ibdev, &umad_client);
+       struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
 
-       if (!rdma_is_port_valid(ibdev, res->port))
-               return -EINVAL;
+       if (IS_ERR(port))
+               return PTR_ERR(port);
 
        res->abi = IB_USER_MAD_ABI_VERSION;
-       res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev;
-
+       res->cdev = &port->sm_dev;
        return 0;
 }
 
index c2f0d9b..13e4203 100644 (file)
@@ -141,6 +141,7 @@ static int defer_packet_queue(
         */
        xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
        if (list_empty(&pq->busy.list)) {
+               pq->busy.lock = &sde->waitlock;
                iowait_get_priority(&pq->busy);
                iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
        }
@@ -155,6 +156,7 @@ static void activate_packet_queue(struct iowait *wait, int reason)
 {
        struct hfi1_user_sdma_pkt_q *pq =
                container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+       pq->busy.lock = NULL;
        xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
        wake_up(&wait->wait_dma);
 };
@@ -256,6 +258,21 @@ pq_reqs_nomem:
        return ret;
 }
 
+static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
+{
+       unsigned long flags;
+       seqlock_t *lock = pq->busy.lock;
+
+       if (!lock)
+               return;
+       write_seqlock_irqsave(lock, flags);
+       if (!list_empty(&pq->busy.list)) {
+               list_del_init(&pq->busy.list);
+               pq->busy.lock = NULL;
+       }
+       write_sequnlock_irqrestore(lock, flags);
+}
+
 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                               struct hfi1_ctxtdata *uctxt)
 {
@@ -281,6 +298,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
                kfree(pq->reqs);
                kfree(pq->req_in_use);
                kmem_cache_destroy(pq->txreq_cache);
+               flush_pq_iowait(pq);
                kfree(pq);
        } else {
                spin_unlock(&fd->pq_rcu_lock);
@@ -587,11 +605,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                if (ret < 0) {
                        if (ret != -EBUSY)
                                goto free_req;
-                       wait_event_interruptible_timeout(
+                       if (wait_event_interruptible_timeout(
                                pq->busy.wait_dma,
-                               (pq->state == SDMA_PKT_Q_ACTIVE),
+                               pq->state == SDMA_PKT_Q_ACTIVE,
                                msecs_to_jiffies(
-                                       SDMA_IOWAIT_TIMEOUT));
+                                       SDMA_IOWAIT_TIMEOUT)) <= 0)
+                               flush_pq_iowait(pq);
                }
        }
        *count += idx;
index 367a71b..3dec3de 100644 (file)
@@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
                dump_cqe(dev, cqe);
 }
 
+static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+                          u16 tail, u16 head)
+{
+       u16 idx;
+
+       do {
+               idx = tail & (qp->sq.wqe_cnt - 1);
+               if (idx == head)
+                       break;
+
+               tail = qp->sq.w_list[idx].next;
+       } while (1);
+       tail = qp->sq.w_list[idx].next;
+       qp->sq.last_poll = tail;
+}
+
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
        mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
@@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
 }
 
 static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
-                   int *npolled, int is_send)
+                   int *npolled, bool is_send)
 {
        struct mlx5_ib_wq *wq;
        unsigned int cur;
@@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
                return;
 
        for (i = 0;  i < cur && np < num_entries; i++) {
-               wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+               unsigned int idx;
+
+               idx = (is_send) ? wq->last_poll : wq->tail;
+               idx &= (wq->wqe_cnt - 1);
+               wc->wr_id = wq->wrid[idx];
                wc->status = IB_WC_WR_FLUSH_ERR;
                wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
                wq->tail++;
+               if (is_send)
+                       wq->last_poll = wq->w_list[idx].next;
                np++;
                wc->qp = &qp->ibqp;
                wc++;
@@ -473,6 +495,7 @@ repoll:
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
+               handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
index e4bcfa8..ffa7c21 100644 (file)
@@ -5722,9 +5722,10 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
        const struct mlx5_ib_counters *cnts =
                get_counters(dev, counter->port - 1);
 
-       /* Q counters are in the beginning of all counters */
        return rdma_alloc_hw_stats_struct(cnts->names,
-                                         cnts->num_q_counters,
+                                         cnts->num_q_counters +
+                                         cnts->num_cong_counters +
+                                         cnts->num_ext_ppcnt_counters,
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
 
index bb78142..f3bdbd5 100644 (file)
@@ -288,6 +288,7 @@ struct mlx5_ib_wq {
        unsigned                head;
        unsigned                tail;
        u16                     cur_post;
+       u16                     last_poll;
        void                    *cur_edge;
 };
 
index 957f3a5..8fe149e 100644 (file)
@@ -3775,6 +3775,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                qp->sq.cur_post = 0;
                if (qp->sq.wqe_cnt)
                        qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
+               qp->sq.last_poll = 0;
                qp->db.db[MLX5_RCV_DBR] = 0;
                qp->db.db[MLX5_SND_DBR] = 0;
        }
@@ -6204,6 +6205,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
        if (udata->outlen && udata->outlen < min_resp_len)
                return ERR_PTR(-EINVAL);
 
+       if (!capable(CAP_SYS_RAWIO) &&
+           init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
+               return ERR_PTR(-EPERM);
+
        dev = to_mdev(pd->device);
        switch (init_attr->wq_type) {
        case IB_WQT_RQ:
index 13d7f66..5724cbb 100644 (file)
@@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
        if (cq->ip)
                kref_put(&cq->ip->ref, rvt_release_mmap_info);
        else
-               vfree(cq->queue);
+               vfree(cq->kqueue);
 }
 
 /**
index fce43e6..3cfd2c1 100644 (file)
@@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t)
                        input_value_sync
                };
 
+               input_set_timestamp(dev, ktime_get());
                input_pass_values(dev, vals, ARRAY_SIZE(vals));
 
                if (dev->rep[REP_PERIOD])
index 14b55ba..fb078e0 100644 (file)
@@ -75,6 +75,14 @@ static struct touchkey_variant aries_touchkey_variant = {
        .cmd_led_off = ARIES_TOUCHKEY_CMD_LED_OFF,
 };
 
+static const struct touchkey_variant tc360_touchkey_variant = {
+       .keycode_reg = 0x00,
+       .base_reg = 0x00,
+       .fixed_regulator = true,
+       .cmd_led_on = TM2_TOUCHKEY_CMD_LED_ON,
+       .cmd_led_off = TM2_TOUCHKEY_CMD_LED_OFF,
+};
+
 static int tm2_touchkey_led_brightness_set(struct led_classdev *led_dev,
                                            enum led_brightness brightness)
 {
@@ -327,6 +335,9 @@ static const struct of_device_id tm2_touchkey_of_match[] = {
        }, {
                .compatible = "cypress,aries-touchkey",
                .data = &aries_touchkey_variant,
+       }, {
+               .compatible = "coreriver,tc360-touchkey",
+               .data = &tc360_touchkey_variant,
        },
        { },
 };
index 2c666fb..4d20362 100644 (file)
@@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
        "SYN3052", /* HP EliteBook 840 G4 */
        "SYN3221", /* HP 15-ay000 */
        "SYN323d", /* HP Spectre X360 13-w013dx */
+       "SYN3257", /* HP Envy 13-ad105ng */
        NULL
 };
 
index 6adea8a..ffa39ab 100644 (file)
@@ -1203,8 +1203,8 @@ static int rmi_f11_initialize(struct rmi_function *fn)
         * If distance threshold values are set, switch to reduced reporting
         * mode so they actually get used by the controller.
         */
-       if (ctrl->ctrl0_11[RMI_F11_DELTA_X_THRESHOLD] ||
-           ctrl->ctrl0_11[RMI_F11_DELTA_Y_THRESHOLD]) {
+       if (sensor->axis_align.delta_x_threshold ||
+           sensor->axis_align.delta_y_threshold) {
                ctrl->ctrl0_11[0] &= ~RMI_F11_REPORT_MODE_MASK;
                ctrl->ctrl0_11[0] |= RMI_F11_REPORT_MODE_REDUCED;
        }
index 6ed9f22..fe24543 100644 (file)
@@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
        return 0;
 }
 
-static bool raydium_i2c_boot_trigger(struct i2c_client *client)
+static int raydium_i2c_boot_trigger(struct i2c_client *client)
 {
        static const u8 cmd[7][6] = {
                { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },
@@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
                }
        }
 
-       return false;
+       return 0;
 }
 
-static bool raydium_i2c_fw_trigger(struct i2c_client *client)
+static int raydium_i2c_fw_trigger(struct i2c_client *client)
 {
        static const u8 cmd[5][11] = {
                { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },
@@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
                }
        }
 
-       return false;
+       return 0;
 }
 
 static int raydium_i2c_check_path(struct i2c_client *client)
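
The trigger helpers report failures as negative error codes, but were declared
bool; converting a nonzero value to _Bool collapses it to 1, so the specific
errno was lost before the caller could propagate it. Changing the return type
to int keeps it intact. A tiny standalone demonstration of the truncation:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool trigger_bool(void)
    {
        return -ETIMEDOUT;      /* silently converted to true (1) */
    }

    static int trigger_int(void)
    {
        return -ETIMEDOUT;      /* error code survives */
    }

    int main(void)
    {
        printf("bool return: %d\n", (int)trigger_bool());   /* prints 1 */
        printf("int  return: %d\n", trigger_int());         /* -110 on Linux */
        return 0;
    }
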
index 25a8f93..db8884a 100644 (file)
@@ -149,6 +149,7 @@ config NET_FC
 config IFB
        tristate "Intermediate Functional Block support"
        depends on NET_CLS_ACT
+       select NET_REDIRECT
        ---help---
          This is an intermediate driver that allows sharing of
          resources.
index 8e81bdf..63f2548 100644 (file)
@@ -141,29 +141,29 @@ static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
                return 0;
 
        /* Print out debug information. */
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "CAIF SPI debug information:\n");
-
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
-
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "STATE: %d\n", cfspi->dbg_state);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Previous CMD: 0x%x\n", cfspi->pcmd);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current CMD: 0x%x\n", cfspi->cmd);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Previous TX len: %d\n", cfspi->tx_ppck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Previous RX len: %d\n", cfspi->rx_ppck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current TX len: %d\n", cfspi->tx_cpck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current RX len: %d\n", cfspi->rx_cpck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Next TX len: %d\n", cfspi->tx_npck_len);
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Next RX len: %d\n", cfspi->rx_npck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "CAIF SPI debug information:\n");
+
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
+
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "STATE: %d\n", cfspi->dbg_state);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Previous CMD: 0x%x\n", cfspi->pcmd);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current CMD: 0x%x\n", cfspi->cmd);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Previous TX len: %d\n", cfspi->tx_ppck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Previous RX len: %d\n", cfspi->rx_ppck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current TX len: %d\n", cfspi->tx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current RX len: %d\n", cfspi->rx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Next TX len: %d\n", cfspi->tx_npck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Next RX len: %d\n", cfspi->rx_npck_len);
 
        if (len > DEBUGFS_BUF_SIZE)
                len = DEBUGFS_BUF_SIZE;
@@ -180,23 +180,23 @@ static ssize_t print_frame(char *buf, size_t size, char *frm,
        int len = 0;
        int i;
        for (i = 0; i < count; i++) {
-               len += snprintf((buf + len), (size - len),
+               len += scnprintf((buf + len), (size - len),
                                        "[0x" BYTE_HEX_FMT "]",
                                        frm[i]);
                if ((i == cut) && (count > (cut * 2))) {
                        /* Fast forward. */
                        i = count - cut;
-                       len += snprintf((buf + len), (size - len),
-                                       "--- %zu bytes skipped ---\n",
-                                       count - (cut * 2));
+                       len += scnprintf((buf + len), (size - len),
+                                        "--- %zu bytes skipped ---\n",
+                                        count - (cut * 2));
                }
 
                if ((!(i % 10)) && i) {
-                       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                                       "\n");
+                       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                                        "\n");
                }
        }
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
        return len;
 }
 
@@ -214,18 +214,18 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
                return 0;
 
        /* Print out debug information. */
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Current frame:\n");
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Current frame:\n");
 
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
                           cfspi->xfer.va_tx[0],
                           (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
 
-       len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
-                       "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
+       len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
+                        "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
 
        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
                           cfspi->xfer.va_rx,
index 2f5c287..a366428 100644 (file)
@@ -625,7 +625,10 @@ err_free_chan:
        tty->disc_data = NULL;
        clear_bit(SLF_INUSE, &sl->flags);
        slc_free_netdev(sl->dev);
+       /* do not call free_netdev before rtnl_unlock */
+       rtnl_unlock();
        free_netdev(sl->dev);
+       return err;
 
 err_exit:
        rtnl_unlock();
index 022466c..7cbd1bd 100644 (file)
@@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
 static void
 mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
 {
-       u32 mask = PMCR_TX_EN | PMCR_RX_EN;
+       u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
 
        if (enable)
                mt7530_set(priv, MT7530_PMCR_P(port), mask);
@@ -1444,7 +1444,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
        mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
                     PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
        mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
-                  PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
+                  PMCR_BACKPR_EN | PMCR_FORCE_MODE;
 
        /* Are we connected to external phy */
        if (port == 5 && dsa_is_user_port(ds, 5))
index 0b2fd96..cada6e7 100644 (file)
@@ -1018,13 +1018,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                struct ena_rx_buffer *rx_info;
 
                req_id = rx_ring->free_ids[next_to_use];
-               rc = validate_rx_req_id(rx_ring, req_id);
-               if (unlikely(rc < 0))
-                       break;
 
                rx_info = &rx_ring->rx_buffer_info[req_id];
 
-
                rc = ena_alloc_rx_page(rx_ring, rx_info,
                                       GFP_ATOMIC | __GFP_COMP);
                if (unlikely(rc < 0)) {
@@ -1379,9 +1375,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        struct ena_rx_buffer *rx_info;
        u16 len, req_id, buf = 0;
        void *va;
+       int rc;
 
        len = ena_bufs[buf].len;
        req_id = ena_bufs[buf].req_id;
+
+       rc = validate_rx_req_id(rx_ring, req_id);
+       if (unlikely(rc < 0))
+               return NULL;
+
        rx_info = &rx_ring->rx_buffer_info[req_id];
 
        if (unlikely(!rx_info->page)) {
@@ -1454,6 +1456,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
                buf++;
                len = ena_bufs[buf].len;
                req_id = ena_bufs[buf].req_id;
+
+               rc = validate_rx_req_id(rx_ring, req_id);
+               if (unlikely(rc < 0))
+                       return NULL;
+
                rx_info = &rx_ring->rx_buffer_info[req_id];
        } while (1);
 
@@ -1968,7 +1975,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
        }
 
        /* Reserved the max msix vectors we might need */
-       msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
+       msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
        netif_dbg(adapter, probe, adapter->netdev,
                  "trying to enable MSI-X, vectors %d\n", msix_vecs);
 
@@ -2068,6 +2075,7 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
 
 static int ena_request_io_irq(struct ena_adapter *adapter)
 {
+       u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
        unsigned long flags = 0;
        struct ena_irq *irq;
        int rc = 0, i, k;
@@ -2078,7 +2086,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
                return -EINVAL;
        }
 
-       for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+       for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
                irq = &adapter->irq_tbl[i];
                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                                 irq->data);
@@ -2119,6 +2127,7 @@ static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
 
 static void ena_free_io_irq(struct ena_adapter *adapter)
 {
+       u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
        struct ena_irq *irq;
        int i;
 
@@ -2129,7 +2138,7 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
        }
 #endif /* CONFIG_RFS_ACCEL */
 
-       for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
+       for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
                irq = &adapter->irq_tbl[i];
                irq_set_affinity_hint(irq->vector, NULL);
                free_irq(irq->vector, irq->data);
@@ -2144,12 +2153,13 @@ static void ena_disable_msix(struct ena_adapter *adapter)
 
 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
 {
+       u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
        int i;
 
        if (!netif_running(adapter->netdev))
                return;
 
-       for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
+       for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
                synchronize_irq(adapter->irq_tbl[i].vector);
 }
 
@@ -3476,6 +3486,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                netif_carrier_on(adapter->netdev);
 
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
+       adapter->last_keep_alive_jiffies = jiffies;
        dev_err(&pdev->dev,
                "Device reset completed successfully, Driver info: %s\n",
                version);
@@ -4325,13 +4336,15 @@ err_disable_device:
 
 /*****************************************************************************/
 
-/* ena_remove - Device Removal Routine
+/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
  * @pdev: PCI device information struct
+ * @shutdown: Is it a shutdown operation? If false, it is a removal
  *
- * ena_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
+ * __ena_shutoff is a helper routine that does the real work on the shutdown
+ * and removal paths; the difference between them is whether the netdevice is
+ * detached or unregistered.
  */
-static void ena_remove(struct pci_dev *pdev)
+static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
 {
        struct ena_adapter *adapter = pci_get_drvdata(pdev);
        struct ena_com_dev *ena_dev;
@@ -4350,13 +4363,17 @@ static void ena_remove(struct pci_dev *pdev)
 
        cancel_work_sync(&adapter->reset_task);
 
-       rtnl_lock();
+       rtnl_lock(); /* lock released inside the below if-else block */
        ena_destroy_device(adapter, true);
-       rtnl_unlock();
-
-       unregister_netdev(netdev);
-
-       free_netdev(netdev);
+       if (shutdown) {
+               netif_device_detach(netdev);
+               dev_close(netdev);
+               rtnl_unlock();
+       } else {
+               rtnl_unlock();
+               unregister_netdev(netdev);
+               free_netdev(netdev);
+       }
 
        ena_com_rss_destroy(ena_dev);
 
@@ -4371,6 +4388,30 @@ static void ena_remove(struct pci_dev *pdev)
        vfree(ena_dev);
 }
 
+/* ena_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+
+static void ena_remove(struct pci_dev *pdev)
+{
+       __ena_shutoff(pdev, false);
+}
+
+/* ena_shutdown - Device Shutdown Routine
+ * @pdev: PCI device information struct
+ *
+ * ena_shutdown is called by the PCI subsystem to alert the driver that
+ * a shutdown/reboot (or kexec) is happening and device must be disabled.
+ */
+
+static void ena_shutdown(struct pci_dev *pdev)
+{
+       __ena_shutoff(pdev, true);
+}
+
 #ifdef CONFIG_PM
 /* ena_suspend - PM suspend callback
  * @pdev: PCI device information struct
@@ -4420,6 +4461,7 @@ static struct pci_driver ena_pci_driver = {
        .id_table       = ena_pci_tbl,
        .probe          = ena_probe,
        .remove         = ena_remove,
+       .shutdown       = ena_shutdown,
 #ifdef CONFIG_PM
        .suspend    = ena_suspend,
        .resume     = ena_resume,
index c5c8eff..d28b406 100644 (file)
@@ -6880,12 +6880,12 @@ skip_rdma:
        }
        ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
        rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
-       if (rc)
+       if (rc) {
                netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
                           rc);
-       else
-               ctx->flags |= BNXT_CTX_FLAG_INITED;
-
+               return rc;
+       }
+       ctx->flags |= BNXT_CTX_FLAG_INITED;
        return 0;
 }
 
@@ -7406,14 +7406,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
                pri2cos = &resp2->pri0_cos_queue_id;
                for (i = 0; i < 8; i++) {
                        u8 queue_id = pri2cos[i];
+                       u8 queue_idx;
 
+                       /* Per port queue IDs start from 0, 10, 20, etc */
+                       queue_idx = queue_id % 10;
+                       if (queue_idx > BNXT_MAX_QUEUE) {
+                               bp->pri2cos_valid = false;
+                               goto qstats_done;
+                       }
                        for (j = 0; j < bp->max_q; j++) {
                                if (bp->q_ids[j] == queue_id)
-                                       bp->pri2cos[i] = j;
+                                       bp->pri2cos_idx[i] = queue_idx;
                        }
                }
                bp->pri2cos_valid = 1;
        }
+qstats_done:
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
 }
@@ -11669,6 +11677,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
                bp->rx_nr_rings++;
                bp->cp_nr_rings++;
        }
+       if (rc) {
+               bp->tx_nr_rings = 0;
+               bp->rx_nr_rings = 0;
+       }
        return rc;
 }
 
@@ -11962,12 +11974,12 @@ init_err_pci_clean:
        bnxt_hwrm_func_drv_unrgtr(bp);
        bnxt_free_hwrm_short_cmd_req(bp);
        bnxt_free_hwrm_resources(bp);
-       bnxt_free_ctx_mem(bp);
-       kfree(bp->ctx);
-       bp->ctx = NULL;
        kfree(bp->fw_health);
        bp->fw_health = NULL;
        bnxt_cleanup_pci(bp);
+       bnxt_free_ctx_mem(bp);
+       kfree(bp->ctx);
+       bp->ctx = NULL;
 
 init_err_free:
        free_netdev(dev);
index cabef0b..63b1706 100644 (file)
@@ -1716,7 +1716,7 @@ struct bnxt {
        u16                     fw_rx_stats_ext_size;
        u16                     fw_tx_stats_ext_size;
        u16                     hw_ring_stats_size;
-       u8                      pri2cos[8];
+       u8                      pri2cos_idx[8];
        u8                      pri2cos_valid;
 
        u16                     hwrm_max_req_len;
index fb6f30d..b1511bc 100644 (file)
@@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
 {
        struct bnxt *bp = netdev_priv(dev);
        struct ieee_ets *my_ets = bp->ieee_ets;
+       int rc;
 
        ets->ets_cap = bp->max_tc;
 
        if (!my_ets) {
-               int rc;
-
                if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
                        return 0;
 
                my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
                if (!my_ets)
-                       return 0;
+                       return -ENOMEM;
                rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
                if (rc)
-                       return 0;
+                       goto error;
                rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
                if (rc)
-                       return 0;
+                       goto error;
+
+               /* cache result */
+               bp->ieee_ets = my_ets;
        }
 
        ets->cbs = my_ets->cbs;
@@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
        memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
        memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
        return 0;
+error:
+       kfree(my_ets);
+       return rc;
 }
 
 static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
index 1f67e67..3f8a1de 100644 (file)
@@ -589,25 +589,25 @@ skip_ring_stats:
                if (bp->pri2cos_valid) {
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_rx_bytes_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_rx_pkts_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_tx_bytes_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
                        }
                        for (i = 0; i < 8; i++, j++) {
                                long n = bnxt_tx_pkts_pri_arr[i].base_off +
-                                        bp->pri2cos[i];
+                                        bp->pri2cos_idx[i];
 
                                buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
                        }
index e50a153..1d678be 100644 (file)
@@ -94,12 +94,6 @@ static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
        bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
 }
 
-static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
-                                           void __iomem *d)
-{
-       return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
-}
-
 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
                                    void __iomem *d,
                                    dma_addr_t addr)
@@ -508,61 +502,6 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev,
        return phy_ethtool_ksettings_set(dev->phydev, cmd);
 }
 
-static void bcmgenet_set_rx_csum(struct net_device *dev,
-                                netdev_features_t wanted)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       u32 rbuf_chk_ctrl;
-       bool rx_csum_en;
-
-       rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
-
-       rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
-
-       /* enable rx checksumming */
-       if (rx_csum_en)
-               rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
-       else
-               rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
-       priv->desc_rxchk_en = rx_csum_en;
-
-       /* If UniMAC forwards CRC, we need to skip over it to get
-        * a valid CHK bit to be set in the per-packet status word
-       */
-       if (rx_csum_en && priv->crc_fwd_en)
-               rbuf_chk_ctrl |= RBUF_SKIP_FCS;
-       else
-               rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
-
-       bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
-}
-
-static void bcmgenet_set_tx_csum(struct net_device *dev,
-                                netdev_features_t wanted)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       bool desc_64b_en;
-       u32 tbuf_ctrl, rbuf_ctrl;
-
-       tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
-       rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-
-       desc_64b_en = !!(wanted & NETIF_F_HW_CSUM);
-
-       /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
-       if (desc_64b_en) {
-               tbuf_ctrl |= RBUF_64B_EN;
-               rbuf_ctrl |= RBUF_64B_EN;
-       } else {
-               tbuf_ctrl &= ~RBUF_64B_EN;
-               rbuf_ctrl &= ~RBUF_64B_EN;
-       }
-       priv->desc_64b_en = desc_64b_en;
-
-       bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
-       bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
-}
-
 static int bcmgenet_set_features(struct net_device *dev,
                                 netdev_features_t features)
 {
@@ -578,9 +517,6 @@ static int bcmgenet_set_features(struct net_device *dev,
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
-       bcmgenet_set_tx_csum(dev, features);
-       bcmgenet_set_rx_csum(dev, features);
-
        clk_disable_unprepare(priv->clk);
 
        return ret;
@@ -1475,8 +1411,8 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
-static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
-                                           struct sk_buff *skb)
+static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
+                                       struct sk_buff *skb)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct status_64 *status = NULL;
@@ -1590,13 +1526,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        GENET_CB(skb)->bytes_sent = skb->len;
 
-       /* set the SKB transmit checksum */
-       if (priv->desc_64b_en) {
-               skb = bcmgenet_put_tx_csum(dev, skb);
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
-                       goto out;
-               }
+       /* add the Transmit Status Block */
+       skb = bcmgenet_add_tsb(dev, skb);
+       if (!skb) {
+               ret = NETDEV_TX_OK;
+               goto out;
        }
 
        for (i = 0; i <= nr_frags; i++) {
@@ -1775,6 +1709,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
 
        while ((rxpktprocessed < rxpkttoprocess) &&
               (rxpktprocessed < budget)) {
+               struct status_64 *status;
+               __be16 rx_csum;
+
                cb = &priv->rx_cbs[ring->read_ptr];
                skb = bcmgenet_rx_refill(priv, cb);
 
@@ -1783,20 +1720,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                        goto next;
                }
 
-               if (!priv->desc_64b_en) {
-                       dma_length_status =
-                               dmadesc_get_length_status(priv, cb->bd_addr);
-               } else {
-                       struct status_64 *status;
-                       __be16 rx_csum;
-
-                       status = (struct status_64 *)skb->data;
-                       dma_length_status = status->length_status;
+               status = (struct status_64 *)skb->data;
+               dma_length_status = status->length_status;
+               if (dev->features & NETIF_F_RXCSUM) {
                        rx_csum = (__force __be16)(status->rx_csum & 0xffff);
-                       if (priv->desc_rxchk_en) {
-                               skb->csum = (__force __wsum)ntohs(rx_csum);
-                               skb->ip_summed = CHECKSUM_COMPLETE;
-                       }
+                       skb->csum = (__force __wsum)ntohs(rx_csum);
+                       skb->ip_summed = CHECKSUM_COMPLETE;
                }
 
                /* DMA flags and length are still valid no matter how
@@ -1840,14 +1769,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
                } /* error packet */
 
                skb_put(skb, len);
-               if (priv->desc_64b_en) {
-                       skb_pull(skb, 64);
-                       len -= 64;
-               }
 
-               /* remove hardware 2bytes added for IP alignment */
-               skb_pull(skb, 2);
-               len -= 2;
+               /* remove the RSB and the 2 bytes the hardware adds for IP alignment */
+               skb_pull(skb, 66);
+               len -= 66;
 
                if (priv->crc_fwd_en) {
                        skb_trim(skb, len - ETH_FCS_LEN);
@@ -1965,6 +1890,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
        u32 reg;
 
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (reg & CMD_SW_RESET)
+               return;
        if (enable)
                reg |= mask;
        else
@@ -1984,11 +1911,9 @@ static void reset_umac(struct bcmgenet_priv *priv)
        bcmgenet_rbuf_ctrl_set(priv, 0);
        udelay(10);
 
-       /* disable MAC while updating its registers */
-       bcmgenet_umac_writel(priv, 0, UMAC_CMD);
-
-       /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
-       bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
+       /* issue soft reset and disable MAC while updating its registers */
+       bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+       udelay(2);
 }
 
 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2038,11 +1963,28 @@ static void init_umac(struct bcmgenet_priv *priv)
 
        bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
 
-       /* init rx registers, enable ip header optimization */
+       /* init tx registers, enable TSB */
+       reg = bcmgenet_tbuf_ctrl_get(priv);
+       reg |= TBUF_64B_EN;
+       bcmgenet_tbuf_ctrl_set(priv, reg);
+
+       /* init rx registers, enable ip header optimization and RSB */
        reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
-       reg |= RBUF_ALIGN_2B;
+       reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
        bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
 
+       /* enable rx checksumming */
+       reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+       reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
+       /* If UniMAC forwards CRC, we need to skip over it to get
+        * a valid CHK bit to be set in the per-packet status word
+        */
+       if (priv->crc_fwd_en)
+               reg |= RBUF_SKIP_FCS;
+       else
+               reg &= ~RBUF_SKIP_FCS;
+       bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
+
        if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
                bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
 
index 61a6fe9..daf8fb2 100644 (file)
@@ -273,6 +273,7 @@ struct bcmgenet_mib_counters {
 #define  RBUF_FLTR_LEN_SHIFT           8
 
 #define TBUF_CTRL                      0x00
+#define  TBUF_64B_EN                   (1 << 0)
 #define TBUF_BP_MC                     0x0C
 #define TBUF_ENERGY_CTRL               0x14
 #define  TBUF_EEE_EN                   (1 << 0)
@@ -662,8 +663,6 @@ struct bcmgenet_priv {
        unsigned int irq0_stat;
 
        /* HW descriptors/checksum variables */
-       bool desc_64b_en;
-       bool desc_rxchk_en;
        bool crc_fwd_en;
 
        u32 dma_max_burst_length;
index ea20d94..c9a4369 100644 (file)
@@ -132,8 +132,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
                return -EINVAL;
        }
 
-       /* disable RX */
+       /* Can't suspend with WoL if MAC is still in reset */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (reg & CMD_SW_RESET)
+               reg &= ~CMD_SW_RESET;
+
+       /* disable RX */
        reg &= ~CMD_RX_EN;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        mdelay(10);
index 1024494..b5930f8 100644 (file)
@@ -95,6 +95,12 @@ void bcmgenet_mii_setup(struct net_device *dev)
                               CMD_HD_EN |
                               CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
                reg |= cmd_bits;
+               if (reg & CMD_SW_RESET) {
+                       reg &= ~CMD_SW_RESET;
+                       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+                       udelay(2);
+                       reg |= CMD_TX_EN | CMD_RX_EN;
+               }
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
        } else {
                /* done if nothing has changed */
@@ -181,38 +187,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
        const char *phy_name = NULL;
        u32 id_mode_dis = 0;
        u32 port_ctrl;
-       int bmcr = -1;
-       int ret;
        u32 reg;
 
-       /* MAC clocking workaround during reset of umac state machines */
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       if (reg & CMD_SW_RESET) {
-               /* An MII PHY must be isolated to prevent TXC contention */
-               if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
-                       ret = phy_read(phydev, MII_BMCR);
-                       if (ret >= 0) {
-                               bmcr = ret;
-                               ret = phy_write(phydev, MII_BMCR,
-                                               bmcr | BMCR_ISOLATE);
-                       }
-                       if (ret) {
-                               netdev_err(dev, "failed to isolate PHY\n");
-                               return ret;
-                       }
-               }
-               /* Switch MAC clocking to RGMII generated clock */
-               bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
-               /* Ensure 5 clks with Rx disabled
-                * followed by 5 clks with Reset asserted
-                */
-               udelay(4);
-               reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
-               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-               /* Ensure 5 more clocks before Rx is enabled */
-               udelay(2);
-       }
-
        switch (priv->phy_interface) {
        case PHY_INTERFACE_MODE_INTERNAL:
                phy_name = "internal PHY";
@@ -282,10 +258,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 
        bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
 
-       /* Restore the MII PHY after isolation */
-       if (bmcr >= 0)
-               phy_write(phydev, MII_BMCR, bmcr);
-
        priv->ext_phy = !priv->internal_phy &&
                        (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 
index 2a2938b..fc05248 100644 (file)
@@ -902,7 +902,7 @@ void clear_all_filters(struct adapter *adapter)
                                adapter->tids.tid_tab[i];
 
                        if (f && (f->valid || f->pending))
-                               cxgb4_del_filter(dev, i, &f->fs);
+                               cxgb4_del_filter(dev, f->tid, &f->fs);
                }
 
                sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
@@ -910,7 +910,7 @@ void clear_all_filters(struct adapter *adapter)
                        f = (struct filter_entry *)adapter->tids.tid_tab[i];
 
                        if (f && (f->valid || f->pending))
-                               cxgb4_del_filter(dev, i, &f->fs);
+                               cxgb4_del_filter(dev, f->tid, &f->fs);
                }
        }
 }
index 58a039c..af1f40c 100644 (file)
@@ -246,6 +246,9 @@ static int  cxgb4_ptp_fineadjtime(struct adapter *adapter, s64 delta)
                             FW_PTP_CMD_PORTID_V(0));
        c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
        c.u.ts.sc = FW_PTP_SC_ADJ_FTIME;
+       c.u.ts.sign = (delta < 0) ? 1 : 0;
+       if (delta < 0)
+               delta = -delta;
        c.u.ts.tm = cpu_to_be64(delta);
 
        err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), NULL);
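
The FW_PTP_SC_ADJ_FTIME hunk above encodes the signed fine-time adjustment as an explicit sign flag plus an unsigned magnitude before it is copied into the mailbox command. A minimal standalone sketch of that sign/magnitude encoding (the struct and field names here are illustrative, not the driver's firmware layout):

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for the firmware's timestamp-adjust fields */
struct fw_adj {
        uint8_t  sign;  /* 1 = subtract the magnitude, 0 = add it */
        uint64_t tm;    /* magnitude of the adjustment */
};

static void encode_fineadj(int64_t delta, struct fw_adj *adj)
{
        adj->sign = (delta < 0) ? 1 : 0;
        if (delta < 0)
                delta = -delta; /* firmware expects an unsigned magnitude */
        adj->tm = (uint64_t)delta;
}

int main(void)
{
        struct fw_adj adj;

        encode_fineadj(-1500, &adj);
        printf("sign=%u tm=%llu\n", (unsigned)adj.sign,
               (unsigned long long)adj.tm);
        return 0;
}
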
index 97cda50..cab3d17 100644 (file)
@@ -1307,8 +1307,9 @@ static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
                                 int maxreclaim)
 {
+       unsigned int reclaimed, hw_cidx;
        struct sge_txq *q = &eq->q;
-       unsigned int reclaimed;
+       int hw_in_use;
 
        if (!q->in_use || !__netif_tx_trylock(eq->txq))
                return 0;
@@ -1316,12 +1317,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
        /* Reclaim pending completed TX Descriptors. */
        reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
 
+       hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+       hw_in_use = q->pidx - hw_cidx;
+       if (hw_in_use < 0)
+               hw_in_use += q->size;
+
        /* If the TX Queue is currently stopped and there's now more than half
         * the queue available, restart it.  Otherwise bail out since the rest
         * of what we want to do here is with the possibility of shipping any
         * currently buffered Coalesced TX Work Request.
         */
-       if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
+       if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
                netif_tx_wake_queue(eq->txq);
                eq->q.restarts++;
        }
@@ -1486,16 +1492,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 * has opened up.
                 */
                eth_txq_stop(q);
-
-               /* If we're using the SGE Doorbell Queue Timer facility, we
-                * don't need to ask the Firmware to send us Egress Queue CIDX
-                * Updates: the Hardware will do this automatically.  And
-                * since we send the Ingress Queue CIDX Updates to the
-                * corresponding Ethernet Response Queue, we'll get them very
-                * quickly.
-                */
-               if (!q->dbqt)
-                       wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+               wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
 
        wr = (void *)&q->q.desc[q->q.pidx];
@@ -1805,16 +1802,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
                 * has opened up.
                 */
                eth_txq_stop(txq);
-
-               /* If we're using the SGE Doorbell Queue Timer facility, we
-                * don't need to ask the Firmware to send us Egress Queue CIDX
-                * Updates: the Hardware will do this automatically.  And
-                * since we send the Ingress Queue CIDX Updates to the
-                * corresponding Ethernet Response Queue, we'll get them very
-                * quickly.
-                */
-               if (!txq->dbqt)
-                       wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+               wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
        }
 
        /* Start filling in our Work Request.  Note that we do _not_ handle
@@ -3370,26 +3358,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
        }
 
        txq = &s->ethtxq[pi->first_qset + rspq->idx];
-
-       /* We've got the Hardware Consumer Index Update in the Egress Update
-        * message.  If we're using the SGE Doorbell Queue Timer mechanism,
-        * these Egress Update messages will be our sole CIDX Updates we get
-        * since we don't want to chew up PCIe bandwidth for both Ingress
-        * Messages and Status Page writes.  However, The code which manages
-        * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
-        * stored in the Status Page at the end of the TX Queue.  It's easiest
-        * to simply copy the CIDX Update value from the Egress Update message
-        * to the Status Page.  Also note that no Endian issues need to be
-        * considered here since both are Big Endian and we're just copying
-        * bytes consistently ...
-        */
-       if (txq->dbqt) {
-               struct cpl_sge_egr_update *egr;
-
-               egr = (struct cpl_sge_egr_update *)rsp;
-               WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
-       }
-
        t4_sge_eth_txq_egress_update(adapter, txq, -1);
 }
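
The t4_sge_eth_txq_egress_update() change above restarts a stopped TX queue only when fewer than half of the ring entries are still in flight from the hardware's point of view, computing that count as pidx - cidx and folding it back into range when the subtraction wraps. A small standalone sketch of the wrap-safe occupancy calculation (generic ring, illustrative names):

#include <stdio.h>

/*
 * Occupancy of a ring with 'size' slots given producer and consumer
 * indices; the subtraction goes negative once the producer has wrapped
 * past the end of the ring, so fold it back into [0, size).
 */
static int ring_in_use(int pidx, int cidx, int size)
{
        int in_use = pidx - cidx;

        if (in_use < 0)
                in_use += size;
        return in_use;
}

int main(void)
{
        printf("%d\n", ring_in_use(10, 4, 1024));       /* 6 */
        printf("%d\n", ring_in_use(3, 1020, 1024));     /* 7, wrapped */
        return 0;
}
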
 
index e190187..0d2b4ab 100644 (file)
@@ -782,7 +782,7 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
        /* Set full duplex */
        tmp &= ~IF_MODE_HD;
 
-       if (memac->phy_if == PHY_INTERFACE_MODE_RGMII) {
+       if (phy_interface_mode_is_rgmii(memac->phy_if)) {
                /* Configure RGMII in manual mode */
                tmp &= ~IF_MODE_RGMII_AUTO;
                tmp &= ~IF_MODE_RGMII_SP_MASK;
index eb53c15..5f2d57d 100644 (file)
@@ -389,7 +389,8 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
 
        spin_unlock_bh(&cmdq->cmdq_lock);
 
-       if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
+       if (!wait_for_completion_timeout(&done,
+                                        msecs_to_jiffies(CMDQ_TIMEOUT))) {
                spin_lock_bh(&cmdq->cmdq_lock);
 
                if (cmdq->errcode[curr_prod_idx] == &errcode)
@@ -623,6 +624,8 @@ static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
        if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
                return -EBUSY;
 
+       dma_rmb();
+
        errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
 
        cmdq_sync_cmd_handler(cmdq, ci, errcode);
index 79b3d53..c7c75b7 100644 (file)
@@ -360,50 +360,6 @@ static int wait_for_db_state(struct hinic_hwdev *hwdev)
        return -EFAULT;
 }
 
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
-       struct hinic_cmd_io_status cmd_io_status;
-       struct hinic_hwif *hwif = hwdev->hwif;
-       struct pci_dev *pdev = hwif->pdev;
-       struct hinic_pfhwdev *pfhwdev;
-       unsigned long end;
-       u16 out_size;
-       int err;
-
-       if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
-               dev_err(&pdev->dev, "Unsupported PCI Function type\n");
-               return -EINVAL;
-       }
-
-       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
-       cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
-       end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
-       do {
-               err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
-                                       HINIC_COMM_CMD_IO_STATUS_GET,
-                                       &cmd_io_status, sizeof(cmd_io_status),
-                                       &cmd_io_status, &out_size,
-                                       HINIC_MGMT_MSG_SYNC);
-               if ((err) || (out_size != sizeof(cmd_io_status))) {
-                       dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
-                               err);
-                       return err;
-               }
-
-               if (cmd_io_status.status == IO_STOPPED) {
-                       dev_info(&pdev->dev, "IO stopped\n");
-                       return 0;
-               }
-
-               msleep(20);
-       } while (time_before(jiffies, end));
-
-       dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
-       return -ETIMEDOUT;
-}
-
 /**
  * clear_io_resources - set the IO resources as not active in the NIC
  * @hwdev: the NIC HW device
@@ -423,11 +379,8 @@ static int clear_io_resources(struct hinic_hwdev *hwdev)
                return -EINVAL;
        }
 
-       err = wait_for_io_stopped(hwdev);
-       if (err) {
-               dev_err(&pdev->dev, "IO has not stopped yet\n");
-               return err;
-       }
+       /* sleep 100ms to wait for the firmware to stop I/O */
+       msleep(100);
 
        cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
 
index 79243b6..c0b6bcb 100644 (file)
@@ -188,7 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
  * eq_update_ci - update the HW cons idx of event queue
  * @eq: the event queue to update the cons idx for
  **/
-static void eq_update_ci(struct hinic_eq *eq)
+static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
 {
        u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
 
@@ -202,7 +202,7 @@ static void eq_update_ci(struct hinic_eq *eq)
 
        val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX)    |
               HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
-              HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
+              HINIC_EQ_CI_SET(arm_state, INT_ARMED);
 
        val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
 
@@ -235,6 +235,8 @@ static void aeq_irq_handler(struct hinic_eq *eq)
                if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
                        break;
 
+               dma_rmb();
+
                event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
                if (event >= HINIC_MAX_AEQ_EVENTS) {
                        dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
@@ -347,7 +349,7 @@ static void eq_irq_handler(void *data)
        else if (eq->type == HINIC_CEQ)
                ceq_irq_handler(eq);
 
-       eq_update_ci(eq);
+       eq_update_ci(eq, EQ_ARMED);
 }
 
 /**
@@ -702,7 +704,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
        }
 
        set_eq_ctrls(eq);
-       eq_update_ci(eq);
+       eq_update_ci(eq, EQ_ARMED);
 
        err = alloc_eq_pages(eq);
        if (err) {
@@ -752,18 +754,28 @@ err_req_irq:
  **/
 static void remove_eq(struct hinic_eq *eq)
 {
-       struct msix_entry *entry = &eq->msix_entry;
-
-       free_irq(entry->vector, eq);
+       hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
+                            HINIC_MSIX_DISABLE);
+       free_irq(eq->msix_entry.vector, eq);
 
        if (eq->type == HINIC_AEQ) {
                struct hinic_eq_work *aeq_work = &eq->aeq_work;
 
                cancel_work_sync(&aeq_work->work);
+               /* clear aeq_len to prevent hw from accessing host memory */
+               hinic_hwif_write_reg(eq->hwif,
+                                    HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
        } else if (eq->type == HINIC_CEQ) {
                tasklet_kill(&eq->ceq_tasklet);
+               /* clear ceq_len to prevent hw from accessing host memory */
+               hinic_hwif_write_reg(eq->hwif,
+                                    HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
        }
 
+       /* update cons_idx to avoid a spurious interrupt */
+       eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
+       eq_update_ci(eq, EQ_NOT_ARMED);
+
        free_eq_pages(eq);
 }
 
index c1a6be6..8995e32 100644 (file)
@@ -43,7 +43,7 @@
 
 #define MSG_NOT_RESP                    0xFFFF
 
-#define MGMT_MSG_TIMEOUT                1000
+#define MGMT_MSG_TIMEOUT                5000
 
 #define mgmt_to_pfhwdev(pf_mgmt)        \
                container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -267,7 +267,8 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
                goto unlock_sync_msg;
        }
 
-       if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
+       if (!wait_for_completion_timeout(recv_done,
+                                        msecs_to_jiffies(MGMT_MSG_TIMEOUT))) {
                dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
                err = -ETIMEDOUT;
                goto unlock_sync_msg;
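
Both hinic timeout fixes above wrap the millisecond constants in msecs_to_jiffies() because wait_for_completion_timeout() takes its timeout in jiffies; passing a raw millisecond count silently rescales the timeout by HZ. A user-space sketch of the mismatch (the HZ value and the conversion helper are assumptions for illustration):

#include <stdio.h>

#define HZ 250  /* assumed tick rate; common configs use 100-1000 */

/* round-up millisecond-to-tick conversion, as a stand-in for the
 * kernel's msecs_to_jiffies() */
static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
        unsigned int timeout_ms = 5000;
        unsigned long j = msecs_to_jiffies_sketch(timeout_ms);

        /* the raw constant interpreted as ticks waits far longer than
         * the intended 5 seconds */
        printf("raw ms as jiffies:    %u -> %.1fs at HZ=%d\n",
               timeout_ms, (double)timeout_ms / HZ, HZ);
        printf("converted to jiffies: %lu -> %.1fs at HZ=%d\n",
               j, (double)j / HZ, HZ);
        return 0;
}
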
index 2695ad6..815649e 100644 (file)
@@ -350,6 +350,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
                if (!rq_wqe)
                        break;
 
+               /* make sure we read rx_done before packet length */
+               dma_rmb();
+
                cqe = rq->cqe[ci];
                status =  be32_to_cpu(cqe->status);
                hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);
index 0e13d1c..3650164 100644 (file)
@@ -45,7 +45,7 @@
 
 #define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
 
-#define MIN_SKB_LEN                     17
+#define MIN_SKB_LEN                    32
 
 #define        MAX_PAYLOAD_OFFSET              221
 #define TRANSPORT_OFFSET(l4_hdr, skb)  ((u32)((l4_hdr) - (skb)->data))
@@ -622,6 +622,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
        do {
                hw_ci = HW_CONS_IDX(sq) & wq->mask;
 
+               dma_rmb();
+
                /* Reading a WQEBB to get real WQE size and consumer index. */
                sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
                if ((!sq_wqe) ||
index d2e2dc5..d14762d 100644 (file)
@@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
                writel(MVMDIO_ERR_INT_SMI_DONE,
                        dev->regs + MVMDIO_ERR_INT_MASK);
 
-       } else if (dev->err_interrupt < 0) {
-               ret = dev->err_interrupt;
+       } else if (dev->err_interrupt == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
                goto out_mdio;
        }
 
index 98017e7..11babc7 100644 (file)
@@ -3036,11 +3036,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
-       rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
-
        cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
                port->cause_rx_tx;
 
+       rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
        if (rx_queue) {
                rx_queue = rx_queue - 1;
                if (pp->bm_priv)
index 9c48182..9486cae 100644 (file)
@@ -906,59 +906,59 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
        int len = 0;
 
        mlx4_err(dev, "%s", str);
-       len += snprintf(buf + len, BUF_SIZE - len,
-                       "port = %d prio = 0x%x qp = 0x%x ",
-                       rule->port, rule->priority, rule->qpn);
+       len += scnprintf(buf + len, BUF_SIZE - len,
+                        "port = %d prio = 0x%x qp = 0x%x ",
+                        rule->port, rule->priority, rule->qpn);
 
        list_for_each_entry(cur, &rule->list, list) {
                switch (cur->id) {
                case MLX4_NET_TRANS_RULE_ID_ETH:
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "dmac = %pM ", &cur->eth.dst_mac);
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "dmac = %pM ", &cur->eth.dst_mac);
                        if (cur->eth.ether_type)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "ethertype = 0x%x ",
-                                               be16_to_cpu(cur->eth.ether_type));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "ethertype = 0x%x ",
+                                                be16_to_cpu(cur->eth.ether_type));
                        if (cur->eth.vlan_id)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "vlan-id = %d ",
-                                               be16_to_cpu(cur->eth.vlan_id));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "vlan-id = %d ",
+                                                be16_to_cpu(cur->eth.vlan_id));
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_IPV4:
                        if (cur->ipv4.src_ip)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "src-ip = %pI4 ",
-                                               &cur->ipv4.src_ip);
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "src-ip = %pI4 ",
+                                                &cur->ipv4.src_ip);
                        if (cur->ipv4.dst_ip)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "dst-ip = %pI4 ",
-                                               &cur->ipv4.dst_ip);
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "dst-ip = %pI4 ",
+                                                &cur->ipv4.dst_ip);
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_TCP:
                case MLX4_NET_TRANS_RULE_ID_UDP:
                        if (cur->tcp_udp.src_port)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "src-port = %d ",
-                                               be16_to_cpu(cur->tcp_udp.src_port));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "src-port = %d ",
+                                                be16_to_cpu(cur->tcp_udp.src_port));
                        if (cur->tcp_udp.dst_port)
-                               len += snprintf(buf + len, BUF_SIZE - len,
-                                               "dst-port = %d ",
-                                               be16_to_cpu(cur->tcp_udp.dst_port));
+                               len += scnprintf(buf + len, BUF_SIZE - len,
+                                                "dst-port = %d ",
+                                                be16_to_cpu(cur->tcp_udp.dst_port));
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_IB:
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "dst-gid = %pI6\n", cur->ib.dst_gid);
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "dst-gid-mask = %pI6\n",
-                                       cur->ib.dst_gid_msk);
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "dst-gid = %pI6\n", cur->ib.dst_gid);
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "dst-gid-mask = %pI6\n",
+                                        cur->ib.dst_gid_msk);
                        break;
 
                case MLX4_NET_TRANS_RULE_ID_VXLAN:
-                       len += snprintf(buf + len, BUF_SIZE - len,
-                                       "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
+                       len += scnprintf(buf + len, BUF_SIZE - len,
+                                        "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
                        break;
                case MLX4_NET_TRANS_RULE_ID_IPV6:
                        break;
@@ -967,7 +967,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
                        break;
                }
        }
-       len += snprintf(buf + len, BUF_SIZE - len, "\n");
+       len += scnprintf(buf + len, BUF_SIZE - len, "\n");
        mlx4_err(dev, "%s", buf);
 
        if (len >= BUF_SIZE)
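
The mlx4 conversion above (the later nfp and ionic hunks make the same switch) relies on scnprintf() returning the number of characters actually stored, whereas snprintf() returns the length that would have been written, so accumulating snprintf() return values can advance buf + len past the end of the buffer once output truncates. A user-space sketch of a clamped append with the scnprintf()-style return value (the helper name is made up):

#include <stdarg.h>
#include <stdio.h>

/* append to buf[size] starting at offset len, returning how many
 * characters were really stored, never the would-have-been length */
static size_t append_clamped(char *buf, size_t len, size_t size,
                             const char *fmt, ...)
{
        va_list ap;
        int n;

        if (len >= size)
                return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf + len, size - len, fmt, ap);
        va_end(ap);
        if (n < 0)
                return 0;
        return ((size_t)n >= size - len) ? size - len - 1 : (size_t)n;
}

int main(void)
{
        char buf[16];
        size_t len = 0;

        len += append_clamped(buf, len, sizeof(buf), "port = %d ", 1);
        len += append_clamped(buf, len, sizeof(buf), "qp = 0x%x ", 0x1234);
        printf("'%s' (len=%zu)\n", buf, len);   /* len stays within the buffer */
        return 0;
}
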
index 220ef9f..c9606b8 100644 (file)
@@ -371,6 +371,7 @@ enum {
 
 struct mlx5e_sq_wqe_info {
        u8  opcode;
+       u8 num_wqebbs;
 
        /* Auxiliary data for different opcodes. */
        union {
@@ -1059,6 +1060,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
 void mlx5e_activate_rq(struct mlx5e_rq *rq);
 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
 
index d3693fa..e54f70d 100644 (file)
@@ -10,8 +10,7 @@
 
 static inline bool cqe_syndrome_needs_recover(u8 syndrome)
 {
-       return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
-              syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+       return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
               syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
               syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
 }
index 6c72b59..a01e2de 100644 (file)
@@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
                goto out;
 
        mlx5e_reset_icosq_cc_pc(icosq);
-       mlx5e_free_rx_descs(rq);
+       mlx5e_free_rx_in_progress_descs(rq);
        clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
        mlx5e_activate_icosq(icosq);
        mlx5e_activate_rq(rq);
index a226277..f07b139 100644 (file)
@@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
 
 static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
 {
-       if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+       if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                mlx5_wq_ll_reset(&rq->mpwqe.wq);
-       else
+               rq->mpwqe.actual_wq_head = 0;
+       } else {
                mlx5_wq_cyc_reset(&rq->wqe.wq);
+       }
 }
 
 /* SW parser related functions */
index a3efa29..63116be 100644 (file)
@@ -38,8 +38,8 @@ enum {
 
 enum {
        MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 1,
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 2,
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 1,
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
 };
 
 struct mlx5e_ktls_offload_context_tx {
index f260dd9..52a5662 100644 (file)
@@ -218,7 +218,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
         *    this packet was already acknowledged and its record info
         *    was released.
         */
-       ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+       ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));
 
        if (unlikely(tls_record_is_start_marker(record))) {
                ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
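
The single-character change above matters because the last byte carried by the packet is tcp_seq + datalen - 1; comparing the exclusive end against the record start misclassified a packet that ends right at the record boundary as overlapping it. A tiny sketch of the two comparisons, with before() reimplemented here for illustration using the usual 32-bit wrap-around trick:

#include <stdint.h>
#include <stdio.h>

/* true if seq1 is before seq2 in wrap-around 32-bit sequence space,
 * same idea as the kernel's before() helper */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
        uint32_t record_start = 1000;
        uint32_t tcp_seq = 900, datalen = 100;  /* packet covers 900..999 */

        /* exclusive end: 1000 is not before 1000, so the packet is wrongly
         * treated as reaching into the record */
        printf("old check: %d\n", seq_before(tcp_seq + datalen, record_start));
        /* last covered byte: 999 is before 1000, so the packet really does
         * end before the record starts */
        printf("new check: %d\n",
               seq_before(tcp_seq + datalen - 1, record_start));
        return 0;
}
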
index 21de476..4ef3dc7 100644 (file)
@@ -813,6 +813,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
        return -ETIMEDOUT;
 }
 
+void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq;
+       u16 head;
+       int i;
+
+       if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+               return;
+
+       wq = &rq->mpwqe.wq;
+       head = wq->head;
+
+       /* Outstanding UMR WQEs (in progress) start at wq->head */
+       for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+               rq->dealloc_wqe(rq, head);
+               head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+       }
+
+       rq->mpwqe.actual_wq_head = wq->head;
+       rq->mpwqe.umr_in_progress = 0;
+       rq->mpwqe.umr_completed = 0;
+}
+
 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 {
        __be16 wqe_ix_be;
@@ -820,14 +843,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 
        if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
                struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
-               u16 head = wq->head;
-               int i;
 
-               /* Outstanding UMR WQEs (in progress) start at wq->head */
-               for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
-                       rq->dealloc_wqe(rq, head);
-                       head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
-               }
+               mlx5e_free_rx_in_progress_descs(rq);
 
                while (!mlx5_wq_ll_is_empty(wq)) {
                        struct mlx5e_rx_wqe_ll *wqe;
index 1c3ab69..312d469 100644 (file)
@@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
                wi->opcode = MLX5_OPCODE_NOP;
+               wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
 }
@@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
 
        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+       sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
        sq->db.ico_wqe[pi].umr.rq = rq;
        sq->pc += MLX5E_UMR_WQEBBS;
 
@@ -621,6 +623,7 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.ico_wqe[ci];
+                       sqcc += wi->num_wqebbs;
 
                        if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                                netdev_WARN_ONCE(cq->channel->netdev,
@@ -631,16 +634,12 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                                break;
                        }
 
-                       if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
-                               sqcc += MLX5E_UMR_WQEBBS;
+                       if (likely(wi->opcode == MLX5_OPCODE_UMR))
                                wi->umr.rq->mpwqe.umr_completed++;
-                       } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
-                               sqcc++;
-                       } else {
+                       else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
                                netdev_WARN_ONCE(cq->channel->netdev,
                                                 "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
                                                 wi->opcode);
-                       }
 
                } while (!last_wqe);
 
index 74091f7..ec5fc52 100644 (file)
@@ -2476,10 +2476,11 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
                        continue;
 
                if (f->field_bsize == 32) {
-                       mask_be32 = *(__be32 *)&mask;
+                       mask_be32 = (__be32)mask;
                        mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
                } else if (f->field_bsize == 16) {
-                       mask_be16 = *(__be16 *)&mask;
+                       mask_be32 = (__be32)mask;
+                       mask_be16 = *(__be16 *)&mask_be32;
                        mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
                }
 
index 257a7c9..800d34e 100644 (file)
@@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
        sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+       sq->db.ico_wqe[pi].num_wqebbs = 1;
        nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
index 8e19f6a..93052b0 100644 (file)
@@ -615,8 +615,10 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
                        break;
 
        if (i == MLX5_MAX_PORTS) {
-               if (ldev->nb.notifier_call)
+               if (ldev->nb.notifier_call) {
                        unregister_netdevice_notifier_net(&init_net, &ldev->nb);
+                       ldev->nb.notifier_call = NULL;
+               }
                mlx5_lag_mp_cleanup(ldev);
                cancel_delayed_work_sync(&ldev->bond_work);
                mlx5_lag_dev_free(ldev);
index 6dec2a5..2d93228 100644 (file)
@@ -933,7 +933,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
 
        action->rewrite.data = (void *)ops;
        action->rewrite.num_of_actions = i;
-       action->rewrite.chunk->byte_size = i * sizeof(*ops);
 
        ret = mlx5dr_send_postsend_action(dmn, action);
        if (ret) {
index c7f10d4..095ec7b 100644 (file)
@@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
        int ret;
 
        send_info.write.addr = (uintptr_t)action->rewrite.data;
-       send_info.write.length = action->rewrite.chunk->byte_size;
+       send_info.write.length = action->rewrite.num_of_actions *
+                                DR_MODIFY_ACTION_SIZE;
        send_info.write.lkey = 0;
        send_info.remote_addr = action->rewrite.chunk->mr_addr;
        send_info.rkey = action->rewrite.chunk->rkey;
index 1faac31..23f879d 100644 (file)
@@ -1071,6 +1071,9 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
                MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
        if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
                MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+       MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+       MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
+                req->cap_mask1_perm);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
 ex:
        kfree(in);
index 914c33e..e9ded1a 100644 (file)
@@ -1322,36 +1322,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
                            mbox->mapaddr);
 }
 
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
-                             const struct pci_device_id *id)
+static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
+                                   const struct pci_device_id *id,
+                                   u32 *p_sys_status)
 {
        unsigned long end;
-       char mrsr_pl[MLXSW_REG_MRSR_LEN];
-       int err;
+       u32 val;
 
-       mlxsw_reg_mrsr_pack(mrsr_pl);
-       err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
-       if (err)
-               return err;
        if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
                msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
                return 0;
        }
 
-       /* We must wait for the HW to become responsive once again. */
+       /* We must wait for the HW to become responsive. */
        msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
 
        end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
        do {
-               u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
-
+               val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
                if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
                        return 0;
                cond_resched();
        } while (time_before(jiffies, end));
+
+       *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
+
        return -EBUSY;
 }
 
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+                             const struct pci_device_id *id)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       char mrsr_pl[MLXSW_REG_MRSR_LEN];
+       u32 sys_status;
+       int err;
+
+       err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
+                       sys_status);
+               return err;
+       }
+
+       mlxsw_reg_mrsr_pack(mrsr_pl);
+       err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
+       if (err)
+               return err;
+
+       err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
+                       sys_status);
+               return err;
+       }
+
+       return 0;
+}
+
 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
 {
        int err;
index dd66851..e05d1d1 100644 (file)
@@ -3572,7 +3572,7 @@ MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
  * When in bytes mode, value is specified in units of 1000bps.
  * Access: RW
  */
-MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 31);
 
 /* reg_qeec_de
  * DWRR configuration enable. Enables configuration of the dwrr and
index 5427562..336e5ec 100644 (file)
@@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
        return 0;
 
 err_erif_unresolve:
-       list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
-                                        vif_node)
+       list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
+                                            vif_node)
                mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
 err_irif_unresolve:
-       list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
-                                        vif_node)
+       list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
+                                            vif_node)
                mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
        mr_vif->rif = NULL;
        return err;
index 58579ba..45cc840 100644 (file)
@@ -156,6 +156,50 @@ static int msg_enable;
  * chip is busy transferring packet data (RX/TX FIFO accesses).
  */
 
+/**
+ * ks_check_endian - Check whether endianness of the bus is correct
+ * @ks   : The chip information
+ *
+ * The KS8851-16MLL EESK pin allows selecting the endianness of the 16-bit
+ * bus. To maintain optimum performance, the bus endianness should be set
+ * such that it matches the endianness of the CPU.
+ */
+
+static int ks_check_endian(struct ks_net *ks)
+{
+       u16 cider;
+
+       /*
+        * Read CIDER register first, however read it the "wrong" way around.
+        * If the endian strap on the KS8851-16MLL is incorrect and the chip
+        * is operating in a different endianness than the CPU, then the meaning
+        * of BE[3:0] byte-enable bits is also swapped such that:
+        *    BE[3,2,1,0] becomes BE[1,0,3,2]
+        *
+        * Luckily for us, the byte-enable bits are the top four MSbits of
+        * the address register and the CIDER register is at offset 0xc0.
+        * Hence, by reading address 0xc0c0, which is not impacted by endian
+        * swapping, we assert either BE[3:2] or BE[1:0] while reading the
+        * CIDER register.
+        *
+        * If the bus configuration is correct, reading 0xc0c0 asserts
+        * BE[3:2] and this read returns 0x0000, because to read a register
+        * with the bottom two LSbits of the address set to 0, BE[1:0] must be
+        * asserted.
+        *
+        * If the bus configuration is NOT correct, reading 0xc0c0 asserts
+        * BE[1:0] and this read returns the non-zero value 0x8872.
+        */
+       iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
+       cider = ioread16(ks->hw_addr);
+       if (!cider)
+               return 0;
+
+       netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");
+
+       return -EINVAL;
+}
+
 /**
  * ks_rdreg16 - read 16 bit register from device
  * @ks   : The chip information
@@ -166,7 +210,7 @@ static int msg_enable;
 
 static u16 ks_rdreg16(struct ks_net *ks, int offset)
 {
-       ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+       ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        return ioread16(ks->hw_addr);
 }
@@ -181,7 +225,7 @@ static u16 ks_rdreg16(struct ks_net *ks, int offset)
 
 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
 {
-       ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
+       ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
        iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
        iowrite16(value, ks->hw_addr);
 }
@@ -197,7 +241,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
 {
        len >>= 1;
        while (len--)
-               *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
+               *wptr++ = (u16)ioread16(ks->hw_addr);
 }
 
 /**
@@ -211,7 +255,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
 {
        len >>= 1;
        while (len--)
-               iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
+               iowrite16(*wptr++, ks->hw_addr);
 }
 
 static void ks_disable_int(struct ks_net *ks)
@@ -1218,6 +1262,10 @@ static int ks8851_probe(struct platform_device *pdev)
                goto err_free;
        }
 
+       err = ks_check_endian(ks);
+       if (err)
+               goto err_free;
+
        netdev->irq = platform_get_irq(pdev, 0);
 
        if ((int)netdev->irq < 0) {
index e678ba3..628fa9b 100644 (file)
@@ -2045,7 +2045,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
        if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
            (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
                if ((mask & VXGE_DEBUG_MASK) == mask)                          \
-                       printk(fmt "\n", __VA_ARGS__);                         \
+                       printk(fmt "\n", ##__VA_ARGS__);                       \
 } while (0)
 #else
 #define vxge_debug_ll(level, mask, fmt, ...)
index 59a57ff..9c86f4f 100644 (file)
@@ -452,49 +452,49 @@ int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
 
 #if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
 #define vxge_debug_ll_config(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_ll_config(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
 #define vxge_debug_init(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_init(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
 #define vxge_debug_tx(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_tx(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
 #define vxge_debug_rx(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_rx(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
 #define vxge_debug_mem(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_mem(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
 #define vxge_debug_entryexit(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_entryexit(level, fmt, ...)
 #endif
 
 #if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
 #define vxge_debug_intr(level, fmt, ...) \
-       vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
+       vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, ##__VA_ARGS__)
 #else
 #define vxge_debug_intr(level, fmt, ...)
 #endif
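
The ## in ##__VA_ARGS__ above is the GNU extension that swallows the trailing comma when a variadic macro is invoked with no extra arguments; without it, a call such as vxge_debug_init(VXGE_ERR, "init failed") expands to printk("init failed" "\n", ) and fails to compile. A minimal sketch of the difference (gcc/clang; the macro names are made up):

#include <stdio.h>

/* without ##, this expands to printf(fmt "\n", ) for zero extra
 * arguments, which is a syntax error:
 * #define dbg_bad(fmt, ...) printf(fmt "\n", __VA_ARGS__)
 */

/* ## drops the trailing comma, so zero extra arguments are fine */
#define dbg_ok(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)

int main(void)
{
        dbg_ok("device probed");        /* no varargs: still compiles */
        dbg_ok("vpath %d opened", 3);   /* with varargs */
        return 0;
}
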
index b454db2..684e4e0 100644 (file)
@@ -616,7 +616,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
        if (bar->iomem) {
                int pf;
 
-               msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
+               msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, ");
                atomic_inc(&bar->refcnt);
                bars_free--;
 
@@ -661,7 +661,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
 
        /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
        bar = &nfp->bar[1];
-       msg += snprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
+       msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, ");
        atomic_inc(&bar->refcnt);
        bars_free--;
 
@@ -680,8 +680,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
                bar->iomem = ioremap(nfp_bar_resource_start(bar),
                                             nfp_bar_resource_len(bar));
                if (bar->iomem) {
-                       msg += snprintf(msg, end - msg,
-                                       "0.%d: Explicit%d, ", 4 + i, i);
+                       msg += scnprintf(msg, end - msg,
+                                        "0.%d: Explicit%d, ", 4 + i, i);
                        atomic_inc(&bar->refcnt);
                        bars_free--;
 
index 54547d5..51adf50 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
 /* Copyright (c) 2017-2019 Pensando Systems, Inc.  All rights reserved. */
 
 #ifndef _IONIC_IF_H_
index c2f5b69..938e19e 100644 (file)
@@ -948,18 +948,18 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
        int i;
 #define REMAIN(__x) (sizeof(buf) - (__x))
 
-       i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
-                    lif->rx_mode, rx_mode);
+       i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
+                     lif->rx_mode, rx_mode);
        if (rx_mode & IONIC_RX_MODE_F_UNICAST)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
        if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
        if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
        if (rx_mode & IONIC_RX_MODE_F_PROMISC)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
        if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
-               i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
+               i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
        netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
 
        err = ionic_adminq_post_wait(lif, &ctx);
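
The snprintf-to-scnprintf conversions in this series (nfp, ionic, sfc, netdevsim) all target the same pattern: the return value is used as a write offset. snprintf() returns the length the output would have had, so once the buffer truncates, i += snprintf(&buf[i], REMAIN(i), ...) pushes the offset past the end of buf; scnprintf() returns the bytes actually written, so the accumulation stays bounded. A userspace analogue of the safe pattern, illustrative only (the kernel provides the real scnprintf()):

#include <stdarg.h>
#include <stdio.h>

/* Caps the return value at the space actually used, like the kernel's
 * scnprintf(), so "i += ..." can never run past the buffer.
 */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (size == 0)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n >= size ? (int)(size - 1) : n;   /* bytes written, not bytes wanted */
}

int main(void)
{
	char buf[32];
	int i = 0;

	i += scnprintf_like(buf + i, sizeof(buf) - i, "rx_mode 0x%04x:", 0x1f);
	i += scnprintf_like(buf + i, sizeof(buf) - i, " RX_MODE_F_UNICAST");
	i += scnprintf_like(buf + i, sizeof(buf) - i, " RX_MODE_F_PROMISC");
	printf("%d bytes, \"%s\"\n", i, buf);   /* i never exceeds sizeof(buf) - 1 */
	return 0;
}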
index 03ee5a3..2e174f4 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB OR BSD-2-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
 /* Copyright (c) 2018-2019 Pensando Systems, Inc.  All rights reserved. */
 
 #ifndef IONIC_REGS_H
index 07f9067..cda5b0a 100644 (file)
@@ -1720,7 +1720,7 @@ static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_d
 
        ahw->reset.seq_error = 0;
        ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
-       if (p_dev->ahw->reset.buff == NULL)
+       if (ahw->reset.buff == NULL)
                return -ENOMEM;
 
        p_buff = p_dev->ahw->reset.buff;
index a2168a1..791d99b 100644 (file)
@@ -5194,7 +5194,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
                rtl_lock_config_regs(tp);
                /* fall through */
-       case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
                flags = PCI_IRQ_LEGACY;
                break;
        default:
@@ -5285,6 +5285,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
        if (!tp->phydev) {
                mdiobus_unregister(new_bus);
                return -ENODEV;
+       } else if (!tp->phydev->drv) {
+               /* Most chip versions fail with the genphy driver.
+                * Therefore ensure that the dedicated PHY driver is loaded.
+                */
+               dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
+               mdiobus_unregister(new_bus);
+               return -EUNATCH;
        }
 
        /* PHY will be woken up in rtl_open() */
@@ -5446,15 +5453,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        int chipset, region;
        int jumbo_max, rc;
 
-       /* Some tools for creating an initramfs don't consider softdeps, then
-        * r8169.ko may be in initramfs, but realtek.ko not. Then the generic
-        * PHY driver is used that doesn't work with most chip versions.
-        */
-       if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
-               dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
-               return -ENOENT;
-       }
-
        dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
        if (!dev)
                return -ENOMEM;
index 2713300..15c731d 100644 (file)
@@ -212,12 +212,14 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
                 * progress on a NIC at any one time.  So no need for locking.
                 */
                for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(hdr[i].u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x",
+                                          le32_to_cpu(hdr[i].u32[0]));
 
                for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(inbuf[i].u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x",
+                                          le32_to_cpu(inbuf[i].u32[0]));
 
                netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
        }
@@ -302,15 +304,15 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
                 */
                for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
                        efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(hdr.u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x", le32_to_cpu(hdr.u32[0]));
                }
 
                for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
                        efx->type->mcdi_read_response(efx, &hdr,
                                        mcdi->resp_hdr_len + (i * 4), 4);
-                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
-                                         " %08x", le32_to_cpu(hdr.u32[0]));
+                       bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+                                          " %08x", le32_to_cpu(hdr.u32[0]));
                }
 
                netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
@@ -1417,9 +1419,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
        }
 
        ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
-       offset = snprintf(buf, len, "%u.%u.%u.%u",
-                         le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
-                         le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+       offset = scnprintf(buf, len, "%u.%u.%u.%u",
+                          le16_to_cpu(ver_words[0]),
+                          le16_to_cpu(ver_words[1]),
+                          le16_to_cpu(ver_words[2]),
+                          le16_to_cpu(ver_words[3]));
 
        /* EF10 may have multiple datapath firmware variants within a
         * single version.  Report which variants are running.
@@ -1427,9 +1431,9 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
                struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
-               offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
-                                  nic_data->rx_dpcpu_fw_id,
-                                  nic_data->tx_dpcpu_fw_id);
+               offset += scnprintf(buf + offset, len - offset, " rx%x tx%x",
+                                   nic_data->rx_dpcpu_fw_id,
+                                   nic_data->tx_dpcpu_fw_id);
 
                /* It's theoretically possible for the string to exceed 31
                 * characters, though in practice the first three version
index dc50ba1..2d5573b 100644 (file)
@@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
 
        ret = rk_gmac_clk_init(plat_dat);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;
 
        ret = rk_gmac_powerup(plat_dat->bsp_priv);
        if (ret)
index d10ac54..13fafd9 100644 (file)
@@ -663,16 +663,22 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
         * In case the wake up interrupt is not passed from the platform
         * so the driver will continue to use the mac irq (ndev->irq)
         */
-       stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       stmmac_res->wol_irq =
+               platform_get_irq_byname_optional(pdev, "eth_wake_irq");
        if (stmmac_res->wol_irq < 0) {
                if (stmmac_res->wol_irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
+               dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
                stmmac_res->wol_irq = stmmac_res->irq;
        }
 
-       stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-       if (stmmac_res->lpi_irq == -EPROBE_DEFER)
-               return -EPROBE_DEFER;
+       stmmac_res->lpi_irq =
+               platform_get_irq_byname_optional(pdev, "eth_lpi");
+       if (stmmac_res->lpi_irq < 0) {
+               if (stmmac_res->lpi_irq == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
index 75757e9..09f279c 100644 (file)
@@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
                if (!net_eq(dev_net(geneve->dev), net))
                        unregister_netdevice_queue(geneve->dev, head);
        }
-
-       WARN_ON_ONCE(!list_empty(&gn->sock_list));
 }
 
 static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
@@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
        /* unregister the devices gathered above */
        unregister_netdevice_many(&list);
        rtnl_unlock();
+
+       list_for_each_entry(net, net_list, exit_list) {
+               const struct geneve_net *gn = net_generic(net, geneve_net_id);
+
+               WARN_ON_ONCE(!list_empty(&gn->sock_list));
+       }
 }
 
 static struct pernet_operations geneve_net_ops = {
index 242b9b0..7fe306e 100644 (file)
@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
        }
 
        while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-               skb->tc_redirected = 0;
+               skb->redirected = 0;
                skb->tc_skip_classify = 1;
 
                u64_stats_update_begin(&txp->tsync);
@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
                rcu_read_unlock();
                skb->skb_iif = txp->dev->ifindex;
 
-               if (!skb->tc_from_ingress) {
+               if (!skb->from_ingress) {
                        dev_queue_xmit(skb);
                } else {
                        skb_pull_rcsum(skb, skb->mac_len);
@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
        txp->rx_bytes += skb->len;
        u64_stats_update_end(&txp->rsync);
 
-       if (!skb->tc_redirected || !skb->skb_iif) {
+       if (!skb->redirected || !skb->skb_iif) {
                dev_kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NETDEV_TX_OK;
index 6ec6fc1..92bc2b2 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/gro_cells.h>
 #include <net/macsec.h>
 #include <linux/phy.h>
+#include <linux/if_arp.h>
 
 #include <uapi/linux/if_macsec.h>
 
@@ -3665,6 +3666,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
+       if (real_dev->type != ARPHRD_ETHER)
+               return -EINVAL;
 
        dev->priv_flags |= IFF_MACSEC;
 
index e27fc1a..3811f1b 100644 (file)
@@ -29,9 +29,9 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
                return -ENOMEM;
 
        p = buf;
-       p += snprintf(p, bufsize - (p - buf),
-                     "SA count=%u tx=%u\n",
-                     ipsec->count, ipsec->tx);
+       p += scnprintf(p, bufsize - (p - buf),
+                      "SA count=%u tx=%u\n",
+                      ipsec->count, ipsec->tx);
 
        for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
                struct nsim_sa *sap = &ipsec->sa[i];
@@ -39,18 +39,18 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
                if (!sap->used)
                        continue;
 
-               p += snprintf(p, bufsize - (p - buf),
-                             "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
-                             i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
-                             sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
-               p += snprintf(p, bufsize - (p - buf),
-                             "sa[%i]    spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
-                             i, be32_to_cpu(sap->xs->id.spi),
-                             sap->xs->id.proto, sap->salt, sap->crypt);
-               p += snprintf(p, bufsize - (p - buf),
-                             "sa[%i]    key=0x%08x %08x %08x %08x\n",
-                             i, sap->key[0], sap->key[1],
-                             sap->key[2], sap->key[3]);
+               p += scnprintf(p, bufsize - (p - buf),
+                              "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+                              i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+                              sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
+               p += scnprintf(p, bufsize - (p - buf),
+                              "sa[%i]    spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+                              i, be32_to_cpu(sap->xs->id.spi),
+                              sap->xs->id.proto, sap->salt, sap->crypt);
+               p += scnprintf(p, bufsize - (p - buf),
+                              "sa[%i]    key=0x%08x %08x %08x %08x\n",
+                              i, sap->key[0], sap->key[1],
+                              sap->key[2], sap->key[3]);
        }
 
        len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
index 967f57e..9a07ad1 100644 (file)
@@ -28,7 +28,8 @@
 #define DP83867_CTRL           0x1f
 
 /* Extended Registers */
-#define DP83867_CFG4            0x0031
+#define DP83867_FLD_THR_CFG    0x002e
+#define DP83867_CFG4           0x0031
 #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
 #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS   (3 << 5)
 #define DP83867_CFG4_SGMII_ANEG_TIMER_800US  (2 << 5)
@@ -91,6 +92,7 @@
 #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK    GENMASK(2, 0)
 #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT   0
 #define DP83867_STRAP_STS2_CLK_SKEW_NONE       BIT(2)
+#define DP83867_STRAP_STS2_STRAP_FLD           BIT(10)
 
 /* PHY CTRL bits */
 #define DP83867_PHYCR_TX_FIFO_DEPTH_SHIFT      14
 /* CFG4 bits */
 #define DP83867_CFG4_PORT_MIRROR_EN              BIT(0)
 
+/* FLD_THR_CFG */
+#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK       0x7
+
 enum {
        DP83867_PORT_MIRROING_KEEP,
        DP83867_PORT_MIRROING_EN,
@@ -476,6 +481,20 @@ static int dp83867_config_init(struct phy_device *phydev)
                phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
                                   BIT(7));
 
+       bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
+       if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
+               /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
+                * be set to 0x2. This may cause the PHY link to be unstable -
+                * the default value 0x1 needs to be restored.
+                */
+               ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
+                                    DP83867_FLD_THR_CFG,
+                                    DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
+                                    0x1);
+               if (ret)
+                       return ret;
+       }
+
        if (phy_interface_is_rgmii(phydev) ||
            phydev->interface == PHY_INTERFACE_MODE_SGMII) {
                val = phy_read(phydev, MII_DP83867_PHYCTRL);
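
The FLD fix reads the strap status and, when the fast-link-drop threshold was strapped to 0x2, rewrites only the 3-bit energy-lost threshold field back to its default of 0x1. phy_modify_mmd(phydev, devad, reg, mask, set) is a read-modify-write: bits in mask are cleared, bits in set are written, everything else is preserved. A small standalone sketch of that field update (the register value below is hypothetical, not taken from the datasheet):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Roughly what phy_modify_mmd() does to the register: clear the masked
 * field, write the new value into it, leave the other bits untouched.
 */
static uint16_t modify_field(uint16_t reg, uint16_t mask, uint16_t set)
{
	return (reg & ~mask) | (set & mask);
}

int main(void)
{
	uint16_t fld_thr = 0x00c2;   /* hypothetical FLD_THR_CFG value, strap left the field at 0x2 */

	fld_thr = modify_field(fld_thr, 0x7 /* ENERGY_LOST_THR_MASK */, 0x1);
	assert(fld_thr == 0x00c1);   /* threshold back to 0x1, upper bits unchanged */
	printf("FLD_THR_CFG = 0x%04x\n", fld_thr);
	return 0;
}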
index 4a28fb2..fbd3689 100644 (file)
@@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       priv->clk = devm_clk_get(&pdev->dev, NULL);
-       if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
+       priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);
-       else
-               priv->clk = NULL;
 
        ret = clk_prepare_enable(priv->clk);
        if (ret)
index 88d409e..aad6809 100644 (file)
@@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev)
 static int mdio_mux_iproc_resume(struct device *dev)
 {
        struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
+       int rc;
 
-       clk_prepare_enable(md->core_clk);
+       rc = clk_prepare_enable(md->core_clk);
+       if (rc) {
+               dev_err(md->dev, "failed to enable core clk\n");
+               return rc;
+       }
        mdio_mux_iproc_config(md);
 
        return 0;
index d949ea7..6900c68 100644 (file)
@@ -572,13 +572,15 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
  * the sfp_bus structure, incrementing its reference count.  This must
  * be put via sfp_bus_put() when done.
  *
- * Returns: on success, a pointer to the sfp_bus structure,
- *         %NULL if no SFP is specified,
- *         on failure, an error pointer value:
- *             corresponding to the errors detailed for
- *             fwnode_property_get_reference_args().
- *             %-ENOMEM if we failed to allocate the bus.
- *             an error from the upstream's connect_phy() method.
+ * Returns:
+ *         - on success, a pointer to the sfp_bus structure,
+ *         - %NULL if no SFP is specified,
+ *         - on failure, an error pointer value:
+ *
+ *           - corresponding to the errors detailed for
+ *             fwnode_property_get_reference_args().
+ *           - %-ENOMEM if we failed to allocate the bus.
+ *           - an error from the upstream's connect_phy() method.
  */
 struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
 {
@@ -612,13 +614,15 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
  * the SFP bus using sfp_register_upstream().  This takes a reference on the
  * bus, so it is safe to put the bus after this call.
  *
- * Returns: on success, a pointer to the sfp_bus structure,
- *         %NULL if no SFP is specified,
- *         on failure, an error pointer value:
- *             corresponding to the errors detailed for
- *             fwnode_property_get_reference_args().
- *             %-ENOMEM if we failed to allocate the bus.
- *             an error from the upstream's connect_phy() method.
+ * Returns:
+ *         - on success, a pointer to the sfp_bus structure,
+ *         - %NULL if no SFP is specified,
+ *         - on failure, an error pointer value:
+ *
+ *           - corresponding to the errors detailed for
+ *             fwnode_property_get_reference_args().
+ *           - %-ENOMEM if we failed to allocate the bus.
+ *           - an error from the upstream's connect_phy() method.
  */
 int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
                         const struct sfp_upstream_ops *ops)
index 5754bb6..6c738a2 100644 (file)
@@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1435, 0xd182, 5)},    /* Wistron NeWeb D18 */
        {QMI_FIXED_INTF(0x1435, 0xd191, 4)},    /* Wistron NeWeb D19Q1 */
        {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
+       {QMI_FIXED_INTF(0x1690, 0x7588, 4)},    /* ASKEY WWHC050 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
        {QMI_FIXED_INTF(0x16d8, 0x6007, 0)},    /* CMOTech CHE-628S */
        {QMI_FIXED_INTF(0x16d8, 0x6008, 0)},    /* CMOTech CMU-301 */
index d3b08b7..45308b3 100644 (file)
@@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
 /* Setup stats when device is created */
 static int vxlan_init(struct net_device *dev)
 {
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       int err;
+
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
+       err = gro_cells_init(&vxlan->gro_cells, dev);
+       if (err) {
+               free_percpu(dev->tstats);
+               return err;
+       }
+
        return 0;
 }
 
@@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev)
 
        vxlan->dev = dev;
 
-       gro_cells_init(&vxlan->gro_cells, dev);
-
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                spin_lock_init(&vxlan->hash_lock[h]);
                INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
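
Moving gro_cells_init() out of vxlan_setup() and into vxlan_init() lets its return value actually be checked: ndo_init can fail, setup cannot, and the tstats allocation made just before it must be released if the second step fails. A generic, illustrative sketch of that unwind-on-partial-failure pattern (the names below are made up, not the driver's):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_dev {
	void *tstats;
	void *gro_cells;
};

static int fake_gro_cells_init(struct fake_dev *d)
{
	d->gro_cells = malloc(64);
	return d->gro_cells ? 0 : -ENOMEM;
}

static int fake_init(struct fake_dev *d)
{
	int err;

	d->tstats = calloc(1, 128);
	if (!d->tstats)
		return -ENOMEM;

	err = fake_gro_cells_init(d);
	if (err) {
		free(d->tstats);        /* undo the first step on failure */
		d->tstats = NULL;
		return err;
	}
	return 0;
}

int main(void)
{
	struct fake_dev dev = { 0 };

	printf("init: %d\n", fake_init(&dev));
	free(dev.gro_cells);
	free(dev.tstats);
	return 0;
}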
index cdc9696..3ac3f85 100644 (file)
@@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
        u32 mtu;
        int ret;
 
-       if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) {
+       if (unlikely(!wg_check_packet_protocol(skb))) {
                ret = -EPROTONOSUPPORT;
                net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
                goto err;
index bda2640..802099c 100644 (file)
@@ -411,11 +411,7 @@ static int set_peer(struct wg_device *wg, struct nlattr **attrs)
 
                peer = wg_peer_create(wg, public_key, preshared_key);
                if (IS_ERR(peer)) {
-                       /* Similar to the above, if the key is invalid, we skip
-                        * it without fanfare, so that services don't need to
-                        * worry about doing key validation themselves.
-                        */
-                       ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer);
+                       ret = PTR_ERR(peer);
                        peer = NULL;
                        goto out;
                }
@@ -569,7 +565,7 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
                                                         private_key);
                list_for_each_entry_safe(peer, temp, &wg->peer_list,
                                         peer_list) {
-                       BUG_ON(!wg_noise_precompute_static_static(peer));
+                       wg_noise_precompute_static_static(peer);
                        wg_noise_expire_current_peer_keypairs(peer);
                }
                wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
index 919d9d8..708dc61 100644 (file)
@@ -44,32 +44,23 @@ void __init wg_noise_init(void)
 }
 
 /* Must hold peer->handshake.static_identity->lock */
-bool wg_noise_precompute_static_static(struct wg_peer *peer)
+void wg_noise_precompute_static_static(struct wg_peer *peer)
 {
-       bool ret;
-
        down_write(&peer->handshake.lock);
-       if (peer->handshake.static_identity->has_identity) {
-               ret = curve25519(
-                       peer->handshake.precomputed_static_static,
+       if (!peer->handshake.static_identity->has_identity ||
+           !curve25519(peer->handshake.precomputed_static_static,
                        peer->handshake.static_identity->static_private,
-                       peer->handshake.remote_static);
-       } else {
-               u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
-
-               ret = curve25519(empty, empty, peer->handshake.remote_static);
+                       peer->handshake.remote_static))
                memset(peer->handshake.precomputed_static_static, 0,
                       NOISE_PUBLIC_KEY_LEN);
-       }
        up_write(&peer->handshake.lock);
-       return ret;
 }
 
-bool wg_noise_handshake_init(struct noise_handshake *handshake,
-                          struct noise_static_identity *static_identity,
-                          const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
-                          const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
-                          struct wg_peer *peer)
+void wg_noise_handshake_init(struct noise_handshake *handshake,
+                            struct noise_static_identity *static_identity,
+                            const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+                            const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+                            struct wg_peer *peer)
 {
        memset(handshake, 0, sizeof(*handshake));
        init_rwsem(&handshake->lock);
@@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct noise_handshake *handshake,
                       NOISE_SYMMETRIC_KEY_LEN);
        handshake->static_identity = static_identity;
        handshake->state = HANDSHAKE_ZEROED;
-       return wg_noise_precompute_static_static(peer);
+       wg_noise_precompute_static_static(peer);
 }
 
 static void handshake_zero(struct noise_handshake *handshake)
@@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
        return true;
 }
 
+static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN],
+                                           u8 key[NOISE_SYMMETRIC_KEY_LEN],
+                                           const u8 precomputed[NOISE_PUBLIC_KEY_LEN])
+{
+       static u8 zero_point[NOISE_PUBLIC_KEY_LEN];
+       if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN)))
+               return false;
+       kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN,
+           NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
+           chaining_key);
+       return true;
+}
+
 static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
 {
        struct blake2s_state blake;
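
wg_noise_precompute_static_static() now stores an all-zero buffer when the static-static DH cannot be computed, and mix_precomputed_dh() refuses to key the handshake from such a value; crypto_memneq() does the comparison without data-dependent early exits. An illustrative userspace sketch of that rejection check (not the kernel implementation):

#include <stddef.h>
#include <stdio.h>

#define KEY_LEN 32

/* Returns nonzero if any byte differs from zero, scanning the whole buffer
 * so the loop's timing does not reveal where the first nonzero byte sits.
 */
static int memneq_zero(const unsigned char *buf, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= buf[i];
	return diff;
}

int main(void)
{
	unsigned char bad[KEY_LEN] = { 0 };             /* invalid precomputed DH result */
	unsigned char good[KEY_LEN] = { [0] = 0x9c };   /* some nonzero shared secret */

	printf("zeroed DH rejected:   %s\n", memneq_zero(bad, KEY_LEN) ? "no" : "yes");
	printf("nonzero DH accepted:  %s\n", memneq_zero(good, KEY_LEN) ? "yes" : "no");
	return 0;
}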
@@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
                        NOISE_PUBLIC_KEY_LEN, key, handshake->hash);
 
        /* ss */
-       kdf(handshake->chaining_key, key, NULL,
-           handshake->precomputed_static_static, NOISE_HASH_LEN,
-           NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
-           handshake->chaining_key);
+       if (!mix_precomputed_dh(handshake->chaining_key, key,
+                               handshake->precomputed_static_static))
+               goto out;
 
        /* {t} */
        tai64n_now(timestamp);
@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
        handshake = &peer->handshake;
 
        /* ss */
-       kdf(chaining_key, key, NULL, handshake->precomputed_static_static,
-           NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
-           chaining_key);
+       if (!mix_precomputed_dh(chaining_key, key,
+                               handshake->precomputed_static_static))
+           goto out;
 
        /* {t} */
        if (!message_decrypt(t, src->encrypted_timestamp,
index 138a07b..f532d59 100644 (file)
@@ -94,11 +94,11 @@ struct noise_handshake {
 struct wg_device;
 
 void wg_noise_init(void);
-bool wg_noise_handshake_init(struct noise_handshake *handshake,
-                          struct noise_static_identity *static_identity,
-                          const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
-                          const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
-                          struct wg_peer *peer);
+void wg_noise_handshake_init(struct noise_handshake *handshake,
+                            struct noise_static_identity *static_identity,
+                            const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
+                            const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
+                            struct wg_peer *peer);
 void wg_noise_handshake_clear(struct noise_handshake *handshake);
 static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns)
 {
@@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer);
 void wg_noise_set_static_identity_private_key(
        struct noise_static_identity *static_identity,
        const u8 private_key[NOISE_PUBLIC_KEY_LEN]);
-bool wg_noise_precompute_static_static(struct wg_peer *peer);
+void wg_noise_precompute_static_static(struct wg_peer *peer);
 
 bool
 wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
index 071eedf..1d634bd 100644 (file)
@@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
                return ERR_PTR(ret);
        peer->device = wg;
 
-       if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
-                                    public_key, preshared_key, peer)) {
-               ret = -EKEYREJECTED;
-               goto err_1;
-       }
+       wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
+                               public_key, preshared_key, peer);
        if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
                goto err_1;
        if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
index fecb559..3432232 100644 (file)
@@ -66,7 +66,7 @@ struct packet_cb {
 #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
 
 /* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
+static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
 {
        if (skb_network_header(skb) >= skb->head &&
            (skb_network_header(skb) + sizeof(struct iphdr)) <=
@@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
        return 0;
 }
 
+static inline bool wg_check_packet_protocol(struct sk_buff *skb)
+{
+       __be16 real_protocol = wg_examine_packet_protocol(skb);
+       return real_protocol && skb->protocol == real_protocol;
+}
+
 static inline void wg_reset_packet(struct sk_buff *skb)
 {
        skb_scrub_packet(skb, true);
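
The rename from wg_skb_examine_untrusted_ip_hdr() to wg_examine_packet_protocol() comes with a stricter helper: wg_check_packet_protocol() requires that the protocol derived from the IP version nibble matches what skb->protocol already claims, so a frame labelled IPv4 that actually carries an IPv6 header (or vice versa) is rejected. A minimal userspace sketch of the same check on a raw buffer, illustrative only:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

/* Derive the real protocol from the first header byte, or 0 if unknown
 * or too short to hold the corresponding IP header.
 */
static uint16_t examine_packet_protocol(const uint8_t *pkt, size_t len)
{
	if (len >= 20 && (pkt[0] >> 4) == 4)
		return ETH_P_IP;
	if (len >= 40 && (pkt[0] >> 4) == 6)
		return ETH_P_IPV6;
	return 0;
}

static bool check_packet_protocol(const uint8_t *pkt, size_t len,
				  uint16_t claimed_protocol)
{
	uint16_t real = examine_packet_protocol(pkt, len);

	return real && real == claimed_protocol;
}

int main(void)
{
	uint8_t v4_header[20] = { 0x45 };       /* IPv4, IHL 5 */

	printf("claimed IPv4: %s\n",
	       check_packet_protocol(v4_header, sizeof(v4_header), ETH_P_IP) ? "ok" : "drop");
	printf("claimed IPv6: %s\n",
	       check_packet_protocol(v4_header, sizeof(v4_header), ETH_P_IPV6) ? "ok" : "drop");
	return 0;
}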
@@ -94,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb)
        skb->dev = NULL;
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
-       skb_reset_tc(skb);
 #endif
+       skb_reset_redirect(skb);
        skb->hdr_len = skb_headroom(skb);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
index 4a15389..da3b782 100644 (file)
@@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
        size_t data_offset, data_len, header_len;
        struct udphdr *udp;
 
-       if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol ||
+       if (unlikely(!wg_check_packet_protocol(skb) ||
                     skb_transport_header(skb) < skb->head ||
                     (skb_transport_header(skb) + sizeof(struct udphdr)) >
                             skb_tail_pointer(skb)))
@@ -388,7 +388,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
         */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = ~0; /* All levels */
-       skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb);
+       skb->protocol = wg_examine_packet_protocol(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
@@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
                wg_packet_consume_data(wg, skb);
                break;
        default:
-               net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n",
-                                       wg->dev->name, skb);
+               WARN(1, "Non-exhaustive parsing of packet header led to unknown packet type!\n");
                goto err;
        }
        return;
index a22a830..355af47 100644 (file)
@@ -283,6 +283,7 @@ const struct iwl_cfg iwl_ax101_cfg_qu_c0_hr_b0 = {
         * HT size; mac80211 would otherwise pick the HE max (256) by default.
         */
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .tx_with_siso_diversity = true,
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
@@ -309,6 +310,7 @@ const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
         * HT size; mac80211 would otherwise pick the HE max (256) by default.
         */
        .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .tx_with_siso_diversity = true,
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
index 48d375a..ba2aff3 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -491,13 +491,13 @@ int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
 }
 IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
 
-void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
-                     struct iwl_per_chain_offset_group *table)
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                    struct iwl_per_chain_offset_group *table)
 {
        int ret, i, j;
 
        if (!iwl_sar_geo_support(fwrt))
-               return;
+               return -EOPNOTSUPP;
 
        ret = iwl_sar_get_wgds_table(fwrt);
        if (ret < 0) {
@@ -505,7 +505,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
                                "Geo SAR BIOS table invalid or unavailable. (%d)\n",
                                ret);
                /* we don't fail if the table is not available */
-               return;
+               return -ENOENT;
        }
 
        BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
@@ -530,5 +530,7 @@ void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
                                        i, j, value[1], value[2], value[0]);
                }
        }
+
+       return 0;
 }
 IWL_EXPORT_SYMBOL(iwl_sar_geo_init);
index 4a6e826..5590e5c 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019        Intel Corporation
+ * Copyright(c) 2018 - 2020        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019       Intel Corporation
+ * Copyright(c) 2018 - 2020       Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -171,8 +171,9 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
 int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
                                 struct iwl_host_cmd *cmd);
 
-void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
-                     struct iwl_per_chain_offset_group *table);
+int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                    struct iwl_per_chain_offset_group *table);
+
 #else /* CONFIG_ACPI */
 
 static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
@@ -243,9 +244,10 @@ static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
        return -ENOENT;
 }
 
-static inline void iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
-                                   struct iwl_per_chain_offset_group *table)
+static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
+                                  struct iwl_per_chain_offset_group *table)
 {
+       return -ENOENT;
 }
 
 #endif /* CONFIG_ACPI */
index 91df1ee..8796ab8 100644 (file)
@@ -8,7 +8,7 @@
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1409,11 +1409,7 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
                goto out;
        }
 
-       /*
-        * region register have absolute value so apply rxf offset after
-        * reading the registers
-        */
-       offs += rxf_data.offset;
+       offs = rxf_data.offset;
 
        /* Lock fence */
        iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
@@ -2494,10 +2490,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
                goto out;
        }
 
-       if (iwl_fw_dbg_stop_restart_recording(fwrt, &params, true)) {
-               IWL_ERR(fwrt, "Failed to stop DBGC recording, aborting dump\n");
-               goto out;
-       }
+       iwl_fw_dbg_stop_restart_recording(fwrt, &params, true);
 
        IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
        if (iwl_trans_dbg_ini_valid(fwrt->trans))
@@ -2662,14 +2655,14 @@ static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
        return 0;
 }
 
-int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
-                                     struct iwl_fw_dbg_params *params,
-                                     bool stop)
+void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+                                      struct iwl_fw_dbg_params *params,
+                                      bool stop)
 {
        int ret = 0;
 
        if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
-               return 0;
+               return;
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP))
@@ -2686,7 +2679,5 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
                        iwl_fw_set_dbg_rec_on(fwrt);
        }
 #endif
-
-       return ret;
 }
 IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);
index 179f290..9d35132 100644 (file)
@@ -239,9 +239,9 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
        _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev),         \
                                        iwl_fw_dbg_get_trigger((fwrt)->fw,\
                                                               (trig)))
-int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
-                                     struct iwl_fw_dbg_params *params,
-                                     bool stop);
+void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
+                                      struct iwl_fw_dbg_params *params,
+                                      bool stop);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)
index 2d1cb46..0481796 100644 (file)
@@ -1467,7 +1467,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                kmemdup(pieces->dbg_conf_tlv[i],
                                        pieces->dbg_conf_tlv_len[i],
                                        GFP_KERNEL);
-                       if (!pieces->dbg_conf_tlv_len[i])
+                       if (!pieces->dbg_conf_tlv[i])
                                goto out_free_fw;
                }
        }
index 54c094e..98263cd 100644 (file)
@@ -762,10 +762,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
        u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
        union geo_tx_power_profiles_cmd cmd;
        u16 len;
+       int ret;
 
        cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
 
-       iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
+       ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
+       /*
+        * It is a valid scenario to not support SAR, or miss wgds table,
+        * It is a valid scenario to not support SAR or to be missing the WGDS table,
+        */
+       if (ret)
+               return 0;
 
        cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
 
index e2cf9e0..ca99a9c 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,11 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
             (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
                flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
 
-       /* consider our LDPC support in case of HE */
+       /* consider LDPC support in case of HE */
+       if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] &
+           IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+               flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
        if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
            !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
             IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
@@ -191,11 +195,13 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
 {
        u16 supp;
        int i, highest_mcs;
+       u8 nss = sta->rx_nss;
 
-       for (i = 0; i < sta->rx_nss; i++) {
-               if (i == IWL_TLC_NSS_MAX)
-                       break;
+       /* the station supports only a single receive chain */
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+               nss = 1;
 
+       for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
                highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
                if (!highest_mcs)
                        continue;
@@ -241,8 +247,13 @@ rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
        u16 tx_mcs_160 =
                le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
        int i;
+       u8 nss = sta->rx_nss;
+
+       /* the station supports only a single receive chain */
+       if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+               nss = 1;
 
-       for (i = 0; i < sta->rx_nss && i < IWL_TLC_NSS_MAX; i++) {
+       for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
                u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
                u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
                u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
@@ -303,8 +314,14 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
                cmd->mode = IWL_TLC_MNG_MODE_HT;
                cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_HT_BW_NONE_160] =
                        cpu_to_le16(ht_cap->mcs.rx_mask[0]);
-               cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
-                       cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+
+               /* the station supports only a single receive chain */
+               if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+                       cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
+                               0;
+               else
+                       cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_HT_BW_NONE_160] =
+                               cpu_to_le16(ht_cap->mcs.rx_mask[1]);
        }
 }
 
index c0b420f..1babc4b 100644 (file)
@@ -785,7 +785,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                if (!le32_to_cpu(notif->status)) {
                        iwl_mvm_te_check_disconnect(mvm, vif,
                                                    "Session protection failure");
+                       spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
+                       spin_unlock_bh(&mvm->time_event_lock);
                }
 
                if (le32_to_cpu(notif->start)) {
@@ -801,7 +803,9 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
                         */
                        iwl_mvm_te_check_disconnect(mvm, vif,
                                                    "No beacon heard and the session protection is over already...");
+                       spin_lock_bh(&mvm->time_event_lock);
                        iwl_mvm_te_clear_data(mvm, te_data);
+                       spin_unlock_bh(&mvm->time_event_lock);
                }
 
                goto out_unlock;
index 97f227f..f441b20 100644 (file)
@@ -981,6 +981,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x2526, 0x0014, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x0018, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x001C, iwl9260_2ac_160_cfg, iwl9260_160_name),
+       IWL_DEV_INFO(0x2526, 0x4010, iwl9260_2ac_160_cfg, iwl9260_160_name),
+       IWL_DEV_INFO(0x2526, 0x4018, iwl9260_2ac_160_cfg, iwl9260_160_name),
+       IWL_DEV_INFO(0x2526, 0x401C, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x6010, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x6014, iwl9260_2ac_160_cfg, iwl9260_160_name),
        IWL_DEV_INFO(0x2526, 0x8014, iwl9260_2ac_160_cfg, iwl9260_160_name),
index 9177298..e17f70b 100644 (file)
@@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
         rxmcs == DESC92C_RATE11M)
 
 struct phy_status_rpt {
+       u8      padding[2];
        u8      ch_corr[2];
        u8      cck_sig_qual_ofdm_pwdb_all;
        u8      cck_agc_rpt_ofdm_cfosho_a;
index ed049c9..f140f7d 100644 (file)
@@ -6274,7 +6274,7 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
                                WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                                WIPHY_FLAG_HAS_CHANNEL_SWITCH |
-+                              WIPHY_FLAG_IBSS_RSN;
+                               WIPHY_FLAG_IBSS_RSN;
 
        wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
 
index 0cc9ac8..ed21231 100644 (file)
@@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
        const struct firmware *fw;
        struct sk_buff *skb;
        unsigned long len;
-       u8 max_size, payload_size;
+       int max_size, payload_size;
        int rc = 0;
 
        if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
@@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
 
        while (len) {
 
-               payload_size = min_t(unsigned long, (unsigned long) max_size,
-                                    len);
+               payload_size = min_t(unsigned long, max_size, len);
 
                skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
                                    GFP_KERNEL);
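
The fdp change turns max_size from u8 into int because the value comes from a payload-size query that can return a negative errno; stored in a u8, the error wraps to a large positive length and slips past any sign check. A tiny illustrative example of that truncation (the values are made up):

#include <stdint.h>
#include <stdio.h>

#define EINVAL 22

int main(void)
{
	int ret = -EINVAL;        /* e.g. a max-payload-size query failing */
	uint8_t truncated = ret;  /* what the old "u8 max_size" would store */
	int kept = ret;           /* what the fixed "int max_size" stores */

	printf("u8 sees %u (error lost), int sees %d (error visible)\n",
	       truncated, kept);
	return 0;
}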
index 3e85c5c..0fe08c4 100644 (file)
@@ -850,9 +850,11 @@ out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-               sizeof(struct nvme_command), DMA_TO_DEVICE);
-       ctrl->async_event_sqe.data = NULL;
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
index af674fc..5bb5342 100644 (file)
@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
        return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
        int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
        while (cmd->cur_sg) {
                struct page *page = sg_page(cmd->cur_sg);
                u32 left = cmd->cur_sg->length - cmd->offset;
+               int flags = MSG_DONTWAIT;
+
+               if ((!last_in_batch && cmd->queue->send_list_len) ||
+                   cmd->wbytes_done + left < cmd->req.transfer_len ||
+                   queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+                       flags |= MSG_MORE;
 
                ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-                                       left, MSG_DONTWAIT | MSG_MORE);
+                                       left, flags);
                if (ret <= 0)
                        return ret;
 
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
        }
 
        if (cmd->state == NVMET_TCP_SEND_DATA) {
-               ret = nvmet_try_send_data(cmd);
+               ret = nvmet_try_send_data(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }
index b520a98..7a94e11 100644 (file)
@@ -864,7 +864,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
                goto qc24_fail_command;
        }
 
-       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+       if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        ql_dbg(ql_dbg_io, vha, 0x3005,
@@ -946,7 +946,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
                goto qc24_fail_command;
        }
 
-       if (atomic_read(&fcport->state) != FCS_ONLINE) {
+       if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
                if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
                        atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
                        ql_dbg(ql_dbg_io, vha, 0x3077,
index 8ca9299..2710a0e 100644 (file)
@@ -3169,9 +3169,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
                q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
                rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
-       } else
+       } else {
+               q->limits.io_opt = 0;
                rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
                                      (sector_t)BLK_DEF_MAX_SECTORS);
+       }
 
        /* Do not exceed controller limit */
        rw_max = min(rw_max, queue_max_hw_sectors(q));
index 70014ec..7b642c3 100644 (file)
@@ -233,10 +233,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
                goto err_allocate_irqs;
        }
 
-       err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
-       if (err)
-               goto err_register_dpio_irq;
-
        priv->io = dpaa2_io_create(&desc, dev);
        if (!priv->io) {
                dev_err(dev, "dpaa2_io_create failed\n");
@@ -244,6 +240,10 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
                goto err_dpaa2_io_create;
        }
 
+       err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
+       if (err)
+               goto err_register_dpio_irq;
+
        dev_info(dev, "probed\n");
        dev_dbg(dev, "   receives_notifications = %d\n",
                desc.receives_notifications);
index 2dad496..8d4d050 100644 (file)
@@ -59,7 +59,7 @@ static int __init exynos_chipid_early_init(void)
        syscon = of_find_compatible_node(NULL, NULL,
                                         "samsung,exynos4210-chipid");
        if (!syscon)
-               return ENODEV;
+               return -ENODEV;
 
        regmap = device_node_to_regmap(syscon);
        of_node_put(syscon);
index 0026eb6..27b4cd7 100644 (file)
@@ -139,6 +139,9 @@ static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
        u32 index = get_session_index(session);
        struct amdtee_session *sess;
 
+       if (index >= TEE_NUM_SESSIONS)
+               return NULL;
+
        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (ta_handle == sess->ta_handle &&
                    test_bit(index, sess->sess_mask))
index ff3994a..6765949 100644 (file)
@@ -243,6 +243,17 @@ static void afs_cm_destructor(struct afs_call *call)
        call->buffer = NULL;
 }
 
+/*
+ * Abort a service call from within an action function.
+ */
+static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
+                                  const char *why)
+{
+       rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+                               abort_code, error, why);
+       afs_set_call_complete(call, error, 0);
+}
+
 /*
  * The server supplied a list of callbacks that it wanted to break.
  */
@@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
        if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
                afs_send_empty_reply(call);
        else
-               rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                       1, 1, "K-1");
+               afs_abort_service_call(call, 1, 1, "K-1");
 
        afs_put_call(call);
        _leave("");
index cfe62b1..e1b9ed6 100644 (file)
@@ -145,6 +145,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
        read_lock(&server->fs_lock);
        ac.alist = rcu_dereference_protected(server->addresses,
                                             lockdep_is_held(&server->fs_lock));
+       afs_get_addrlist(ac.alist);
        read_unlock(&server->fs_lock);
 
        atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
@@ -163,6 +164,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
 
        if (!in_progress)
                afs_fs_probe_done(server);
+       afs_put_addrlist(ac.alist);
        return in_progress;
 }
 
index 35f951a..ef732dd 100644 (file)
@@ -154,7 +154,7 @@ struct afs_call {
        };
        unsigned char           unmarshall;     /* unmarshalling phase */
        unsigned char           addr_ix;        /* Address in ->alist */
-       bool                    incoming;       /* T if incoming call */
+       bool                    drop_ref;       /* T if need to drop ref for incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
        bool                    need_attention; /* T if RxRPC poked us */
        bool                    async;          /* T if asynchronous */
@@ -1209,8 +1209,16 @@ static inline void afs_set_call_complete(struct afs_call *call,
                ok = true;
        }
        spin_unlock_bh(&call->state_lock);
-       if (ok)
+       if (ok) {
                trace_afs_call_done(call);
+
+               /* Asynchronous calls have two refs to release - one from the alloc and
+                * one queued with the work item - and we can't just deallocate the
+                * call because the work item may be queued again.
+                */
+               if (call->drop_ref)
+                       afs_put_call(call);
+       }
 }
 
 /*
index 58d3965..1ecc67d 100644 (file)
@@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls;
 
 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
-static void afs_delete_async_call(struct work_struct *);
 static void afs_process_async_call(struct work_struct *);
 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -169,7 +168,7 @@ void afs_put_call(struct afs_call *call)
        int n = atomic_dec_return(&call->usage);
        int o = atomic_read(&net->nr_outstanding_calls);
 
-       trace_afs_call(call, afs_call_trace_put, n + 1, o,
+       trace_afs_call(call, afs_call_trace_put, n, o,
                       __builtin_return_address(0));
 
        ASSERTCMP(n, >=, 0);
@@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
        /* If the call is going to be asynchronous, we need an extra ref for
         * the call to hold itself so the caller need not hang on to its ref.
         */
-       if (call->async)
+       if (call->async) {
                afs_get_call(call, afs_call_trace_get);
+               call->drop_ref = true;
+       }
 
        /* create a call */
        rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
@@ -413,7 +414,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
                                          afs_wake_up_async_call :
                                          afs_wake_up_call_waiter),
                                         call->upgrade,
-                                        call->intr,
+                                        (call->intr ? RXRPC_PREINTERRUPTIBLE :
+                                         RXRPC_UNINTERRUPTIBLE),
                                         call->debug_id);
        if (IS_ERR(rxcall)) {
                ret = PTR_ERR(rxcall);
@@ -584,8 +586,6 @@ static void afs_deliver_to_call(struct afs_call *call)
 done:
        if (call->type->done)
                call->type->done(call);
-       if (state == AFS_CALL_COMPLETE && call->incoming)
-               afs_put_call(call);
 out:
        _leave("");
        return;
@@ -604,11 +604,7 @@ call_complete:
 long afs_wait_for_call_to_complete(struct afs_call *call,
                                   struct afs_addr_cursor *ac)
 {
-       signed long rtt2, timeout;
        long ret;
-       bool stalled = false;
-       u64 rtt;
-       u32 life, last_life;
        bool rxrpc_complete = false;
 
        DECLARE_WAITQUEUE(myself, current);
@@ -619,14 +615,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
        if (ret < 0)
                goto out;
 
-       rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
-       rtt2 = nsecs_to_jiffies64(rtt) * 2;
-       if (rtt2 < 2)
-               rtt2 = 2;
-
-       timeout = rtt2;
-       rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
-
        add_wait_queue(&call->waitq, &myself);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
@@ -637,37 +625,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
                        call->need_attention = false;
                        __set_current_state(TASK_RUNNING);
                        afs_deliver_to_call(call);
-                       timeout = rtt2;
                        continue;
                }
 
                if (afs_check_call_state(call, AFS_CALL_COMPLETE))
                        break;
 
-               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
+               if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
                        /* rxrpc terminated the call. */
                        rxrpc_complete = true;
                        break;
                }
 
-               if (call->intr && timeout == 0 &&
-                   life == last_life && signal_pending(current)) {
-                       if (stalled)
-                               break;
-                       __set_current_state(TASK_RUNNING);
-                       rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
-                       timeout = rtt2;
-                       stalled = true;
-                       continue;
-               }
-
-               if (life != last_life) {
-                       timeout = rtt2;
-                       last_life = life;
-                       stalled = false;
-               }
-
-               timeout = schedule_timeout(timeout);
+               schedule();
        }
 
        remove_wait_queue(&call->waitq, &myself);
@@ -735,7 +705,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 
        u = atomic_fetch_add_unless(&call->usage, 1, 0);
        if (u != 0) {
-               trace_afs_call(call, afs_call_trace_wake, u,
+               trace_afs_call(call, afs_call_trace_wake, u + 1,
                               atomic_read(&call->net->nr_outstanding_calls),
                               __builtin_return_address(0));
 
@@ -744,21 +714,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
        }
 }
 
-/*
- * Delete an asynchronous call.  The work item carries a ref to the call struct
- * that we need to release.
- */
-static void afs_delete_async_call(struct work_struct *work)
-{
-       struct afs_call *call = container_of(work, struct afs_call, async_work);
-
-       _enter("");
-
-       afs_put_call(call);
-
-       _leave("");
-}
-
 /*
  * Perform I/O processing on an asynchronous call.  The work item carries a ref
  * to the call struct that we either need to release or to pass on.
@@ -774,16 +729,6 @@ static void afs_process_async_call(struct work_struct *work)
                afs_deliver_to_call(call);
        }
 
-       if (call->state == AFS_CALL_COMPLETE) {
-               /* We have two refs to release - one from the alloc and one
-                * queued with the work item - and we can't just deallocate the
-                * call because the work item may be queued again.
-                */
-               call->async_work.func = afs_delete_async_call;
-               if (!queue_work(afs_async_calls, &call->async_work))
-                       afs_put_call(call);
-       }
-
        afs_put_call(call);
        _leave("");
 }
@@ -810,6 +755,7 @@ void afs_charge_preallocation(struct work_struct *work)
                        if (!call)
                                break;
 
+                       call->drop_ref = true;
                        call->async = true;
                        call->state = AFS_CALL_SV_AWAIT_OP_ID;
                        init_waitqueue_head(&call->waitq);
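
Taken together, the fs/afs hunks above stop AFS from policing call liveness itself: the wait loop now just calls schedule(), asynchronous and incoming calls mark themselves with drop_ref so completion releases the extra reference, and the desired cancellation behaviour is handed to rxrpc as an interruptibility mode (the enum added in the rxrpc header hunk further down). A small sketch of that mapping, assuming only what the diff shows:

    #include <stdbool.h>

    enum rxrpc_interruptibility {
            RXRPC_INTERRUPTIBLE,    /* call is fully interruptible */
            RXRPC_PREINTERRUPTIBLE, /* cancellable only while waiting for a slot */
            RXRPC_UNINTERRUPTIBLE,  /* never interruptible */
    };

    /* afs_make_call() maps its boolean "intr" flag onto the new enum: an
     * interruptible AFS call may only be cancelled before it is committed. */
    static enum rxrpc_interruptibility afs_call_mode(bool intr)
    {
            return intr ? RXRPC_PREINTERRUPTIBLE : RXRPC_UNINTERRUPTIBLE;
    }
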
index 404e050..7f09147 100644 (file)
@@ -856,9 +856,9 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
                                found_raid1c34 = true;
                        up_read(&sinfo->groups_sem);
                }
-               if (found_raid56)
+               if (!found_raid56)
                        btrfs_clear_fs_incompat(fs_info, RAID56);
-               if (found_raid1c34)
+               if (!found_raid1c34)
                        btrfs_clear_fs_incompat(fs_info, RAID1C34);
        }
 }
index 27076eb..d267eb5 100644 (file)
@@ -9496,6 +9496,10 @@ out_fail:
                ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
                if (ret)
                        commit_transaction = true;
+       } else if (sync_log) {
+               mutex_lock(&root->log_mutex);
+               list_del(&ctx.list);
+               mutex_unlock(&root->log_mutex);
        }
        if (commit_transaction) {
                ret = btrfs_commit_transaction(trans);
index 7e0190b..5a478cd 100644 (file)
@@ -1415,10 +1415,13 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        bool direct_lock = false;
+       u32 map_flags;
+       u64 pool_flags;
        loff_t pos;
        loff_t limit = max(i_size_read(inode), fsc->max_file_size);
 
@@ -1481,8 +1484,12 @@ retry_snap:
                        goto out;
        }
 
-       /* FIXME: not complete since it doesn't account for being at quota */
-       if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
+       down_read(&osdc->lock);
+       map_flags = osdc->osdmap->flags;
+       pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
+       up_read(&osdc->lock);
+       if ((map_flags & CEPH_OSDMAP_FULL) ||
+           (pool_flags & CEPH_POOL_FLAG_FULL)) {
                err = -ENOSPC;
                goto out;
        }
@@ -1575,7 +1582,8 @@ retry_snap:
        }
 
        if (written >= 0) {
-               if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
+               if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
+                   (pool_flags & CEPH_POOL_FLAG_NEARFULL))
                        iocb->ki_flags |= IOCB_DSYNC;
                written = generic_write_sync(iocb, written);
        }
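
The rewrite snapshots both the osdmap-level flags and the per-pool flags under osdc->lock, then checks them together, so a pool that is full (including full because of quota) blocks writes even when the map-wide flag is not set. A compact sketch of the combined check; the flag values are stand-ins for the CEPH_OSDMAP_* and CEPH_POOL_FLAG_* definitions in the header hunks further down:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAP_NEARFULL   (1u << 0)
    #define MAP_FULL       (1u << 1)
    #define POOL_FULL      (1ull << 1)
    #define POOL_NEARFULL  (1ull << 11)

    static bool write_blocked(uint32_t map_flags, uint64_t pool_flags)
    {
            /* either the whole cluster or just this pool being full stops writes */
            return (map_flags & MAP_FULL) || (pool_flags & POOL_FULL);
    }

    static bool write_wants_sync(uint32_t map_flags, uint64_t pool_flags)
    {
            /* near-full conditions force synchronous behaviour on the write */
            return (map_flags & MAP_NEARFULL) || (pool_flags & POOL_NEARFULL);
    }
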
index ccfcc66..923be93 100644 (file)
@@ -1155,5 +1155,6 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
                        pr_err("snapid map %llx -> %x still in use\n",
                               sm->snap, sm->dev);
                }
+               kfree(sm);
        }
 }
index b041b66..eee3c92 100644 (file)
@@ -1854,9 +1854,9 @@ fetch_events:
                waiter = true;
                init_waitqueue_entry(&wait, current);
 
-               spin_lock_irq(&ep->wq.lock);
+               write_lock_irq(&ep->lock);
                __add_wait_queue_exclusive(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
+               write_unlock_irq(&ep->lock);
        }
 
        for (;;) {
@@ -1904,9 +1904,9 @@ send_events:
                goto fetch_events;
 
        if (waiter) {
-               spin_lock_irq(&ep->wq.lock);
+               write_lock_irq(&ep->lock);
                __remove_wait_queue(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
+               write_unlock_irq(&ep->lock);
        }
 
        return res;
index a364e1a..c8a4e4c 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -540,9 +540,14 @@ static int alloc_fd(unsigned start, unsigned flags)
        return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
 }
 
+int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
+{
+       return __alloc_fd(current->files, 0, nofile, flags);
+}
+
 int get_unused_fd_flags(unsigned flags)
 {
-       return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
+       return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
 }
 EXPORT_SYMBOL(get_unused_fd_flags);
 
index 1b25172..3affd96 100644 (file)
@@ -343,6 +343,7 @@ struct io_accept {
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
+       unsigned long                   nofile;
 };
 
 struct io_sync {
@@ -397,6 +398,7 @@ struct io_open {
        struct filename                 *filename;
        struct statx __user             *buffer;
        struct open_how                 how;
+       unsigned long                   nofile;
 };
 
 struct io_files_update {
@@ -2577,6 +2579,7 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return ret;
        }
 
+       req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
@@ -2618,6 +2621,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return ret;
        }
 
+       req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
@@ -2636,7 +2640,7 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
        if (ret)
                goto err;
 
-       ret = get_unused_fd_flags(req->open.how.flags);
+       ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
        if (ret < 0)
                goto err;
 
@@ -3321,6 +3325,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
+       accept->nofile = rlimit(RLIMIT_NOFILE);
        return 0;
 #else
        return -EOPNOTSUPP;
@@ -3337,7 +3342,8 @@ static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 
        file_flags = force_nonblock ? O_NONBLOCK : 0;
        ret = __sys_accept4_file(req->file, file_flags, accept->addr,
-                                       accept->addr_len, accept->flags);
+                                       accept->addr_len, accept->flags,
+                                       accept->nofile);
        if (ret == -EAGAIN && force_nonblock)
                return -EAGAIN;
        if (ret == -ERESTARTSYS)
@@ -4131,6 +4137,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
 {
        ssize_t ret = 0;
 
+       if (!sqe)
+               return 0;
+
        if (io_op_defs[req->opcode].file_table) {
                ret = io_grab_files(req);
                if (unlikely(ret))
@@ -4907,6 +4916,11 @@ err_req:
                if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                        req->flags |= REQ_F_LINK;
                        INIT_LIST_HEAD(&req->link_list);
+
+                       if (io_alloc_async_ctx(req)) {
+                               ret = -EAGAIN;
+                               goto err_req;
+                       }
                        ret = io_req_defer_prep(req, sqe);
                        if (ret)
                                req->flags |= REQ_F_FAIL_LINK;
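
The io_uring hunks capture RLIMIT_NOFILE at prepare time, while the request is still running in the context of the submitting task, and carry it through to __get_unused_fd_flags() so an async worker enforces the submitter's limit rather than its own. A userspace sketch of the capture step, with hypothetical names:

    #include <sys/resource.h>

    struct open_req { const char *path; int flags; unsigned long nofile; };

    /* Runs in the submitting task: snapshot the fd limit now, because the
     * actual open may later execute from a worker with a different rlimit. */
    static void prep_open(struct open_req *req, const char *path, int flags)
    {
            struct rlimit rl;

            getrlimit(RLIMIT_NOFILE, &rl);  /* rlimit(RLIMIT_NOFILE) in-kernel */
            req->path   = path;
            req->flags  = flags;
            req->nofile = rl.rlim_cur;      /* consumed by __get_unused_fd_flags() */
    }
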
index 69aee3d..3ce9829 100644 (file)
@@ -178,7 +178,8 @@ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
  * amount of readable data in the zone.
  */
 static loff_t zonefs_check_zone_condition(struct inode *inode,
-                                         struct blk_zone *zone, bool warn)
+                                         struct blk_zone *zone, bool warn,
+                                         bool mount)
 {
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
 
@@ -196,13 +197,26 @@ static loff_t zonefs_check_zone_condition(struct inode *inode,
                zone->wp = zone->start;
                return 0;
        case BLK_ZONE_COND_READONLY:
-               /* Do not allow writes in read-only zones */
+               /*
+                * The write pointer of read-only zones is invalid. If such a
+                * zone is found during mount, the file size cannot be retrieved
+                * so we treat the zone as offline (mount == true case).
+                * Otherwise, keep the file size as it was when last updated
+                * so that the user can recover data. In both cases, writes are
+                * always disabled for the zone.
+                */
                if (warn)
                        zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
                                    inode->i_ino);
                inode->i_flags |= S_IMMUTABLE;
+               if (mount) {
+                       zone->cond = BLK_ZONE_COND_OFFLINE;
+                       inode->i_mode &= ~0777;
+                       zone->wp = zone->start;
+                       return 0;
+               }
                inode->i_mode &= ~0222;
-               /* fallthrough */
+               return i_size_read(inode);
        default:
                if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
                        return zi->i_max_size;
@@ -231,7 +245,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
         * as there is no inconsistency between the inode size and the amount of
         * data writen in the zone (data_size).
         * data written in the zone (data_size).
         */
-       data_size = zonefs_check_zone_condition(inode, zone, true);
+       data_size = zonefs_check_zone_condition(inode, zone, true, false);
        isize = i_size_read(inode);
        if (zone->cond != BLK_ZONE_COND_OFFLINE &&
            zone->cond != BLK_ZONE_COND_READONLY &&
@@ -274,7 +288,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
                if (zone->cond != BLK_ZONE_COND_OFFLINE) {
                        zone->cond = BLK_ZONE_COND_OFFLINE;
                        data_size = zonefs_check_zone_condition(inode, zone,
-                                                               false);
+                                                               false, false);
                }
        } else if (zone->cond == BLK_ZONE_COND_READONLY ||
                   sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
@@ -283,7 +297,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
                if (zone->cond != BLK_ZONE_COND_READONLY) {
                        zone->cond = BLK_ZONE_COND_READONLY;
                        data_size = zonefs_check_zone_condition(inode, zone,
-                                                               false);
+                                                               false, false);
                }
        }
 
@@ -975,7 +989,7 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
        zi->i_zsector = zone->start;
        zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
                               zone->len << SECTOR_SHIFT);
-       zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true);
+       zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
 
        inode->i_uid = sbi->s_uid;
        inode->i_gid = sbi->s_gid;
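
The zonefs change splits the handling of read-only zones by when they are discovered: at mount time the write pointer cannot be trusted, so the zone is degraded to an offline, zero-sized, permission-less file; at run time the last known size is kept so the data stays readable for recovery. A small decision-table sketch with illustrative types and names:

    #include <stdbool.h>

    enum zcond { ZONE_OK, ZONE_READONLY, ZONE_OFFLINE };

    struct ro_policy { enum zcond cond; bool writable; bool keep_size; };

    static struct ro_policy handle_readonly_zone(bool at_mount)
    {
            if (at_mount)
                    /* write pointer is invalid: treat it like an offline zone */
                    return (struct ro_policy){ ZONE_OFFLINE, false, false };
            /* I/O error path: keep the last size so data can be recovered */
            return (struct ro_policy){ ZONE_READONLY, false, true };
    }
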
index 49b1a70..212991f 100644 (file)
@@ -160,6 +160,7 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
 }
 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
                           bool lock_src);
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
 
 struct bpf_offload_dev;
 struct bpf_offloaded_map;
index c4458dc..76371aa 100644 (file)
@@ -175,9 +175,10 @@ struct ceph_msg_data {
 #endif /* CONFIG_BLOCK */
                struct ceph_bvec_iter   bvec_pos;
                struct {
-                       struct page     **pages;        /* NOT OWNER. */
+                       struct page     **pages;
                        size_t          length;         /* total # bytes */
                        unsigned int    alignment;      /* first page */
+                       bool            own_pages;
                };
                struct ceph_pagelist    *pagelist;
        };
@@ -356,8 +357,8 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
 extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
                                       unsigned long interval);
 
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-                               size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+                            size_t length, size_t alignment, bool own_pages);
 extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
                                struct ceph_pagelist *pagelist);
 #ifdef CONFIG_BLOCK
index e081b56..5e60197 100644 (file)
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
 #define CEPH_POOL_FLAG_HASHPSPOOL      (1ULL << 0) /* hash pg seed and pool id
                                                       together */
 #define CEPH_POOL_FLAG_FULL            (1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA      (1ULL << 10) /* pool ran out of quota,
+                                                       will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL                (1ULL << 11) /* pool is nearfull */
 
 struct ceph_pg_pool_info {
        struct rb_node node;
@@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
 
 extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
 extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
 
 #endif
index 59bdfd4..88ed3c5 100644 (file)
@@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
 /*
  * osd map flag bits
  */
-#define CEPH_OSDMAP_NEARFULL (1<<0)  /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL     (1<<1)  /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0)  /* sync writes (near ENOSPC),
+                                       not set since ~luminous */
+#define CEPH_OSDMAP_FULL     (1<<1)  /* no data writes (ENOSPC),
+                                       not set since ~luminous */
 #define CEPH_OSDMAP_PAUSERD  (1<<2)  /* pause all reads */
 #define CEPH_OSDMAP_PAUSEWR  (1<<3)  /* pause all writes */
 #define CEPH_OSDMAP_PAUSEREC (1<<4)  /* pause recovery */
index 952ac03..bd1ee90 100644 (file)
@@ -522,9 +522,9 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
  * @clk_gate_flags: gate-specific flags for this clock
  * @lock: shared register lock for this clock
  */
-#define clk_hw_register_gate_parent_hw(dev, name, parent_name, flags, reg,    \
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg,      \
                                       bit_idx, clk_gate_flags, lock)         \
-       __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL,      \
+       __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw),        \
                               NULL, (flags), (reg), (bit_idx),               \
                               (clk_gate_flags), (lock))
 /**
@@ -539,10 +539,10 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
  * @clk_gate_flags: gate-specific flags for this clock
  * @lock: shared register lock for this clock
  */
-#define clk_hw_register_gate_parent_data(dev, name, parent_name, flags, reg,  \
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg,  \
                                       bit_idx, clk_gate_flags, lock)         \
-       __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL,      \
-                              NULL, (flags), (reg), (bit_idx),               \
+       __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
+                              (flags), (reg), (bit_idx),                     \
                               (clk_gate_flags), (lock))
 void clk_unregister_gate(struct clk *clk);
 void clk_hw_unregister_gate(struct clk_hw *hw);
index 0aa803c..c620d91 100644 (file)
@@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
 
 int dsa_8021q_rx_source_port(u16 vid);
 
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
-
 #else
 
 int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
@@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
        return 0;
 }
 
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
-       return NULL;
-}
-
 #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
 
 #endif /* _NET_DSA_8021Q_H */
index c6c7b24..142d102 100644 (file)
@@ -85,6 +85,7 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern bool get_close_on_exec(unsigned int fd);
+extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
 extern int get_unused_fd_flags(unsigned flags);
 extern void put_unused_fd(unsigned int fd);
 
index f834687..f6b9421 100644 (file)
@@ -506,7 +506,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
  * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
  *   so e.g. PMICs can be accessed very late before shutdown. Optional.
  * @functionality: Return the flags that this algorithm/adapter pair supports
- *   from the I2C_FUNC_* flags.
+ *   from the ``I2C_FUNC_*`` flags.
  * @reg_slave: Register given client to I2C slave mode of this adapter
  * @unreg_slave: Unregister given client from I2C slave mode of this adapter
  *
@@ -515,7 +515,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
  * be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
  * to name two of the most common.
  *
- * The return codes from the @master_xfer{_atomic} fields should indicate the
+ * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
  * type of error code that occurred during the transfer, as documented in the
  * Kernel Documentation file Documentation/i2c/fault-codes.rst.
  */
index 7d3f2ce..73c66a3 100644 (file)
@@ -2102,14 +2102,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
 {
        struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
        u8 spr_len = sizeof(struct ieee80211_he_spr);
-       u32 he_spr_params;
+       u8 he_spr_params;
 
        /* Make sure the input is not NULL */
        if (!he_spr_ie)
                return 0;
 
        /* Calc required length */
-       he_spr_params = le32_to_cpu(he_spr->he_sr_control);
+       he_spr_params = he_spr->he_sr_control;
        if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
                spr_len++;
        if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
index 205fa7b..60739d0 100644 (file)
@@ -115,6 +115,19 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
 {
        u64 __cookie = cookie;
 
+       if (!extack)
+               return;
+       memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+       extack->cookie_len = sizeof(__cookie);
+}
+
+static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
+                                           u32 cookie)
+{
+       u32 __cookie = cookie;
+
+       if (!extack)
+               return;
        memcpy(extack->cookie, &__cookie, sizeof(__cookie));
        extack->cookie_len = sizeof(__cookie);
 }
index 1bf83c8..77de28b 100644 (file)
@@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
 
 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
-PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
+PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
 PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
index 5b50278..e596202 100644 (file)
@@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t;
  *     @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
  *     @tc_skip_classify: do not classify packet. set by IFB device
  *     @tc_at_ingress: used within tc_classify to distinguish in/egress
- *     @tc_redirected: packet was redirected by a tc action
- *     @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ *     @redirected: packet was redirected by packet classifier
+ *     @from_ingress: packet was redirected from the ingress path
  *     @peeked: this packet has been seen already, so stats have been
  *             done for it, don't do them again
  *     @nf_trace: netfilter packet trace flag
@@ -848,8 +848,10 @@ struct sk_buff {
 #ifdef CONFIG_NET_CLS_ACT
        __u8                    tc_skip_classify:1;
        __u8                    tc_at_ingress:1;
-       __u8                    tc_redirected:1;
-       __u8                    tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+       __u8                    redirected:1;
+       __u8                    from_ingress:1;
 #endif
 #ifdef CONFIG_TLS_DEVICE
        __u8                    decrypted:1;
@@ -4579,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
        return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+       return skb->redirected;
+#else
+       return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+       skb->redirected = 1;
+       skb->from_ingress = from_ingress;
+       if (skb->from_ingress)
+               skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+       skb->redirected = 0;
+#endif
+}
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
index 2d23134..15f3412 100644 (file)
@@ -401,7 +401,8 @@ extern int __sys_sendto(int fd, void __user *buff, size_t len,
                        int addr_len);
 extern int __sys_accept4_file(struct file *file, unsigned file_flags,
                        struct sockaddr __user *upeer_sockaddr,
-                        int __user *upeer_addrlen, int flags);
+                        int __user *upeer_addrlen, int flags,
+                        unsigned long nofile);
 extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
                         int __user *upeer_addrlen, int flags);
 extern int __sys_socket(int family, int type, int protocol);
index ec38132..0507a16 100644 (file)
@@ -141,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);
-void vmalloc_sync_all(void);
+void vmalloc_sync_mappings(void);
+void vmalloc_sync_unmappings(void);
+
 /*
  *     Lowlevel-APIs (not for driver use!)
  */
index 1abae3c..04e97ba 100644 (file)
@@ -16,6 +16,12 @@ struct sock;
 struct socket;
 struct rxrpc_call;
 
+enum rxrpc_interruptibility {
+       RXRPC_INTERRUPTIBLE,    /* Call is interruptible */
+       RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */
+       RXRPC_UNINTERRUPTIBLE,  /* Call should not be interruptible at all */
+};
+
 /*
  * Debug ID counter for tracing.
  */
@@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
                                           gfp_t,
                                           rxrpc_notify_rx_t,
                                           bool,
-                                          bool,
+                                          enum rxrpc_interruptibility,
                                           unsigned int);
 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
                           struct msghdr *, size_t,
@@ -58,9 +64,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
                               rxrpc_user_attach_call_t, unsigned long, gfp_t,
                               unsigned int);
 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
-                            u32 *);
-void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
index 1512087..c30f914 100644 (file)
@@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
 
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-       return skb->tc_redirected;
-#else
-       return false;
-#endif
-}
-
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
index 564ba1b..c612cab 100644 (file)
@@ -233,7 +233,7 @@ enum afs_cb_break_reason {
        EM(afs_call_trace_get,                  "GET  ") \
        EM(afs_call_trace_put,                  "PUT  ") \
        EM(afs_call_trace_wake,                 "WAKE ") \
-       E_(afs_call_trace_work,                 "WORK ")
+       E_(afs_call_trace_work,                 "QUEUE")
 
 #define afs_server_traces \
        EM(afs_server_trace_alloc,              "ALLOC    ") \
index 0f1db1c..6923dc7 100644 (file)
 /* Electronic privacy screen control */
 #define KEY_PRIVACY_SCREEN_TOGGLE      0x279
 
+/* Select an area of screen to be copied */
+#define KEY_SELECTIVE_SCREENSHOT       0x27a
+
 /*
  * Some keyboards have keys which do not have a defined meaning, these keys
  * are intended to be programmed / bound to macros by the user. For most
index 50e9919..ed2a96f 100644 (file)
@@ -9,7 +9,7 @@
 #ifndef _UAPI_SERIO_H
 #define _UAPI_SERIO_H
 
-
+#include <linux/const.h>
 #include <linux/ioctl.h>
 
 #define SPIOCSTYPE     _IOW('q', 0x01, unsigned long)
 /*
  * bit masks for use in "interrupt" flags (3rd argument)
  */
-#define SERIO_TIMEOUT  BIT(0)
-#define SERIO_PARITY   BIT(1)
-#define SERIO_FRAME    BIT(2)
-#define SERIO_OOB_DATA BIT(3)
+#define SERIO_TIMEOUT  _BITUL(0)
+#define SERIO_PARITY   _BITUL(1)
+#define SERIO_FRAME    _BITUL(2)
+#define SERIO_OOB_DATA _BITUL(3)
 
 /*
  * Serio types
index 042f955..68a89a9 100644 (file)
@@ -482,13 +482,21 @@ static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
        prev_state = cmpxchg(&st_map->kvalue.state,
                             BPF_STRUCT_OPS_STATE_INUSE,
                             BPF_STRUCT_OPS_STATE_TOBEFREE);
-       if (prev_state == BPF_STRUCT_OPS_STATE_INUSE) {
+       switch (prev_state) {
+       case BPF_STRUCT_OPS_STATE_INUSE:
                st_map->st_ops->unreg(&st_map->kvalue.data);
                if (refcount_dec_and_test(&st_map->kvalue.refcnt))
                        bpf_map_put(map);
+               return 0;
+       case BPF_STRUCT_OPS_STATE_TOBEFREE:
+               return -EINPROGRESS;
+       case BPF_STRUCT_OPS_STATE_INIT:
+               return -ENOENT;
+       default:
+               WARN_ON_ONCE(1);
+               /* Should never happen.  Treat it as not found. */
+               return -ENOENT;
        }
-
-       return 0;
 }
 
 static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
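
The delete path is rewritten so that only the caller winning the INUSE -> TOBEFREE transition performs the unregister and put, while racing or premature callers get a distinct error. A self-contained C11 sketch of the same cmpxchg pattern (names are illustrative):

    #include <errno.h>
    #include <stdatomic.h>

    enum st { ST_INIT, ST_INUSE, ST_TOBEFREE };

    static int mark_for_free(_Atomic int *state)
    {
            int seen = ST_INUSE;

            if (atomic_compare_exchange_strong(state, &seen, ST_TOBEFREE))
                    return 0;               /* we won: do the unregister/put */

            switch (seen) {                 /* the state someone else left behind */
            case ST_TOBEFREE:
                    return -EINPROGRESS;    /* another delete already won */
            case ST_INIT:
            default:
                    return -ENOENT;         /* never attached, nothing to free */
            }
    }
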
index 7871400..7787bdc 100644 (file)
@@ -2418,7 +2418,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
 
        struct_size = struct_type->size;
        bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
-       if (struct_size - bytes_offset < sizeof(int)) {
+       if (struct_size - bytes_offset < member_type->size) {
                btf_verifier_log_member(env, struct_type, member,
                                        "Member exceeds struct_size");
                return -EINVAL;
@@ -4564,7 +4564,7 @@ int btf_get_info_by_fd(const struct btf *btf,
                       union bpf_attr __user *uattr)
 {
        struct bpf_btf_info __user *uinfo;
-       struct bpf_btf_info info = {};
+       struct bpf_btf_info info;
        u32 info_copy, btf_copy;
        void __user *ubtf;
        u32 uinfo_len;
@@ -4573,6 +4573,7 @@ int btf_get_info_by_fd(const struct btf *btf,
        uinfo_len = attr->info.info_len;
 
        info_copy = min_t(u32, uinfo_len, sizeof(info));
+       memset(&info, 0, sizeof(info));
        if (copy_from_user(&info, uinfo, info_copy))
                return -EFAULT;
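
This hunk, like the later syscall.c ones, replaces an empty-initializer on a stack info structure with an explicit memset() before the user copy. The intent is defensive: memset() clears every byte, padding included, whereas a brace initializer only has to initialise the members, so a shorter copy from userspace on top of it could otherwise leave stale stack bytes that later flow back out. A minimal userspace sketch of the pattern, with memcpy() standing in for copy_from_user():

    #include <string.h>

    struct info { char a; /* padding likely follows on most ABIs */ long b; };

    static void fill_info(struct info *dst, const void *user, size_t copy_len)
    {
            memset(dst, 0, sizeof(*dst));   /* no uninitialised bytes survive */
            if (copy_len > sizeof(*dst))
                    copy_len = sizeof(*dst);
            memcpy(dst, user, copy_len);
    }
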
 
index 9a500fa..4f14724 100644 (file)
@@ -227,6 +227,9 @@ cleanup:
        for (i = 0; i < NR; i++)
                bpf_prog_array_free(arrays[i]);
 
+       for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
+               cgroup_bpf_put(p);
+
        percpu_ref_exit(&cgrp->bpf.refcnt);
 
        return -ENOMEM;
@@ -302,8 +305,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
        u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
        struct list_head *progs = &cgrp->bpf.progs[type];
        struct bpf_prog *old_prog = NULL;
-       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
-               *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
+       struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+       struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_prog_list *pl, *replace_pl = NULL;
        enum bpf_cgroup_storage_type stype;
        int err;
index a91ad51..966b7b3 100644 (file)
@@ -696,14 +696,15 @@ int bpf_get_file_flag(int flags)
                   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
                   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes.
- * Return 0 on success and < 0 on error.
+/* dst and src must have at least "size" number of bytes.
+ * Return strlen on success and < 0 on error.
  */
-static int bpf_obj_name_cpy(char *dst, const char *src)
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
 {
-       const char *end = src + BPF_OBJ_NAME_LEN;
+       const char *end = src + size;
+       const char *orig_src = src;
 
-       memset(dst, 0, BPF_OBJ_NAME_LEN);
+       memset(dst, 0, size);
        /* Copy all isalnum(), '_' and '.' chars. */
        while (src < end && *src) {
                if (!isalnum(*src) &&
@@ -712,11 +713,11 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
                *dst++ = *src++;
        }
 
-       /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */
+       /* No '\0' found in "size" number of bytes */
        if (src == end)
                return -EINVAL;
 
-       return 0;
+       return src - orig_src;
 }
 
 int map_check_no_btf(const struct bpf_map *map,
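
bpf_obj_name_cpy() now takes the destination size, is exported via the header hunk earlier, and returns the number of characters copied instead of 0, so its callers below check for err < 0 rather than non-zero. An equivalent userspace sketch of the bounded, sanitised copy:

    #include <ctype.h>
    #include <errno.h>
    #include <string.h>

    /* Copy only [A-Za-z0-9_.], require a NUL within "size" bytes of src,
     * zero-fill the rest of dst, and return the copied length. */
    static int obj_name_cpy(char *dst, const char *src, unsigned int size)
    {
            const char *end = src + size;
            const char *orig_src = src;

            memset(dst, 0, size);
            while (src < end && *src) {
                    if (!isalnum((unsigned char)*src) &&
                        *src != '_' && *src != '.')
                            return -EINVAL;
                    *dst++ = *src++;
            }
            if (src == end)                 /* not NUL-terminated within size */
                    return -EINVAL;
            return (int)(src - orig_src);
    }
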
@@ -810,8 +811,9 @@ static int map_create(union bpf_attr *attr)
        if (IS_ERR(map))
                return PTR_ERR(map);
 
-       err = bpf_obj_name_cpy(map->name, attr->map_name);
-       if (err)
+       err = bpf_obj_name_cpy(map->name, attr->map_name,
+                              sizeof(attr->map_name));
+       if (err < 0)
                goto free_map;
 
        atomic64_set(&map->refcnt, 1);
@@ -1510,6 +1512,11 @@ static int map_freeze(const union bpf_attr *attr)
        if (IS_ERR(map))
                return PTR_ERR(map);
 
+       if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
+               fdput(f);
+               return -ENOTSUPP;
+       }
+
        mutex_lock(&map->freeze_mutex);
 
        if (map->writecnt) {
@@ -2093,8 +2100,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
                goto free_prog;
 
        prog->aux->load_time = ktime_get_boottime_ns();
-       err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name);
-       if (err)
+       err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
+                              sizeof(attr->prog_name));
+       if (err < 0)
                goto free_prog;
 
        /* run eBPF verifier */
@@ -2787,7 +2795,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                                   union bpf_attr __user *uattr)
 {
        struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
-       struct bpf_prog_info info = {};
+       struct bpf_prog_info info;
        u32 info_len = attr->info.info_len;
        struct bpf_prog_stats stats;
        char __user *uinsns;
@@ -2799,6 +2807,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
 
+       memset(&info, 0, sizeof(info));
        if (copy_from_user(&info, uinfo, info_len))
                return -EFAULT;
 
@@ -3062,7 +3071,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
                                  union bpf_attr __user *uattr)
 {
        struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
-       struct bpf_map_info info = {};
+       struct bpf_map_info info;
        u32 info_len = attr->info.info_len;
        int err;
 
@@ -3071,6 +3080,7 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
                return err;
        info_len = min_t(u32, sizeof(info), info_len);
 
+       memset(&info, 0, sizeof(info));
        info.type = map->map_type;
        info.id = map->id;
        info.key_size = map->key_size;
@@ -3354,7 +3364,7 @@ err_put:
 
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
-       union bpf_attr attr = {};
+       union bpf_attr attr;
        int err;
 
        if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
@@ -3366,6 +3376,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        size = min_t(u32, size, sizeof(attr));
 
        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
+       memset(&attr, 0, sizeof(attr));
        if (copy_from_user(&attr, uattr, size) != 0)
                return -EFAULT;
 
index 7eee98c..fe40c65 100644 (file)
@@ -323,7 +323,11 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 
        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
-               schedule_work(&desc->affinity_notify->work);
+               if (!schedule_work(&desc->affinity_notify->work)) {
+                       /* Work was already scheduled, drop our extra ref */
+                       kref_put(&desc->affinity_notify->kref,
+                                desc->affinity_notify->release);
+               }
        }
        irqd_set(data, IRQD_AFFINITY_SET);
 
@@ -423,7 +427,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 
        if (old_notify) {
-               cancel_work_sync(&old_notify->work);
+               if (cancel_work_sync(&old_notify->work)) {
+                       /* Pending work had a ref, put that one too */
+                       kref_put(&old_notify->kref, old_notify->release);
+               }
                kref_put(&old_notify->kref, old_notify->release);
        }
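
Both hunks balance the notifier reference that rides on the work item: if schedule_work() reports the work was already queued, the extra reference just taken is dropped again; if cancel_work_sync() reports it cancelled pending work, the reference that work was carrying is released on its behalf. A reference-counting sketch with hypothetical queue/cancel callbacks:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct notify { _Atomic int refs; };

    static void notify_get(struct notify *n) { atomic_fetch_add(&n->refs, 1); }
    static void notify_put(struct notify *n) { atomic_fetch_sub(&n->refs, 1); }

    /* queue_once() returns false if the work was already queued, i.e. no new
     * execution will consume the reference we just took. */
    static void kick_notify(struct notify *n, bool (*queue_once)(struct notify *))
    {
            notify_get(n);
            if (!queue_once(n))
                    notify_put(n);
    }

    /* cancel_sync() returns true if pending work was cancelled before running,
     * so the reference it held must be dropped here instead. */
    static void drop_notify(struct notify *n, bool (*cancel_sync)(struct notify *))
    {
            if (cancel_sync(n))
                    notify_put(n);
            notify_put(n);          /* the caller's own reference */
    }
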
 
index 63d7501..5989bbb 100644 (file)
@@ -519,7 +519,7 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-       vmalloc_sync_all();
+       vmalloc_sync_mappings();
        return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
index 19e793a..68250d4 100644 (file)
@@ -732,7 +732,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;
 
-       if (in_nmi()) {
+       if (irqs_disabled()) {
                /* Do an early check on signal validity. Otherwise,
                 * the error is lost in deferred irq_work.
                 */
index c391a91..fa43ded 100644 (file)
@@ -9028,10 +9028,15 @@ bool __init chacha20poly1305_selftest(void)
             && total_len <= 1 << 10; ++total_len) {
                for (i = 0; i <= total_len; ++i) {
                        for (j = i; j <= total_len; ++j) {
+                               k = 0;
                                sg_init_table(sg_src, 3);
-                               sg_set_buf(&sg_src[0], input, i);
-                               sg_set_buf(&sg_src[1], input + i, j - i);
-                               sg_set_buf(&sg_src[2], input + j, total_len - j);
+                               if (i)
+                                       sg_set_buf(&sg_src[k++], input, i);
+                               if (j - i)
+                                       sg_set_buf(&sg_src[k++], input + i, j - i);
+                               if (total_len - j)
+                                       sg_set_buf(&sg_src[k++], input + j, total_len - j);
+                               sg_init_marker(sg_src, k);
                                memset(computed_output, 0, total_len);
                                memset(input, 0, total_len);
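
The selftest fix builds the scatterlist only from the pieces that are actually non-empty and then marks the real end with sg_init_marker(), instead of emitting zero-length entries when i, j - i or total_len - j happens to be 0. A sketch of the same segment-building logic with a stand-in segment type:

    #include <stddef.h>

    struct seg { const unsigned char *buf; size_t len; };

    /* Returns how many segments were filled in; the caller then marks that
     * count as the end of the table (sg_init_marker() in the real code). */
    static size_t build_segments(struct seg *sg, const unsigned char *in,
                                 size_t i, size_t j, size_t total_len)
    {
            size_t k = 0;

            if (i)
                    sg[k++] = (struct seg){ in, i };
            if (j - i)
                    sg[k++] = (struct seg){ in + i, j - i };
            if (total_len - j)
                    sg[k++] = (struct seg){ in + j, total_len - j };
            return k;
    }
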
 
index 43b47d3..4bb30ed 100644 (file)
@@ -335,12 +335,14 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                }
 
                page = pmd_page(orig_pmd);
+
+               /* Do not interfere with other mappings of this page */
+               if (page_mapcount(page) != 1)
+                       goto huge_unlock;
+
                if (next - addr != HPAGE_PMD_SIZE) {
                        int err;
 
-                       if (page_mapcount(page) != 1)
-                               goto huge_unlock;
-
                        get_page(page);
                        spin_unlock(ptl);
                        lock_page(page);
@@ -426,6 +428,10 @@ regular_page:
                        continue;
                }
 
+               /* Do not interfere with other mappings of this page */
+               if (page_mapcount(page) != 1)
+                       continue;
+
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                if (pte_young(ptent)) {
index 2058b8d..7a4bd8b 100644 (file)
@@ -2297,28 +2297,41 @@ static void high_work_func(struct work_struct *work)
  #define MEMCG_DELAY_SCALING_SHIFT 14
 
 /*
- * Scheduled by try_charge() to be executed from the userland return path
- * and reclaims memory over the high limit.
+ * Get the number of jiffies that we should penalise a mischievous cgroup which
+ * is exceeding its memory.high by checking both it and its ancestors.
  */
-void mem_cgroup_handle_over_high(void)
+static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
+                                         unsigned int nr_pages)
 {
-       unsigned long usage, high, clamped_high;
-       unsigned long pflags;
-       unsigned long penalty_jiffies, overage;
-       unsigned int nr_pages = current->memcg_nr_pages_over_high;
-       struct mem_cgroup *memcg;
+       unsigned long penalty_jiffies;
+       u64 max_overage = 0;
 
-       if (likely(!nr_pages))
-               return;
+       do {
+               unsigned long usage, high;
+               u64 overage;
 
-       memcg = get_mem_cgroup_from_mm(current->mm);
-       reclaim_high(memcg, nr_pages, GFP_KERNEL);
-       current->memcg_nr_pages_over_high = 0;
+               usage = page_counter_read(&memcg->memory);
+               high = READ_ONCE(memcg->high);
+
+               /*
+                * Prevent division by 0 in overage calculation by acting as if
+                * it was a threshold of 1 page
+                */
+               high = max(high, 1UL);
+
+               overage = usage - high;
+               overage <<= MEMCG_DELAY_PRECISION_SHIFT;
+               overage = div64_u64(overage, high);
+
+               if (overage > max_overage)
+                       max_overage = overage;
+       } while ((memcg = parent_mem_cgroup(memcg)) &&
+                !mem_cgroup_is_root(memcg));
+
+       if (!max_overage)
+               return 0;
 
        /*
-        * memory.high is breached and reclaim is unable to keep up. Throttle
-        * allocators proactively to slow down excessive growth.
-        *
         * We use overage compared to memory.high to calculate the number of
         * jiffies to sleep (penalty_jiffies). Ideally this value should be
         * fairly lenient on small overages, and increasingly harsh when the
@@ -2326,24 +2339,9 @@ void mem_cgroup_handle_over_high(void)
         * its crazy behaviour, so we exponentially increase the delay based on
         * overage amount.
         */
-
-       usage = page_counter_read(&memcg->memory);
-       high = READ_ONCE(memcg->high);
-
-       if (usage <= high)
-               goto out;
-
-       /*
-        * Prevent division by 0 in overage calculation by acting as if it was a
-        * threshold of 1 page
-        */
-       clamped_high = max(high, 1UL);
-
-       overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
-                         clamped_high);
-
-       penalty_jiffies = ((u64)overage * overage * HZ)
-               >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
+       penalty_jiffies = max_overage * max_overage * HZ;
+       penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
+       penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
 
        /*
         * Factor in the task's own contribution to the overage, such that four
@@ -2360,7 +2358,32 @@ void mem_cgroup_handle_over_high(void)
         * application moving forwards and also permit diagnostics, albeit
         * extremely slowly.
         */
-       penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+       return min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+}
+
+/*
+ * Scheduled by try_charge() to be executed from the userland return path
+ * and reclaims memory over the high limit.
+ */
+void mem_cgroup_handle_over_high(void)
+{
+       unsigned long penalty_jiffies;
+       unsigned long pflags;
+       unsigned int nr_pages = current->memcg_nr_pages_over_high;
+       struct mem_cgroup *memcg;
+
+       if (likely(!nr_pages))
+               return;
+
+       memcg = get_mem_cgroup_from_mm(current->mm);
+       reclaim_high(memcg, nr_pages, GFP_KERNEL);
+       current->memcg_nr_pages_over_high = 0;
+
+       /*
+        * memory.high is breached and reclaim is unable to keep up. Throttle
+        * allocators proactively to slow down excessive growth.
+        */
+       penalty_jiffies = calculate_high_delay(memcg, nr_pages);
 
        /*
         * Don't sleep if the amount of jiffies this memcg owes us is so low
@@ -4027,7 +4050,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
        struct mem_cgroup_thresholds *thresholds;
        struct mem_cgroup_threshold_ary *new;
        unsigned long usage;
-       int i, j, size;
+       int i, j, size, entries;
 
        mutex_lock(&memcg->thresholds_lock);
 
@@ -4047,14 +4070,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
        __mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
        /* Calculate new number of threshold */
-       size = 0;
+       size = entries = 0;
        for (i = 0; i < thresholds->primary->size; i++) {
                if (thresholds->primary->entries[i].eventfd != eventfd)
                        size++;
+               else
+                       entries++;
        }
 
        new = thresholds->spare;
 
+       /* If no items related to eventfd have been cleared, nothing to do */
+       if (!entries)
+               goto unlock;
+
        /* Set thresholds array to NULL if we don't have thresholds */
        if (!size) {
                kfree(new);
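
In the calculate_high_delay() hunks above, the walk up the hierarchy keeps the worst overage and converts it into a delay as max_overage * max_overage * HZ shifted down by MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT, clamped to MEMCG_MAX_HIGH_DELAY_JIFFIES in the caller. A runnable arithmetic sketch of that formula; only MEMCG_DELAY_SCALING_SHIFT = 14 is visible in the diff, the precision shift and HZ values below are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PRECISION_SHIFT 20      /* assumed; MEMCG_DELAY_PRECISION_SHIFT */
    #define SCALING_SHIFT   14      /* MEMCG_DELAY_SCALING_SHIFT in the hunk */
    #define HZ              1000ULL /* assumed */

    static uint64_t penalty_jiffies(uint64_t usage, uint64_t high)
    {
            uint64_t overage;

            if (high == 0)
                    high = 1;               /* same division-by-zero guard */
            if (usage <= high)
                    return 0;
            overage = ((usage - high) << PRECISION_SHIFT) / high;
            return (overage * overage * HZ) >> (PRECISION_SHIFT + SCALING_SHIFT);
    }

    int main(void)
    {
            /* ~10% over a 1 GiB memory.high, expressed in 4 KiB pages */
            printf("%llu jiffies\n",
                   (unsigned long long)penalty_jiffies(288358, 262144));
            return 0;
    }
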
index ef3973a..06852b8 100644 (file)
@@ -307,7 +307,8 @@ static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist)
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu))
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
@@ -370,7 +371,8 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_flush_young)
                        young |= subscription->ops->clear_flush_young(
                                subscription, mm, start, end);
@@ -389,7 +391,8 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_young)
                        young |= subscription->ops->clear_young(subscription,
                                                                mm, start, end);
@@ -407,7 +410,8 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->test_young) {
                        young = subscription->ops->test_young(subscription, mm,
                                                              address);
@@ -428,7 +432,8 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->change_pte)
                        subscription->ops->change_pte(subscription, mm, address,
                                                      pte);
@@ -476,7 +481,8 @@ static int mn_hlist_invalidate_range_start(
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                const struct mmu_notifier_ops *ops = subscription->ops;
 
                if (ops->invalidate_range_start) {
@@ -528,7 +534,8 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                /*
                 * Call invalidate_range here too to avoid the need for the
                 * subsystem of having to register an invalidate_range_end
@@ -582,7 +589,8 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->invalidate_range)
                        subscription->ops->invalidate_range(subscription, mm,
                                                            start, end);
@@ -714,7 +722,8 @@ find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
 
        spin_lock(&mm->notifier_subscriptions->lock);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                lockdep_is_held(&mm->notifier_subscriptions->lock)) {
                if (subscription->ops != ops)
                        continue;
 
index bd2b4e5..318df4e 100644 (file)
@@ -370,10 +370,14 @@ void vm_unmap_aliases(void)
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement a stub for vmalloc_sync_[un]mapping() if the architecture
+ * chose not to have one.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
+{
+}
+
+void __weak vmalloc_sync_unmappings(void)
 {
 }
 
index 97580b4..6589b41 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,8 +1973,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 
        if (node == NUMA_NO_NODE)
                searchnode = numa_mem_id();
-       else if (!node_present_pages(node))
-               searchnode = node_to_mem_node(node);
 
        object = get_partial_node(s, get_node(s, searchnode), c, flags);
        if (object || node != NUMA_NO_NODE)
@@ -2563,17 +2561,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        struct page *page;
 
        page = c->page;
-       if (!page)
+       if (!page) {
+               /*
+                * if the node is not online or has no normal memory, just
+                * ignore the node constraint
+                */
+               if (unlikely(node != NUMA_NO_NODE &&
+                            !node_state(node, N_NORMAL_MEMORY)))
+                       node = NUMA_NO_NODE;
                goto new_slab;
+       }
 redo:
 
        if (unlikely(!node_match(page, node))) {
-               int searchnode = node;
-
-               if (node != NUMA_NO_NODE && !node_present_pages(node))
-                       searchnode = node_to_mem_node(node);
-
-               if (unlikely(!node_match(page, searchnode))) {
+               /*
+                * same as above but node_match() being false already
+                * implies node != NUMA_NO_NODE
+                */
+               if (!node_state(node, N_NORMAL_MEMORY)) {
+                       node = NUMA_NO_NODE;
+                       goto redo;
+               } else {
                        stat(s, ALLOC_NODE_MISMATCH);
                        deactivate_slab(s, page, c->freelist, c);
                        goto new_slab;
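The slub.c hunks drop the node_to_mem_node() fallback and instead ignore the node constraint outright when the requested node is offline or has no normal memory, both on the empty c->page path and on the node-mismatch path. A hedged sketch of that guard as a standalone helper; demo_pick_node() is invented, while node_state(), N_NORMAL_MEMORY and NUMA_NO_NODE are the real kernel symbols used above:

#include <linux/nodemask.h>
#include <linux/numa.h>

/*
 * Treat a node that cannot satisfy a normal allocation as "no
 * preference" instead of letting the allocator loop or leak partial
 * slabs on its behalf.
 */
static int demo_pick_node(int node)
{
        if (node != NUMA_NO_NODE && !node_state(node, N_NORMAL_MEMORY))
                return NUMA_NO_NODE;
        return node;
}
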
index 596b2a4..aadb729 100644 (file)
@@ -734,6 +734,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
        struct mem_section *ms = __pfn_to_section(pfn);
        bool section_is_early = early_section(ms);
        struct page *memmap = NULL;
+       bool empty;
        unsigned long *subsection_map = ms->usage
                ? &ms->usage->subsection_map[0] : NULL;
 
@@ -764,7 +765,8 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
         * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
         */
        bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-       if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
+       empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
+       if (empty) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
 
                /*
@@ -779,13 +781,15 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
                        ms->usage = NULL;
                }
                memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-               ms->section_mem_map = (unsigned long)NULL;
        }
 
        if (section_is_early && memmap)
                free_map_bootmem(memmap);
        else
                depopulate_section_memmap(pfn, nr_pages, altmap);
+
+       if (empty)
+               ms->section_mem_map = (unsigned long)NULL;
 }
 
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
index 1f46c3b..6b8eeb0 100644 (file)
@@ -1295,7 +1295,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
         * First make sure the mappings are removed from all page-tables
         * before they are freed.
         */
-       vmalloc_sync_all();
+       vmalloc_sync_unmappings();
 
        /*
         * TODO: to calculate a flush range without looping.
@@ -3128,16 +3128,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings () if the architecture chose
+ * not to have one.
  *
  * The purpose of this function is to make sure the vmalloc area
  * mappings are identical in all page-tables in the system.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
 {
 }
 
+void __weak vmalloc_sync_unmappings(void)
+{
+}
 
 static int f(pte_t *pte, unsigned long addr, void *data)
 {
index 2eeb0e5..df8d8c9 100644 (file)
@@ -52,6 +52,9 @@ config NET_INGRESS
 config NET_EGRESS
        bool
 
+config NET_REDIRECT
+       bool
+
 config SKB_EXTENSIONS
        bool
 
index 77396a0..efea487 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/unistd.h>
 #include "msgfmt.h"
 
-int debug_fd;
+FILE *debug_f;
 
 static int handle_get_cmd(struct mbox_request *cmd)
 {
@@ -35,9 +35,9 @@ static void loop(void)
                struct mbox_reply reply;
                int n;
 
+               fprintf(debug_f, "testing the buffer\n");
                n = read(0, &req, sizeof(req));
                if (n != sizeof(req)) {
-                       dprintf(debug_fd, "invalid request %d\n", n);
+                       fprintf(debug_f, "invalid request %d\n", n);
                        return;
                }
 
@@ -47,7 +47,7 @@ static void loop(void)
 
                n = write(1, &reply, sizeof(reply));
                if (n != sizeof(reply)) {
-                       dprintf(debug_fd, "reply failed %d\n", n);
+                       fprintf(debug_f, "reply failed %d\n", n);
                        return;
                }
        }
@@ -55,9 +55,10 @@
 
 int main(void)
 {
-       debug_fd = open("/dev/kmsg", 00000002);
-       dprintf(debug_fd, "Started bpfilter\n");
+       debug_f = fopen("/dev/kmsg", "w");
+       setvbuf(debug_f, 0, _IOLBF, 0);
+       fprintf(debug_f, "Started bpfilter\n");
        loop();
-       close(debug_fd);
+       fclose(debug_f);
        return 0;
 }
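The bpfilter hunks replace dprintf() on a raw file descriptor with a line-buffered stdio stream, so each log line reaches /dev/kmsg as a single write() and therefore as a single log record. A small standalone program using the same fopen() + setvbuf(_IOLBF) combination; it needs privileges to write to /dev/kmsg and the message text is arbitrary:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/dev/kmsg", "w");

        if (!f) {
                perror("fopen /dev/kmsg");
                return 1;
        }
        /*
         * Line buffering: every message ending in '\n' is flushed as one
         * write(), which is how /dev/kmsg delimits records.
         */
        setvbuf(f, NULL, _IOLBF, 0);
        fprintf(f, "demo: hello from userspace\n");
        fclose(f);
        return 0;
}
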
index 5b4bd82..f8ca5ed 100644 (file)
@@ -3248,12 +3248,16 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
 
 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
 {
-       if (data->type == CEPH_MSG_DATA_PAGELIST)
+       if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
+               int num_pages = calc_pages_for(data->alignment, data->length);
+               ceph_release_page_vector(data->pages, num_pages);
+       } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
                ceph_pagelist_release(data->pagelist);
+       }
 }
 
 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-               size_t length, size_t alignment)
+                            size_t length, size_t alignment, bool own_pages)
 {
        struct ceph_msg_data *data;
 
@@ -3265,6 +3269,7 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
        data->pages = pages;
        data->length = length;
        data->alignment = alignment & ~PAGE_MASK;
+       data->own_pages = own_pages;
 
        msg->data_length += length;
 }
index b68b376..af868d3 100644 (file)
@@ -962,7 +962,7 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
-                                       length, osd_data->alignment);
+                                       length, osd_data->alignment, false);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
@@ -4436,9 +4436,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
                                                        CEPH_MSG_DATA_PAGES);
                                        *lreq->preply_pages = data->pages;
                                        *lreq->preply_len = data->length;
-                               } else {
-                                       ceph_release_page_vector(data->pages,
-                                              calc_pages_for(0, data->length));
+                                       data->own_pages = false;
                                }
                        }
                        lreq->notify_finish_error = return_code;
@@ -5506,9 +5504,6 @@ out_unlock_osdc:
        return m;
 }
 
-/*
- * TODO: switch to a msg-owned pagelist
- */
 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 {
        struct ceph_msg *m;
@@ -5522,7 +5517,6 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
        if (data_len) {
                struct page **pages;
-               struct ceph_osd_data osd_data;
 
                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
@@ -5531,9 +5525,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
                        return NULL;
                }
 
-               ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
-                                        false);
-               ceph_osdc_msg_data_add(m, &osd_data);
+               ceph_msg_data_add_pages(m, pages, data_len, 0, true);
        }
 
        return m;
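Taken together, the messenger.c and osd_client.c hunks give page-vector message data an own_pages flag: the message destructor now frees the vector unless a consumer (the watch-notify path above) explicitly took ownership. A rough sketch of the ownership-flag idea with an invented demo_msg_data type; calc_pages_for() and ceph_release_page_vector() are the real libceph helpers used in the patch:

#include <linux/ceph/libceph.h>
#include <linux/mm.h>

struct demo_msg_data {
        struct page **pages;
        u64 length;
        u64 alignment;
        bool own_pages;                 /* free the vector on destroy? */
};

static void demo_msg_data_destroy(struct demo_msg_data *d)
{
        if (d->own_pages) {
                int num_pages = calc_pages_for(d->alignment, d->length);

                ceph_release_page_vector(d->pages, num_pages);
        }
        d->pages = NULL;
}

/* A consumer that wants to keep the pages simply takes ownership back. */
static void demo_msg_data_steal(struct demo_msg_data *d, struct page ***pagesp)
{
        *pagesp = d->pages;
        d->own_pages = false;
}
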
index 4e0de14..2a6e63a 100644 (file)
@@ -710,6 +710,15 @@ int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 }
 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
 
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
+{
+       struct ceph_pg_pool_info *pi;
+
+       pi = __lookup_pg_pool(&map->pg_pools, id);
+       return pi ? pi->flags : 0;
+}
+EXPORT_SYMBOL(ceph_pg_pool_flags);
+
 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 {
        rb_erase(&pi->node, root);
index c6c985f..500bba8 100644 (file)
@@ -4516,7 +4516,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
        /* Reinjected packets coming from act_mirred or similar should
         * not get XDP generic processing.
         */
-       if (skb_is_tc_redirected(skb))
+       if (skb_is_redirected(skb))
                return XDP_PASS;
 
        /* XDP packets must be linear and must have sufficient headroom
@@ -5063,7 +5063,7 @@ skip_taps:
                        goto out;
        }
 #endif
-       skb_reset_tc(skb);
+       skb_reset_redirect(skb);
 skip_classify:
        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
@@ -5195,7 +5195,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
  *
  *     More direct receive version of netif_receive_skb().  It should
  *     only be used by callers that have a need to skip RPS and Generic XDP.
- *     Caller must also take care of handling if (page_is_)pfmemalloc.
+ *     Caller must also take care of handling if ``(page_is_)pfmemalloc``.
  *
  *     This function may only be called from softirq context and interrupts
  *     should be enabled.
index acc849d..d0641bb 100644 (file)
@@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                        /* skb was 'freed' by stack, so clean few
                         * bits and reuse it
                         */
-                       skb_reset_tc(skb);
+                       skb_reset_redirect(skb);
                } while (--burst > 0);
                goto out; /* Skips xmit_mode M_START_XMIT */
        } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
index 085cef5..b70c844 100644 (file)
@@ -233,8 +233,11 @@ static void sock_map_free(struct bpf_map *map)
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        int i;
 
+       /* After the sync no updates or deletes will be in-flight so it
+        * is safe to walk map and remove entries without risking a race
+        * in EEXIST update case.
+        */
        synchronize_rcu();
-       raw_spin_lock_bh(&stab->lock);
        for (i = 0; i < stab->map.max_entries; i++) {
                struct sock **psk = &stab->sks[i];
                struct sock *sk;
@@ -248,7 +251,6 @@ static void sock_map_free(struct bpf_map *map)
                        release_sock(sk);
                }
        }
-       raw_spin_unlock_bh(&stab->lock);
 
        /* wait for psock readers accessing its map link */
        synchronize_rcu();
@@ -863,10 +865,13 @@ static void sock_hash_free(struct bpf_map *map)
        struct hlist_node *node;
        int i;
 
+       /* After the sync no updates or deletes will be in-flight so it
+        * is safe to walk map and remove entries without risking a race
+        * in EEXIST update case.
+        */
        synchronize_rcu();
        for (i = 0; i < htab->buckets_num; i++) {
                bucket = sock_hash_select_bucket(htab, i);
-               raw_spin_lock_bh(&bucket->lock);
                hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
                        hlist_del_rcu(&elem->node);
                        lock_sock(elem->sk);
@@ -875,7 +880,6 @@ static void sock_hash_free(struct bpf_map *map)
                        rcu_read_unlock();
                        release_sock(elem->sk);
                }
-               raw_spin_unlock_bh(&bucket->lock);
        }
 
        /* wait for psock readers accessing its map link */
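The sock_map.c hunks drop the stab/bucket locks from the free paths and lean entirely on the leading synchronize_rcu(): once it returns, no update or delete can still be in flight, so the teardown walk is effectively single-threaded. A hedged sketch of that shape with invented demo_* names; synchronize_rcu() and kfree() are the real primitives:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_entry {
        void *payload;
};

struct demo_table {
        struct demo_entry *entries;
        int nr_entries;
};

static void demo_release_entry(struct demo_entry *e)
{
        e->payload = NULL;      /* stand-in for lock_sock() + unlink */
}

static void demo_table_free(struct demo_table *t)
{
        int i;

        /*
         * Wait out every in-flight updater; after this the walk below
         * runs alone and needs no per-bucket locking.
         */
        synchronize_rcu();

        for (i = 0; i < t->nr_entries; i++)
                demo_release_entry(&t->entries[i]);

        /* Wait again for readers that may still hold entry pointers. */
        synchronize_rcu();
        kfree(t->entries);
}
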
index 2fb6c26..b97ad93 100644 (file)
@@ -298,47 +298,4 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
 }
 EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
 
-/* In the DSA packet_type handler, skb->data points in the middle of the VLAN
- * tag, after tpid and before tci. This is because so far, ETH_HLEN
- * (DMAC, SMAC, EtherType) bytes were pulled.
- * There are 2 bytes of VLAN tag left in skb->data, and upper
- * layers expect the 'real' EtherType to be consumed as well.
- * Coincidentally, a VLAN header is also of the same size as
- * the number of bytes that need to be pulled.
- *
- * skb_mac_header                                      skb->data
- * |                                                       |
- * v                                                       v
- * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
- * +-----------------------+-----------------------+-------+-------+-------+
- * |    Destination MAC    |      Source MAC       |  TPID |  TCI  | EType |
- * +-----------------------+-----------------------+-------+-------+-------+
- * ^                                               |               |
- * |<--VLAN_HLEN-->to                              <---VLAN_HLEN--->
- * from            |
- *       >>>>>>>   v
- *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
- *       >>>>>>>   +-----------------------+-----------------------+-------+
- *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
- *                 +-----------------------+-----------------------+-------+
- *                 ^                                                       ^
- * (now part of    |                                                       |
- *  skb->head)     skb_mac_header                                  skb->data
- */
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
-       u8 *from = skb_mac_header(skb);
-       u8 *dest = from + VLAN_HLEN;
-
-       memmove(dest, from, ETH_HLEN - VLAN_HLEN);
-       skb_pull(skb, VLAN_HLEN);
-       skb_push(skb, ETH_HLEN);
-       skb_reset_mac_header(skb);
-       skb_reset_mac_len(skb);
-       skb_pull_rcsum(skb, ETH_HLEN);
-
-       return skb;
-}
-EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
-
 MODULE_LICENSE("GPL v2");
index 9c31141..9169b63 100644 (file)
@@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
        /* Remove Broadcom tag and update checksum */
        skb_pull_rcsum(skb, BRCM_TAG_LEN);
 
+       skb->offload_fwd_mark = 1;
+
        return skb;
 }
 #endif
index 5366ea4..d553bf3 100644 (file)
@@ -250,14 +250,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
 {
        struct sja1105_meta meta = {0};
        int source_port, switch_id;
-       struct vlan_ethhdr *hdr;
+       struct ethhdr *hdr;
        u16 tpid, vid, tci;
        bool is_link_local;
        bool is_tagged;
        bool is_meta;
 
-       hdr = vlan_eth_hdr(skb);
-       tpid = ntohs(hdr->h_vlan_proto);
+       hdr = eth_hdr(skb);
+       tpid = ntohs(hdr->h_proto);
        is_tagged = (tpid == ETH_P_SJA1105);
        is_link_local = sja1105_is_link_local(skb);
        is_meta = sja1105_is_meta_frame(skb);
@@ -266,7 +266,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
 
        if (is_tagged) {
                /* Normal traffic path. */
-               tci = ntohs(hdr->h_vlan_TCI);
+               skb_push_rcsum(skb, ETH_HLEN);
+               __skb_vlan_pop(skb, &tci);
+               skb_pull_rcsum(skb, ETH_HLEN);
+               skb_reset_network_header(skb);
+               skb_reset_transport_header(skb);
+
                vid = tci & VLAN_VID_MASK;
                source_port = dsa_8021q_rx_source_port(vid);
                switch_id = dsa_8021q_rx_switch_id(vid);
@@ -295,12 +300,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
                return NULL;
        }
 
-       /* Delete/overwrite fake VLAN header, DSA expects to not find
-        * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN).
-        */
-       if (is_tagged)
-               skb = dsa_8021q_remove_header(skb);
-
        return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
                                              is_meta);
 }
index aaef484..92599ad 100644 (file)
@@ -107,8 +107,9 @@ int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_msglevel || !dev->ethtool_ops->set_msglevel)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -129,6 +130,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
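This hunk, and the matching ones in linkinfo.c, linkmodes.c and wol.c below, fix a reference leak: ethnl_parse_header() returns with a reference on the device, so the capability check may no longer return directly but must jump to the common dev_put() label. The shape of the fix, reduced to a hedged sketch (demo_set_something() and the supported flag are stand-ins for the per-request ops checks):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int demo_set_something(struct net_device *dev, bool supported)
{
        int ret;

        /* dev arrives here with a reference held by the request parser. */
        ret = -EOPNOTSUPP;
        if (!supported)
                goto out_dev;   /* a bare return would leak the reference */

        rtnl_lock();
        /* ... apply the requested change under RTNL ... */
        ret = 0;
        rtnl_unlock();

out_dev:
        dev_put(dev);
        return ret;
}
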
index 5d16cb4..6e9e0b5 100644 (file)
@@ -126,9 +126,10 @@ int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_link_ksettings ||
            !dev->ethtool_ops->set_link_ksettings)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -162,6 +163,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
index 96f20be..18cc37b 100644 (file)
@@ -338,9 +338,10 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_link_ksettings ||
            !dev->ethtool_ops->set_link_ksettings)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -370,6 +371,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
index 180c194..fc9e0b8 100644 (file)
@@ -40,6 +40,7 @@ int ethnl_parse_header(struct ethnl_req_info *req_info,
        struct nlattr *tb[ETHTOOL_A_HEADER_MAX + 1];
        const struct nlattr *devname_attr;
        struct net_device *dev = NULL;
+       u32 flags = 0;
        int ret;
 
        if (!header) {
@@ -50,8 +51,17 @@ int ethnl_parse_header(struct ethnl_req_info *req_info,
                               ethnl_header_policy, extack);
        if (ret < 0)
                return ret;
-       devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME];
+       if (tb[ETHTOOL_A_HEADER_FLAGS]) {
+               flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]);
+               if (flags & ~ETHTOOL_FLAG_ALL) {
+                       NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_HEADER_FLAGS],
+                                           "unrecognized request flags");
+                       nl_set_extack_cookie_u32(extack, ETHTOOL_FLAG_ALL);
+                       return -EOPNOTSUPP;
+               }
+       }
 
+       devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME];
        if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) {
                u32 ifindex = nla_get_u32(tb[ETHTOOL_A_HEADER_DEV_INDEX]);
 
@@ -90,9 +100,7 @@ int ethnl_parse_header(struct ethnl_req_info *req_info,
        }
 
        req_info->dev = dev;
-       if (tb[ETHTOOL_A_HEADER_FLAGS])
-               req_info->flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]);
-
+       req_info->flags = flags;
        return 0;
 }
 
index e1b8a65..55e1eca 100644 (file)
@@ -128,8 +128,9 @@ int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info)
        if (ret < 0)
                return ret;
        dev = req_info.dev;
+       ret = -EOPNOTSUPP;
        if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol)
-               return -EOPNOTSUPP;
+               goto out_dev;
 
        rtnl_lock();
        ret = ethnl_ops_begin(dev);
@@ -172,6 +173,7 @@ out_ops:
        ethnl_ops_complete(dev);
 out_rtnl:
        rtnl_unlock();
+out_dev:
        dev_put(dev);
        return ret;
 }
index 3ba7f61..a64bb64 100644 (file)
@@ -482,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *hsr,
        struct hsr_port *port;
        unsigned long tdiff;
 
-       rcu_read_lock();
        node = find_node_by_addr_A(&hsr->node_db, addr);
-       if (!node) {
-               rcu_read_unlock();
-               return -ENOENT; /* No such entry */
-       }
+       if (!node)
+               return -ENOENT;
 
        ether_addr_copy(addr_b, node->macaddress_B);
 
@@ -522,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *hsr,
                *addr_b_ifindex = -1;
        }
 
-       rcu_read_unlock();
-
        return 0;
 }
index 8dc0547..fae21c8 100644 (file)
@@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        if (!na)
                goto invalid;
 
-       hsr_dev = __dev_get_by_index(genl_info_net(info),
-                                    nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+       rcu_read_lock();
+       hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+                                      nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
-               goto invalid;
+               goto rcu_unlock;
        if (!is_hsr_master(hsr_dev))
-               goto invalid;
+               goto rcu_unlock;
 
        /* Send reply */
-       skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto fail;
@@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
        if (res < 0)
                goto nla_put_failure;
-       rcu_read_lock();
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
        if (port)
                res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
                                  port->dev->ifindex);
-       rcu_read_unlock();
        if (res < 0)
                goto nla_put_failure;
 
@@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;
-       rcu_read_lock();
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
        if (port)
                res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
                                  port->dev->ifindex);
-       rcu_read_unlock();
        if (res < 0)
                goto nla_put_failure;
 
+       rcu_read_unlock();
+
        genlmsg_end(skb_out, msg_head);
        genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
 
        return 0;
 
+rcu_unlock:
+       rcu_read_unlock();
 invalid:
        netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
        return 0;
@@ -351,6 +352,7 @@ nla_put_failure:
        /* Fall through */
 
 fail:
+       rcu_read_unlock();
        return res;
 }
 
@@ -358,16 +360,14 @@ fail:
  */
 static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
 {
-       /* For receiving */
-       struct nlattr *na;
+       unsigned char addr[ETH_ALEN];
        struct net_device *hsr_dev;
-
-       /* For sending */
        struct sk_buff *skb_out;
-       void *msg_head;
        struct hsr_priv *hsr;
-       void *pos;
-       unsigned char addr[ETH_ALEN];
+       bool restart = false;
+       struct nlattr *na;
+       void *pos = NULL;
+       void *msg_head;
        int res;
 
        if (!info)
@@ -377,15 +377,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        if (!na)
                goto invalid;
 
-       hsr_dev = __dev_get_by_index(genl_info_net(info),
-                                    nla_get_u32(info->attrs[HSR_A_IFINDEX]));
+       rcu_read_lock();
+       hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
+                                      nla_get_u32(info->attrs[HSR_A_IFINDEX]));
        if (!hsr_dev)
-               goto invalid;
+               goto rcu_unlock;
        if (!is_hsr_master(hsr_dev))
-               goto invalid;
+               goto rcu_unlock;
 
+restart:
        /* Send reply */
-       skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto fail;
@@ -399,18 +401,26 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
                goto nla_put_failure;
        }
 
-       res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
-       if (res < 0)
-               goto nla_put_failure;
+       if (!restart) {
+               res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
+               if (res < 0)
+                       goto nla_put_failure;
+       }
 
        hsr = netdev_priv(hsr_dev);
 
-       rcu_read_lock();
-       pos = hsr_get_next_node(hsr, NULL, addr);
+       if (!pos)
+               pos = hsr_get_next_node(hsr, NULL, addr);
        while (pos) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
                if (res < 0) {
-                       rcu_read_unlock();
+                       if (res == -EMSGSIZE) {
+                               genlmsg_end(skb_out, msg_head);
+                               genlmsg_unicast(genl_info_net(info), skb_out,
+                                               info->snd_portid);
+                               restart = true;
+                               goto restart;
+                       }
                        goto nla_put_failure;
                }
                pos = hsr_get_next_node(hsr, pos, addr);
@@ -422,15 +432,18 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
 
        return 0;
 
+rcu_unlock:
+       rcu_read_unlock();
 invalid:
        netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
        return 0;
 
 nla_put_failure:
-       kfree_skb(skb_out);
+       nlmsg_free(skb_out);
        /* Fall through */
 
 fail:
+       rcu_read_unlock();
        return res;
 }
 
@@ -457,6 +470,7 @@ static struct genl_family hsr_genl_family __ro_after_init = {
        .version = 1,
        .maxattr = HSR_A_MAX,
        .policy = hsr_genl_policy,
+       .netnsok = true,
        .module = THIS_MODULE,
        .ops = hsr_ops,
        .n_ops = ARRAY_SIZE(hsr_ops),
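The hsr_netlink.c rework moves the device lookup under rcu_read_lock() and keeps the read-side lock held while the reply is built, which is why the allocations become GFP_ATOMIC and why every failure path now ends in rcu_read_unlock(). A compressed sketch of that structure; demo_get_status() and the attribute index are invented, the netlink and RCU helpers are the real ones:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/genetlink.h>

static int demo_get_status(struct genl_info *info)
{
        struct net_device *dev;
        struct sk_buff *skb_out;
        int res;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(genl_info_net(info),
                                   nla_get_u32(info->attrs[1]));
        if (!dev) {
                res = -ENODEV;
                goto out_unlock;
        }

        /* No sleeping allocation while inside the RCU section. */
        skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb_out) {
                res = -ENOMEM;
                goto out_unlock;
        }

        /* ... genlmsg_put(), fill from RCU-protected state, genlmsg_end() ... */
        rcu_read_unlock();

        return genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

out_unlock:
        rcu_read_unlock();
        return res;
}
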
index fbfd0db..a9104d4 100644 (file)
@@ -145,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
        if (!port)
                return -ENOMEM;
 
+       port->hsr = hsr;
+       port->dev = dev;
+       port->type = type;
+
        if (type != HSR_PT_MASTER) {
                res = hsr_portdev_setup(dev, port);
                if (res)
                        goto fail_dev_setup;
        }
 
-       port->hsr = hsr;
-       port->dev = dev;
-       port->type = type;
-
        list_add_tail_rcu(&port->port_list, &hsr->ports);
        synchronize_rcu();
 
index f96bd48..6490b84 100644 (file)
@@ -303,6 +303,7 @@ config SYN_COOKIES
 
 config NET_IPVTI
        tristate "Virtual (secure) IP: tunneling"
+       depends on IPV6 || IPV6=n
        select INET_TUNNEL
        select NET_IP_TUNNEL
        select XFRM
index 574972b..2bf3abe 100644 (file)
@@ -184,7 +184,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
 {
        const struct tcp_congestion_ops *utcp_ca;
        struct tcp_congestion_ops *tcp_ca;
-       size_t tcp_ca_name_len;
        int prog_fd;
        u32 moff;
 
@@ -199,13 +198,11 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
                tcp_ca->flags = utcp_ca->flags;
                return 1;
        case offsetof(struct tcp_congestion_ops, name):
-               tcp_ca_name_len = strnlen(utcp_ca->name, sizeof(utcp_ca->name));
-               if (!tcp_ca_name_len ||
-                   tcp_ca_name_len == sizeof(utcp_ca->name))
+               if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
+                                    sizeof(tcp_ca->name)) <= 0)
                        return -EINVAL;
                if (tcp_ca_find(utcp_ca->name))
                        return -EEXIST;
-               memcpy(tcp_ca->name, utcp_ca->name, sizeof(tcp_ca->name));
                return 1;
        }
 
index 577db1d..213be9c 100644 (file)
@@ -997,7 +997,9 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
                        return -ENOENT;
                }
 
+               rcu_read_lock();
                err = fib_table_dump(tb, skb, cb, &filter);
+               rcu_read_unlock();
                return skb->len ? : err;
        }
 
index 8274f98..029b24e 100644 (file)
@@ -1153,6 +1153,24 @@ static int ipgre_netlink_parms(struct net_device *dev,
        if (data[IFLA_GRE_FWMARK])
                *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
 
+       return 0;
+}
+
+static int erspan_netlink_parms(struct net_device *dev,
+                               struct nlattr *data[],
+                               struct nlattr *tb[],
+                               struct ip_tunnel_parm *parms,
+                               __u32 *fwmark)
+{
+       struct ip_tunnel *t = netdev_priv(dev);
+       int err;
+
+       err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
+       if (err)
+               return err;
+       if (!data)
+               return 0;
+
        if (data[IFLA_GRE_ERSPAN_VER]) {
                t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
@@ -1276,45 +1294,70 @@ static void ipgre_tap_setup(struct net_device *dev)
        ip_tunnel_setup(dev, gre_tap_net_id);
 }
 
-static int ipgre_newlink(struct net *src_net, struct net_device *dev,
-                        struct nlattr *tb[], struct nlattr *data[],
-                        struct netlink_ext_ack *extack)
+static int
+ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
 {
-       struct ip_tunnel_parm p;
        struct ip_tunnel_encap ipencap;
-       __u32 fwmark = 0;
-       int err;
 
        if (ipgre_netlink_encap_parms(data, &ipencap)) {
                struct ip_tunnel *t = netdev_priv(dev);
-               err = ip_tunnel_encap_setup(t, &ipencap);
+               int err = ip_tunnel_encap_setup(t, &ipencap);
 
                if (err < 0)
                        return err;
        }
 
+       return 0;
+}
+
+static int ipgre_newlink(struct net *src_net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[],
+                        struct netlink_ext_ack *extack)
+{
+       struct ip_tunnel_parm p;
+       __u32 fwmark = 0;
+       int err;
+
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
+
        err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
        if (err < 0)
                return err;
        return ip_tunnel_newlink(dev, tb, &p, fwmark);
 }
 
+static int erspan_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[],
+                         struct netlink_ext_ack *extack)
+{
+       struct ip_tunnel_parm p;
+       __u32 fwmark = 0;
+       int err;
+
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
+
+       err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
+       if (err)
+               return err;
+       return ip_tunnel_newlink(dev, tb, &p, fwmark);
+}
+
 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct nlattr *data[],
                            struct netlink_ext_ack *extack)
 {
        struct ip_tunnel *t = netdev_priv(dev);
-       struct ip_tunnel_encap ipencap;
        __u32 fwmark = t->fwmark;
        struct ip_tunnel_parm p;
        int err;
 
-       if (ipgre_netlink_encap_parms(data, &ipencap)) {
-               err = ip_tunnel_encap_setup(t, &ipencap);
-
-               if (err < 0)
-                       return err;
-       }
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
 
        err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
        if (err < 0)
@@ -1327,8 +1370,34 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
        t->parms.i_flags = p.i_flags;
        t->parms.o_flags = p.o_flags;
 
-       if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
-               ipgre_link_update(dev, !tb[IFLA_MTU]);
+       ipgre_link_update(dev, !tb[IFLA_MTU]);
+
+       return 0;
+}
+
+static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
+                            struct nlattr *data[],
+                            struct netlink_ext_ack *extack)
+{
+       struct ip_tunnel *t = netdev_priv(dev);
+       __u32 fwmark = t->fwmark;
+       struct ip_tunnel_parm p;
+       int err;
+
+       err = ipgre_newlink_encap_setup(dev, data);
+       if (err)
+               return err;
+
+       err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
+       if (err < 0)
+               return err;
+
+       err = ip_tunnel_changelink(dev, tb, &p, fwmark);
+       if (err < 0)
+               return err;
+
+       t->parms.i_flags = p.i_flags;
+       t->parms.o_flags = p.o_flags;
 
        return 0;
 }
@@ -1519,8 +1588,8 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
        .priv_size      = sizeof(struct ip_tunnel),
        .setup          = erspan_setup,
        .validate       = erspan_validate,
-       .newlink        = ipgre_newlink,
-       .changelink     = ipgre_changelink,
+       .newlink        = erspan_newlink,
+       .changelink     = erspan_changelink,
        .dellink        = ip_tunnel_dellink,
        .get_size       = ipgre_get_size,
        .fill_info      = ipgre_fill_info,
index 37cddd1..1b4e6f2 100644 (file)
@@ -187,17 +187,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        int mtu;
 
        if (!dst) {
-               struct rtable *rt;
-
-               fl->u.ip4.flowi4_oif = dev->ifindex;
-               fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
-               rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
-               if (IS_ERR(rt)) {
+               switch (skb->protocol) {
+               case htons(ETH_P_IP): {
+                       struct rtable *rt;
+
+                       fl->u.ip4.flowi4_oif = dev->ifindex;
+                       fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+                       if (IS_ERR(rt)) {
+                               dev->stats.tx_carrier_errors++;
+                               goto tx_error_icmp;
+                       }
+                       dst = &rt->dst;
+                       skb_dst_set(skb, dst);
+                       break;
+               }
+#if IS_ENABLED(CONFIG_IPV6)
+               case htons(ETH_P_IPV6):
+                       fl->u.ip6.flowi6_oif = dev->ifindex;
+                       fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               dst = NULL;
+                               dev->stats.tx_carrier_errors++;
+                               goto tx_error_icmp;
+                       }
+                       skb_dst_set(skb, dst);
+                       break;
+#endif
+               default:
                        dev->stats.tx_carrier_errors++;
                        goto tx_error_icmp;
                }
-               dst = &rt->dst;
-               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
index eb2d805..dc77c30 100644 (file)
@@ -2948,8 +2948,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        err = -EPERM;
                else if (tp->repair_queue == TCP_SEND_QUEUE)
                        WRITE_ONCE(tp->write_seq, val);
-               else if (tp->repair_queue == TCP_RECV_QUEUE)
+               else if (tp->repair_queue == TCP_RECV_QUEUE) {
                        WRITE_ONCE(tp->rcv_nxt, val);
+                       WRITE_ONCE(tp->copied_seq, val);
+               }
                else
                        err = -EINVAL;
                break;
index 306e25d..2f45cde 100644 (file)
@@ -1109,6 +1109,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 
                if (unlikely(!skb))
                        return -ENOBUFS;
+               /* retransmit skbs might have a non zero value in skb->dev
+                * because skb->dev is aliased with skb->rbnode.rb_left
+                */
+               skb->dev = NULL;
        }
 
        inet = inet_sk(sk);
@@ -3037,8 +3041,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 
                tcp_skb_tsorted_save(skb) {
                        nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
-                       err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-                                    -ENOBUFS;
+                       if (nskb) {
+                               nskb->dev = NULL;
+                               err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
+                       } else {
+                               err = -ENOBUFS;
+                       }
                } tcp_skb_tsorted_restore(skb);
 
                if (!err) {
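Both tcp_output.c hunks clear skb->dev before handing the skb to the IP layer because, while the skb sits in the retransmit rb-tree, the storage behind skb->dev is reused for rbnode.rb_left, so whatever ends up there is stale by the time the skb is transmitted again. A tiny userspace illustration of why a union member has to be re-initialized once the other member has been used; the struct is invented:

#include <stdio.h>

struct demo_buf {
        union {
                void *dev;                      /* used on the xmit path */
                struct {
                        void *left;
                        void *right;
                } rb;                           /* used while queued */
        };
};

int main(void)
{
        struct demo_buf b = { .dev = NULL };
        int anchor = 42;

        /* While "queued", the same bytes hold tree pointers ... */
        b.rb.left = &anchor;

        /* ... so dev is no longer NULL and must be reset before reuse. */
        printf("dev before reset: %p\n", b.dev);
        b.dev = NULL;
        printf("dev after reset:  %p\n", b.dev);
        return 0;
}
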
index 524006a..cc6180e 100644 (file)
@@ -311,7 +311,7 @@ static int vti6_rcv(struct sk_buff *skb)
 
                if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                        rcu_read_unlock();
-                       return 0;
+                       goto discard;
                }
 
                ipv6h = ipv6_hdr(skb);
@@ -450,15 +450,33 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int mtu;
 
        if (!dst) {
-               fl->u.ip6.flowi6_oif = dev->ifindex;
-               fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
-               dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
-               if (dst->error) {
-                       dst_release(dst);
-                       dst = NULL;
+               switch (skb->protocol) {
+               case htons(ETH_P_IP): {
+                       struct rtable *rt;
+
+                       fl->u.ip4.flowi4_oif = dev->ifindex;
+                       fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+                       if (IS_ERR(rt))
+                               goto tx_err_link_failure;
+                       dst = &rt->dst;
+                       skb_dst_set(skb, dst);
+                       break;
+               }
+               case htons(ETH_P_IPV6):
+                       fl->u.ip6.flowi6_oif = dev->ifindex;
+                       fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               dst = NULL;
+                               goto tx_err_link_failure;
+                       }
+                       skb_dst_set(skb, dst);
+                       break;
+               default:
                        goto tx_err_link_failure;
                }
-               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
index e11bdb0..25b7ebd 100644 (file)
@@ -78,7 +78,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const
 
        hlist_for_each_entry_rcu(x6spi,
                             &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
-                            list_byaddr) {
+                            list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) {
                if (xfrm6_addr_equal(&x6spi->addr, saddr))
                        return x6spi;
        }
index c80b1e1..3419ed6 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
  */
 
 #include <linux/debugfs.h>
@@ -78,6 +78,7 @@ static const char * const sta_flag_names[] = {
        FLAG(MPSP_OWNER),
        FLAG(MPSP_RECIPIENT),
        FLAG(PS_DELIVER),
+       FLAG(USES_ENCRYPTION),
 #undef FLAG
 };
 
index 0f889b9..efc1acc 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright 2018-2019  Intel Corporation
+ * Copyright 2018-2020  Intel Corporation
  */
 
 #include <linux/if_ether.h>
@@ -262,22 +262,29 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
                          sta ? sta->sta.addr : bcast_addr, ret);
 }
 
-int ieee80211_set_tx_key(struct ieee80211_key *key)
+static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force)
 {
        struct sta_info *sta = key->sta;
        struct ieee80211_local *local = key->local;
 
        assert_key_lock(local);
 
+       set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
+
        sta->ptk_idx = key->conf.keyidx;
 
-       if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
+       if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
                clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
        ieee80211_check_fast_xmit(sta);
 
        return 0;
 }
 
+int ieee80211_set_tx_key(struct ieee80211_key *key)
+{
+       return _ieee80211_set_tx_key(key, false);
+}
+
 static void ieee80211_pairwise_rekey(struct ieee80211_key *old,
                                     struct ieee80211_key *new)
 {
@@ -441,11 +448,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
                if (pairwise) {
                        rcu_assign_pointer(sta->ptk[idx], new);
                        if (new &&
-                           !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) {
-                               sta->ptk_idx = idx;
-                               clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
-                               ieee80211_check_fast_xmit(sta);
-                       }
+                           !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX))
+                               _ieee80211_set_tx_key(new, true);
                } else {
                        rcu_assign_pointer(sta->gtk[idx], new);
                }
index 0f5f406..e3572be 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
  */
 
 #include <linux/module.h>
@@ -1049,6 +1049,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
        might_sleep();
        lockdep_assert_held(&local->sta_mtx);
 
+       while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
+               ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
+               WARN_ON_ONCE(ret);
+       }
+
        /* now keys can no longer be reached */
        ieee80211_free_sta_keys(local, sta);
 
index c00e285..552eed3 100644 (file)
@@ -98,6 +98,7 @@ enum ieee80211_sta_info_flags {
        WLAN_STA_MPSP_OWNER,
        WLAN_STA_MPSP_RECIPIENT,
        WLAN_STA_PS_DELIVER,
+       WLAN_STA_USES_ENCRYPTION,
 
        NUM_WLAN_STA_FLAGS,
 };
index 87def9c..d9cca6d 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018, 2020 Intel Corporation
  *
  * Transmit and frame generation functions.
  */
@@ -590,10 +590,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 
-       if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
                tx->key = NULL;
-       else if (tx->sta &&
-                (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
+               return TX_CONTINUE;
+       }
+
+       if (tx->sta &&
+           (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
                tx->key = key;
        else if (ieee80211_is_group_privacy_action(tx->skb) &&
                (key = rcu_dereference(tx->sdata->default_multicast_key)))
@@ -654,6 +657,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                if (!skip_hw && tx->key &&
                    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
                        info->control.hw_key = &tx->key->conf;
+       } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+                  test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
+               return TX_DROP;
        }
 
        return TX_CONTINUE;
@@ -3598,8 +3604,25 @@ begin:
        tx.skb = skb;
        tx.sdata = vif_to_sdata(info->control.vif);
 
-       if (txq->sta)
+       if (txq->sta) {
                tx.sta = container_of(txq->sta, struct sta_info, sta);
+               /*
+                * Drop unicast frames to unauthorised stations unless they are
+                * EAPOL frames from the local station.
+                */
+               if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+                            tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
+                            !is_multicast_ether_addr(hdr->addr1) &&
+                            !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
+                            (!(info->control.flags &
+                               IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
+                             !ether_addr_equal(tx.sdata->vif.addr,
+                                               hdr->addr2)))) {
+                       I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
+                       ieee80211_free_txskb(&local->hw, skb);
+                       goto begin;
+               }
+       }
 
        /*
         * The key can be removed while the packet was queued, so need to call
@@ -5126,6 +5149,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct ethhdr *ehdr;
+       u32 ctrl_flags = 0;
        u32 flags;
 
        /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
@@ -5135,6 +5159,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
            proto != cpu_to_be16(ETH_P_PREAUTH))
                return -EINVAL;
 
+       if (proto == sdata->control_port_protocol)
+               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+
        if (unencrypted)
                flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
        else
@@ -5160,7 +5187,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_mac_header(skb);
 
        local_bh_disable();
-       __ieee80211_subif_start_xmit(skb, skb->dev, flags, 0);
+       __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags);
        local_bh_enable();
 
        return 0;
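The largest tx.c hunk makes ieee80211_tx_dequeue() drop unicast frames queued for a station that is not yet authorized, unless the frame carries the configured port-control (EAPOL) protocol and is sent from the local address; the real check additionally exempts mesh and OCB interfaces. Reduced to a plain predicate for illustration, with all parameters standing in for the mac80211 state they summarize:

#include <linux/types.h>

static bool demo_should_drop(bool is_multicast, bool peer_authorized,
                             bool is_port_control_proto, bool from_local_addr)
{
        if (is_multicast || peer_authorized)
                return false;
        /* Unauthorized peer: only the 802.1X handshake itself may pass. */
        return !(is_port_control_proto && from_local_addr);
}
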
index 8af28e1..70ebeba 100644 (file)
@@ -554,6 +554,9 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
        nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
        nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
        nf_flow_table_offload_flush(flow_table);
+       if (nf_flowtable_hw_offload(flow_table))
+               nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
+                                     flow_table);
        rhashtable_destroy(&flow_table->rhashtable);
 }
 EXPORT_SYMBOL_GPL(nf_flow_table_free);
index 9e563fd..ba775ae 100644 (file)
@@ -146,11 +146,13 @@ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
 
        if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
            (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-            nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
+            nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
                return -1;
+
+       iph = ip_hdr(skb);
        if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
            (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-            nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
+            nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
                return -1;
 
        return 0;
@@ -189,6 +191,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
        if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
                return -1;
 
+       iph = ip_hdr(skb);
        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 
        tuple->src_v4.s_addr    = iph->saddr;
@@ -426,11 +429,13 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow,
 
        if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
            (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-            nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+            nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
                return -1;
+
+       ip6h = ipv6_hdr(skb);
        if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
            (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-            nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+            nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
                return -1;
 
        return 0;
@@ -459,6 +464,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
        if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
                return -1;
 
+       ip6h = ipv6_hdr(skb);
        ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 
        tuple->src_v6           = ip6h->saddr;
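The nf_flow_table_ip.c hunks re-read ip_hdr()/ipv6_hdr() after every call that may modify the skb (pskb_may_pull() and the NAT helpers) instead of trusting a pointer cached earlier, since those calls can reallocate skb->head. A kernel-style sketch of the rule; demo_process() is invented and the UDP header size is only used as an arbitrary pull length:

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/udp.h>

static int demo_process(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        unsigned int thoff = iph->ihl * 4;

        /* May pull data into the linear area and move skb->head. */
        if (!pskb_may_pull(skb, thoff + sizeof(struct udphdr)))
                return -1;

        /* The old pointer may now dangle: always re-derive it. */
        iph = ip_hdr(skb);

        return iph->protocol;
}
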
index 06f00cd..f2c22c6 100644 (file)
@@ -87,6 +87,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
        default:
                return -EOPNOTSUPP;
        }
+       mask->control.addr_type = 0xffff;
        match->dissector.used_keys |= BIT(key->control.addr_type);
        mask->basic.n_proto = 0xffff;
 
index 38c680f..d11f1a7 100644 (file)
@@ -5082,6 +5082,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                err = -EBUSY;
                        else if (!(nlmsg_flags & NLM_F_EXCL))
                                err = 0;
+               } else if (err == -ENOTEMPTY) {
+                       /* ENOTEMPTY reports overlapping between this element
+                        * and an existing one.
+                        */
+                       err = -EEXIST;
                }
                goto err_element_clash;
        }
index aba11c2..3087e23 100644 (file)
@@ -28,6 +28,9 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
        struct nft_fwd_netdev *priv = nft_expr_priv(expr);
        int oif = regs->data[priv->sreg_dev];
 
+       /* This is used by ifb only. */
+       skb_set_redirected(pkt->skb, true);
+
        nf_fwd_netdev_egress(pkt, oif);
        regs->verdict.code = NF_STOLEN;
 }
@@ -190,6 +193,13 @@ nla_put_failure:
        return -1;
 }
 
+static int nft_fwd_validate(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nft_data **data)
+{
+       return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+}
+
 static struct nft_expr_type nft_fwd_netdev_type;
 static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
        .type           = &nft_fwd_netdev_type,
@@ -197,6 +207,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
        .eval           = nft_fwd_neigh_eval,
        .init           = nft_fwd_neigh_init,
        .dump           = nft_fwd_neigh_dump,
+       .validate       = nft_fwd_validate,
 };
 
 static const struct nft_expr_ops nft_fwd_netdev_ops = {
@@ -205,6 +216,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
        .eval           = nft_fwd_netdev_eval,
        .init           = nft_fwd_netdev_init,
        .dump           = nft_fwd_netdev_dump,
+       .validate       = nft_fwd_validate,
        .offload        = nft_fwd_netdev_offload,
 };
 
index 4fc0c92..ef7e8ad 100644 (file)
@@ -1098,21 +1098,41 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
        struct nft_pipapo_field *f;
        int i, bsize_max, err = 0;
 
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
+               end = (const u8 *)nft_set_ext_key_end(ext)->data;
+       else
+               end = start;
+
        dup = pipapo_get(net, set, start, genmask);
-       if (PTR_ERR(dup) == -ENOENT) {
-               if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) {
-                       end = (const u8 *)nft_set_ext_key_end(ext)->data;
-                       dup = pipapo_get(net, set, end, nft_genmask_next(net));
-               } else {
-                       end = start;
+       if (!IS_ERR(dup)) {
+               /* Check if we already have the same exact entry */
+               const struct nft_data *dup_key, *dup_end;
+
+               dup_key = nft_set_ext_key(&dup->ext);
+               if (nft_set_ext_exists(&dup->ext, NFT_SET_EXT_KEY_END))
+                       dup_end = nft_set_ext_key_end(&dup->ext);
+               else
+                       dup_end = dup_key;
+
+               if (!memcmp(start, dup_key->data, sizeof(*dup_key->data)) &&
+                   !memcmp(end, dup_end->data, sizeof(*dup_end->data))) {
+                       *ext2 = &dup->ext;
+                       return -EEXIST;
                }
+
+               return -ENOTEMPTY;
+       }
+
+       if (PTR_ERR(dup) == -ENOENT) {
+               /* Look for partially overlapping entries */
+               dup = pipapo_get(net, set, end, nft_genmask_next(net));
        }
 
        if (PTR_ERR(dup) != -ENOENT) {
                if (IS_ERR(dup))
                        return PTR_ERR(dup);
                *ext2 = &dup->ext;
-               return -EEXIST;
+               return -ENOTEMPTY;
        }
 
        /* Validate */
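The pipapo hunk, together with the nf_tables_api.c change further up, separates two cases that used to share -EEXIST: an element whose start and end keys both match an existing entry is a true duplicate, while anything else that pipapo_get() finds is a partial overlap and is now reported as -ENOTEMPTY (translated back to -EEXIST for userspace). A hedged sketch of just that classification, with invented parameters and a caller-supplied key length:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_classify(const u8 *start, const u8 *end,
                         const u8 *dup_start, const u8 *dup_end, size_t klen)
{
        /* Same start and same end: an exact duplicate of an existing element. */
        if (!memcmp(start, dup_start, klen) && !memcmp(end, dup_end, klen))
                return -EEXIST;

        /* Otherwise the ranges merely overlap. */
        return -ENOTEMPTY;
}
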
index 5000b93..8617fc1 100644 (file)
@@ -33,6 +33,11 @@ static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
               (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
 }
 
+static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
+{
+       return !nft_rbtree_interval_end(rbe);
+}
+
 static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
                             const struct nft_rbtree_elem *interval)
 {
@@ -64,7 +69,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
                        if (interval &&
                            nft_rbtree_equal(set, this, interval) &&
                            nft_rbtree_interval_end(rbe) &&
-                           !nft_rbtree_interval_end(interval))
+                           nft_rbtree_interval_start(interval))
                                continue;
                        interval = rbe;
                } else if (d > 0)
@@ -89,7 +94,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
 
        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
-           !nft_rbtree_interval_end(interval)) {
+           nft_rbtree_interval_start(interval)) {
                *ext = &interval->ext;
                return true;
        }
@@ -208,8 +213,43 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
        u8 genmask = nft_genmask_next(net);
        struct nft_rbtree_elem *rbe;
        struct rb_node *parent, **p;
+       bool overlap = false;
        int d;
 
+       /* Detect overlaps as we descend the tree. Set the flag in these cases:
+        *
+        * a1. |__ _ _?  >|__ _ _  (insert start after existing start)
+        * a2. _ _ __>|  ?_ _ __|  (insert end before existing end)
+        * a3. _ _ ___|  ?_ _ _>|  (insert end after existing end)
+        * a4. >|__ _ _   _ _ __|  (insert start before existing end)
+        *
+        * and clear it later on, as we eventually reach the points indicated by
+        * '?' above, in the cases described below. We'll always meet these
+        * later, locally, due to tree ordering, and overlaps for the intervals
+        * that are the closest together are always evaluated last.
+        *
+        * b1. |__ _ _!  >|__ _ _  (insert start after existing end)
+        * b2. _ _ __>|  !_ _ __|  (insert end before existing start)
+        * b3. !_____>|            (insert end after existing start)
+        *
+        * Case a4. resolves to b1.:
+        * - if the inserted start element is the leftmost, because the '0'
+        *   element in the tree serves as end element
+        * - otherwise, if an existing end is found. Note that end elements are
+        *   always inserted after corresponding start elements.
+        *
+        * For a new, rightmost pair of elements, we'll hit cases b1. and b3.,
+        * in that order.
+        *
+        * The flag is also cleared in two special cases:
+        *
+        * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
+        * b5. |__ _ >|!__ _ _   (insert end right after existing start)
+        *
+        * which always happen as last step and imply that no further
+        * overlapping is possible.
+        */
+
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
@@ -218,17 +258,42 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                d = memcmp(nft_set_ext_key(&rbe->ext),
                           nft_set_ext_key(&new->ext),
                           set->klen);
-               if (d < 0)
+               if (d < 0) {
                        p = &parent->rb_left;
-               else if (d > 0)
+
+                       if (nft_rbtree_interval_start(new)) {
+                               overlap = nft_rbtree_interval_start(rbe) &&
+                                         nft_set_elem_active(&rbe->ext,
+                                                             genmask);
+                       } else {
+                               overlap = nft_rbtree_interval_end(rbe) &&
+                                         nft_set_elem_active(&rbe->ext,
+                                                             genmask);
+                       }
+               } else if (d > 0) {
                        p = &parent->rb_right;
-               else {
+
+                       if (nft_rbtree_interval_end(new)) {
+                               overlap = nft_rbtree_interval_end(rbe) &&
+                                         nft_set_elem_active(&rbe->ext,
+                                                             genmask);
+                       } else if (nft_rbtree_interval_end(rbe) &&
+                                  nft_set_elem_active(&rbe->ext, genmask)) {
+                               overlap = true;
+                       }
+               } else {
                        if (nft_rbtree_interval_end(rbe) &&
-                           !nft_rbtree_interval_end(new)) {
+                           nft_rbtree_interval_start(new)) {
                                p = &parent->rb_left;
-                       } else if (!nft_rbtree_interval_end(rbe) &&
+
+                               if (nft_set_elem_active(&rbe->ext, genmask))
+                                       overlap = false;
+                       } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(new)) {
                                p = &parent->rb_right;
+
+                               if (nft_set_elem_active(&rbe->ext, genmask))
+                                       overlap = false;
                        } else if (nft_set_elem_active(&rbe->ext, genmask)) {
                                *ext = &rbe->ext;
                                return -EEXIST;
@@ -237,6 +302,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                        }
                }
        }
+
+       if (overlap)
+               return -ENOTEMPTY;
+
        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
@@ -317,10 +386,10 @@ static void *nft_rbtree_deactivate(const struct net *net,
                        parent = parent->rb_right;
                else {
                        if (nft_rbtree_interval_end(rbe) &&
-                           !nft_rbtree_interval_end(this)) {
+                           nft_rbtree_interval_start(this)) {
                                parent = parent->rb_left;
                                continue;
-                       } else if (!nft_rbtree_interval_end(rbe) &&
+                       } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
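
The long comment in the insertion hunk above describes how overlaps are detected while descending the tree of start/end elements. As a reference point, here is a minimal user-space sketch of the predicate that the descent is effectively answering for half-open intervals; the types and names are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct interval { unsigned int start, end; };   /* half-open: [start, end) */

static bool intervals_overlap(const struct interval *a, const struct interval *b)
{
        /* two half-open intervals overlap iff each starts before the other ends */
        return a->start < b->end && b->start < a->end;
}

int main(void)
{
        struct interval existing = { 10, 20 };
        struct interval partial  = { 15, 30 };  /* would now be rejected with -ENOTEMPTY */
        struct interval adjacent = { 20, 30 };  /* end is exclusive, so this is allowed */

        printf("partial: %d, adjacent: %d\n",
               intervals_overlap(&existing, &partial),
               intervals_overlap(&existing, &adjacent));
        return 0;
}
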
index 5313f1c..2f23479 100644 (file)
@@ -2392,19 +2392,14 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
        if (nlk_has_extack && extack && extack->_msg)
                tlvlen += nla_total_size(strlen(extack->_msg) + 1);
 
-       if (err) {
-               if (!(nlk->flags & NETLINK_F_CAP_ACK))
-                       payload += nlmsg_len(nlh);
-               else
-                       flags |= NLM_F_CAPPED;
-               if (nlk_has_extack && extack && extack->bad_attr)
-                       tlvlen += nla_total_size(sizeof(u32));
-       } else {
+       if (err && !(nlk->flags & NETLINK_F_CAP_ACK))
+               payload += nlmsg_len(nlh);
+       else
                flags |= NLM_F_CAPPED;
-
-               if (nlk_has_extack && extack && extack->cookie_len)
-                       tlvlen += nla_total_size(extack->cookie_len);
-       }
+       if (err && nlk_has_extack && extack && extack->bad_attr)
+               tlvlen += nla_total_size(sizeof(u32));
+       if (nlk_has_extack && extack && extack->cookie_len)
+               tlvlen += nla_total_size(extack->cookie_len);
 
        if (tlvlen)
                flags |= NLM_F_ACK_TLVS;
@@ -2427,20 +2422,16 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                        WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
                                               extack->_msg));
                }
-               if (err) {
-                       if (extack->bad_attr &&
-                           !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
-                                    (u8 *)extack->bad_attr >= in_skb->data +
-                                                              in_skb->len))
-                               WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
-                                                   (u8 *)extack->bad_attr -
-                                                   (u8 *)nlh));
-               } else {
-                       if (extack->cookie_len)
-                               WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
-                                               extack->cookie_len,
-                                               extack->cookie));
-               }
+               if (err && extack->bad_attr &&
+                   !WARN_ON((u8 *)extack->bad_attr < in_skb->data ||
+                            (u8 *)extack->bad_attr >= in_skb->data +
+                                                      in_skb->len))
+                       WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
+                                           (u8 *)extack->bad_attr -
+                                           (u8 *)nlh));
+               if (extack->cookie_len)
+                       WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
+                                       extack->cookie_len, extack->cookie));
        }
 
        nlmsg_end(skb, rep);
index e5b0986..29bd405 100644 (file)
@@ -2173,6 +2173,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        struct timespec64 ts;
        __u32 ts_status;
        bool is_drop_n_account = false;
+       unsigned int slot_id = 0;
        bool do_vnet = false;
 
        /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
@@ -2275,6 +2276,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!h.raw)
                goto drop_n_account;
 
+       if (po->tp_version <= TPACKET_V2) {
+               slot_id = po->rx_ring.head;
+               if (test_bit(slot_id, po->rx_ring.rx_owner_map))
+                       goto drop_n_account;
+               __set_bit(slot_id, po->rx_ring.rx_owner_map);
+       }
+
        if (do_vnet &&
            virtio_net_hdr_from_skb(skb, h.raw + macoff -
                                    sizeof(struct virtio_net_hdr),
@@ -2380,7 +2388,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 #endif
 
        if (po->tp_version <= TPACKET_V2) {
+               spin_lock(&sk->sk_receive_queue.lock);
                __packet_set_status(po, h.raw, status);
+               __clear_bit(slot_id, po->rx_ring.rx_owner_map);
+               spin_unlock(&sk->sk_receive_queue.lock);
                sk->sk_data_ready(sk);
        } else {
                prb_clear_blk_fill_status(&po->rx_ring);
@@ -4277,6 +4288,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
+       unsigned long *rx_owner_map = NULL;
        int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
@@ -4362,6 +4374,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        }
                        break;
                default:
+                       if (!tx_ring) {
+                               rx_owner_map = bitmap_alloc(req->tp_frame_nr,
+                                       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
+                               if (!rx_owner_map)
+                                       goto out_free_pg_vec;
+                       }
                        break;
                }
        }
@@ -4391,6 +4409,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                err = 0;
                spin_lock_bh(&rb_queue->lock);
                swap(rb->pg_vec, pg_vec);
+               if (po->tp_version <= TPACKET_V2)
+                       swap(rb->rx_owner_map, rx_owner_map);
                rb->frame_max = (req->tp_frame_nr - 1);
                rb->head = 0;
                rb->frame_size = req->tp_frame_size;
@@ -4422,6 +4442,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
 out_free_pg_vec:
+       bitmap_free(rx_owner_map);
        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
index 82fb2b1..907f4cd 100644 (file)
@@ -70,7 +70,10 @@ struct packet_ring_buffer {
 
        unsigned int __percpu   *pending_refcnt;
 
-       struct tpacket_kbdq_core        prb_bdqc;
+       union {
+               unsigned long                   *rx_owner_map;
+               struct tpacket_kbdq_core        prb_bdqc;
+       };
 };
 
 extern struct mutex fanout_mutex;
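
The af_packet hunks above close a race on TPACKET_V1/V2 RX rings by tracking which frame slots have been handed out but not yet had their status published, so a second packet cannot be written into a slot that is still in flight. A rough user-space sketch of that claim/fill/release pattern (plain bitmap, no locking, names invented for illustration):

#include <limits.h>
#include <stdbool.h>

#define NSLOTS 64

static unsigned long owner_map[NSLOTS / (sizeof(unsigned long) * CHAR_BIT)];

static bool slot_claim(unsigned int slot)
{
        unsigned long *word = &owner_map[slot / (sizeof(unsigned long) * CHAR_BIT)];
        unsigned long bit = 1UL << (slot % (sizeof(unsigned long) * CHAR_BIT));

        if (*word & bit)
                return false;   /* slot still in flight: drop the packet */
        *word |= bit;
        return true;
}

static void slot_release(unsigned int slot)
{
        owner_map[slot / (sizeof(unsigned long) * CHAR_BIT)] &=
                ~(1UL << (slot % (sizeof(unsigned long) * CHAR_BIT)));
}

int main(void)
{
        if (slot_claim(3)) {
                /* ... fill the frame, then publish its status ... */
                slot_release(3);
        }
        return 0;
}
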
index fe42f98..15ee92d 100644 (file)
@@ -285,7 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                                           gfp_t gfp,
                                           rxrpc_notify_rx_t notify_rx,
                                           bool upgrade,
-                                          bool intr,
+                                          enum rxrpc_interruptibility interruptibility,
                                           unsigned int debug_id)
 {
        struct rxrpc_conn_parameters cp;
@@ -310,7 +310,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        memset(&p, 0, sizeof(p));
        p.user_call_ID = user_call_ID;
        p.tx_total_len = tx_total_len;
-       p.intr = intr;
+       p.interruptibility = interruptibility;
 
        memset(&cp, 0, sizeof(cp));
        cp.local                = rx->local;
@@ -371,44 +371,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
- * @_life: Where to store the life value
  *
- * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Passes back in *_life a number representing
- * the life state which can be compared to that returned by a previous call and
- * return true if the call is still alive.
- *
- * If the life state stalls, rxrpc_kernel_probe_life() should be called and
- * then 2RTT waited.
+ * Allow a kernel service to find out whether a call is still alive -
+ * ie. whether it has not yet completed.
  */
 bool rxrpc_kernel_check_life(const struct socket *sock,
-                            const struct rxrpc_call *call,
-                            u32 *_life)
+                            const struct rxrpc_call *call)
 {
-       *_life = call->acks_latest;
        return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
-/**
- * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
- * @sock: The socket the call is on
- * @call: The call to check
- *
- * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
- * find out whether a call is still alive by pinging it.  This should cause the
- * life state to be bumped in about 2*RTT.
- *
- * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
- */
-void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
-{
-       rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
-                         rxrpc_propose_ack_ping_for_check_life);
-       rxrpc_send_ack_packet(call, true, NULL);
-}
-EXPORT_SYMBOL(rxrpc_kernel_probe_life);
-
 /**
  * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
  * @sock: The socket the call is on
index 7d730c4..3eb1ab4 100644 (file)
@@ -489,7 +489,6 @@ enum rxrpc_call_flag {
        RXRPC_CALL_BEGAN_RX_TIMER,      /* We began the expect_rx_by timer */
        RXRPC_CALL_RX_HEARD,            /* The peer responded at least once to this call */
        RXRPC_CALL_RX_UNDERRUN,         /* Got data underrun */
-       RXRPC_CALL_IS_INTR,             /* The call is interruptible */
        RXRPC_CALL_DISCONNECTED,        /* The call has been disconnected */
 };
 
@@ -598,6 +597,7 @@ struct rxrpc_call {
        atomic_t                usage;
        u16                     service_id;     /* service ID */
        u8                      security_ix;    /* Security type */
+       enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
        u32                     call_id;        /* call ID on connection  */
        u32                     cid;            /* connection ID plus channel index */
        int                     debug_id;       /* debug ID for printks */
@@ -675,7 +675,6 @@ struct rxrpc_call {
 
        /* transmission-phase ACK management */
        ktime_t                 acks_latest_ts; /* Timestamp of latest ACK received */
-       rxrpc_serial_t          acks_latest;    /* serial number of latest ACK received */
        rxrpc_seq_t             acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
        rxrpc_seq_t             acks_lost_top;  /* tx_top at the time lost-ack ping sent */
        rxrpc_serial_t          acks_lost_ping; /* Serial number of probe ACK */
@@ -721,7 +720,7 @@ struct rxrpc_call_params {
                u32             normal;         /* Max time since last call packet (msec) */
        } timeouts;
        u8                      nr_timeouts;    /* Number of timeouts specified */
-       bool                    intr;           /* The call is interruptible */
+       enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
 };
 
 struct rxrpc_send_params {
index c9f34b0..f079702 100644 (file)
@@ -237,8 +237,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
                return call;
        }
 
-       if (p->intr)
-               __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
+       call->interruptibility = p->interruptibility;
        call->tx_total_len = p->tx_total_len;
        trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
                         atomic_read(&call->usage),
index ea7d4c2..f2a1a5d 100644 (file)
@@ -655,13 +655,20 @@ static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
 
                add_wait_queue_exclusive(&call->waitq, &myself);
                for (;;) {
-                       if (test_bit(RXRPC_CALL_IS_INTR, &call->flags))
+                       switch (call->interruptibility) {
+                       case RXRPC_INTERRUPTIBLE:
+                       case RXRPC_PREINTERRUPTIBLE:
                                set_current_state(TASK_INTERRUPTIBLE);
-                       else
+                               break;
+                       case RXRPC_UNINTERRUPTIBLE:
+                       default:
                                set_current_state(TASK_UNINTERRUPTIBLE);
+                               break;
+                       }
                        if (call->call_id)
                                break;
-                       if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
+                       if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+                            call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
                            signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
index ef10fbf..69e09d6 100644 (file)
@@ -882,7 +882,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
            before(prev_pkt, call->ackr_prev_seq))
                goto out;
        call->acks_latest_ts = skb->tstamp;
-       call->acks_latest = sp->hdr.serial;
 
        call->ackr_first_seq = first_soft_ack;
        call->ackr_prev_seq = prev_pkt;
index 813fd68..0fcf157 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
+/*
+ * Return true if there's sufficient Tx queue space.
+ */
+static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
+{
+       unsigned int win_size =
+               min_t(unsigned int, call->tx_winsize,
+                     call->cong_cwnd + call->cong_extra);
+       rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);
+
+       if (_tx_win)
+               *_tx_win = tx_win;
+       return call->tx_top - tx_win < win_size;
+}
+
 /*
  * Wait for space to appear in the Tx queue or a signal to occur.
  */
@@ -26,9 +41,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
 {
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               if (call->tx_top - call->tx_hard_ack <
-                   min_t(unsigned int, call->tx_winsize,
-                         call->cong_cwnd + call->cong_extra))
+               if (rxrpc_check_tx_space(call, NULL))
                        return 0;
 
                if (call->state >= RXRPC_CALL_COMPLETE)
@@ -49,7 +62,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
  * Wait for space to appear in the Tx queue uninterruptibly, but with
  * a timeout of 2*RTT if no progress was made and a signal occurred.
  */
-static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
+static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
                                            struct rxrpc_call *call)
 {
        rxrpc_seq_t tx_start, tx_win;
@@ -58,8 +71,8 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
 
        rtt = READ_ONCE(call->peer->rtt);
        rtt2 = nsecs_to_jiffies64(rtt) * 2;
-       if (rtt2 < 1)
-               rtt2 = 1;
+       if (rtt2 < 2)
+               rtt2 = 2;
 
        timeout = rtt2;
        tx_start = READ_ONCE(call->tx_hard_ack);
@@ -68,16 +81,13 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
                set_current_state(TASK_UNINTERRUPTIBLE);
 
                tx_win = READ_ONCE(call->tx_hard_ack);
-               if (call->tx_top - tx_win <
-                   min_t(unsigned int, call->tx_winsize,
-                         call->cong_cwnd + call->cong_extra))
+               if (rxrpc_check_tx_space(call, &tx_win))
                        return 0;
 
                if (call->state >= RXRPC_CALL_COMPLETE)
                        return call->error;
 
-               if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
-                   timeout == 0 &&
+               if (timeout == 0 &&
                    tx_win == tx_start && signal_pending(current))
                        return -EINTR;
 
@@ -91,6 +101,26 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
        }
 }
 
+/*
+ * Wait for space to appear in the Tx queue uninterruptibly.
+ */
+static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
+                                           struct rxrpc_call *call,
+                                           long *timeo)
+{
+       for (;;) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (rxrpc_check_tx_space(call, NULL))
+                       return 0;
+
+               if (call->state >= RXRPC_CALL_COMPLETE)
+                       return call->error;
+
+               trace_rxrpc_transmit(call, rxrpc_transmit_wait);
+               *timeo = schedule_timeout(*timeo);
+       }
+}
+
 /*
  * wait for space to appear in the transmit/ACK window
  * - caller holds the socket locked
@@ -108,10 +138,19 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
 
        add_wait_queue(&call->waitq, &myself);
 
-       if (waitall)
-               ret = rxrpc_wait_for_tx_window_nonintr(rx, call);
-       else
-               ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
+       switch (call->interruptibility) {
+       case RXRPC_INTERRUPTIBLE:
+               if (waitall)
+                       ret = rxrpc_wait_for_tx_window_waitall(rx, call);
+               else
+                       ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
+               break;
+       case RXRPC_PREINTERRUPTIBLE:
+       case RXRPC_UNINTERRUPTIBLE:
+       default:
+               ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
+               break;
+       }
 
        remove_wait_queue(&call->waitq, &myself);
        set_current_state(TASK_RUNNING);
@@ -302,9 +341,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
                        _debug("alloc");
 
-                       if (call->tx_top - call->tx_hard_ack >=
-                           min_t(unsigned int, call->tx_winsize,
-                                 call->cong_cwnd + call->cong_extra)) {
+                       if (!rxrpc_check_tx_space(call, NULL)) {
                                ret = -EAGAIN;
                                if (msg->msg_flags & MSG_DONTWAIT)
                                        goto maybe_error;
@@ -619,7 +656,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                .call.tx_total_len      = -1,
                .call.user_call_ID      = 0,
                .call.nr_timeouts       = 0,
-               .call.intr              = true,
+               .call.interruptibility  = RXRPC_INTERRUPTIBLE,
                .abort_code             = 0,
                .command                = RXRPC_CMD_SEND_DATA,
                .exclusive              = false,
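
rxrpc_check_tx_space(), factored out above, decides whether another DATA packet may be queued by comparing the distance between the highest queued sequence number and the last hard-ACKed one against the effective window. A stand-alone sketch of the same check, with a toy struct standing in for struct rxrpc_call:

#include <stdbool.h>
#include <stdio.h>

struct toy_call {
        unsigned int tx_top;       /* last seq queued for transmission */
        unsigned int tx_hard_ack;  /* last seq hard-ACK'd by the peer */
        unsigned int tx_winsize;   /* peer-advertised window */
        unsigned int cong_cwnd;    /* congestion window */
        unsigned int cong_extra;   /* extra congestion credit */
};

static bool toy_check_tx_space(const struct toy_call *call)
{
        unsigned int win = call->cong_cwnd + call->cong_extra;

        if (win > call->tx_winsize)
                win = call->tx_winsize;
        return call->tx_top - call->tx_hard_ack < win;
}

int main(void)
{
        struct toy_call call = { .tx_top = 15, .tx_hard_ack = 10,
                                 .tx_winsize = 8, .cong_cwnd = 4, .cong_extra = 1 };

        /* distance 5 is not below the effective window min(8, 5) = 5: no space */
        printf("space: %d\n", toy_check_tx_space(&call));
        return 0;
}
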
index f685c0d..41114b4 100644 (file)
@@ -739,7 +739,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
        if (params)
-               kfree_rcu(params, rcu);
+               call_rcu(&params->rcu, tcf_ct_params_free);
        if (res == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
index 1ad300e..83dd82f 100644 (file)
@@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 
        /* mirror is always swallowed */
        if (is_redirect) {
-               skb2->tc_redirected = 1;
-               skb2->tc_from_ingress = skb2->tc_at_ingress;
-               if (skb2->tc_from_ingress)
-                       skb2->tstamp = 0;
+               skb_set_redirected(skb2, skb2->tc_at_ingress);
+
                /* let the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
index 6f8786b..5efa3e7 100644 (file)
@@ -534,8 +534,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
                        fp = &b->ht[h];
                        for (pfp = rtnl_dereference(*fp); pfp;
                             fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
-                               if (pfp == f) {
-                                       *fp = f->next;
+                               if (pfp == fold) {
+                                       rcu_assign_pointer(*fp, fold->next);
                                        break;
                                }
                        }
index 09b7dc5..9904299 100644 (file)
@@ -261,8 +261,10 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
                                              struct tcindex_data,
                                              rwork);
 
+       rtnl_lock();
        kfree(p->perfect);
        kfree(p);
+       rtnl_unlock();
 }
 
 static void tcindex_free_perfect_hash(struct tcindex_data *cp)
@@ -357,6 +359,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 
                if (tcindex_alloc_perfect_hash(net, cp) < 0)
                        goto errout;
+               cp->alloc_hash = cp->hash;
                for (i = 0; i < min(cp->hash, p->hash); i++)
                        cp->perfect[i].res = p->perfect[i].res;
                balloc = 1;
index b2905b0..2eaac2f 100644 (file)
@@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        s64 credits;
        int len;
 
+       /* The previous packet is still being sent */
+       if (now < q->last) {
+               qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
+               return NULL;
+       }
        if (q->credits < 0) {
                credits = timediff_to_credits(now - q->last, q->idleslope);
 
@@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
        credits += q->credits;
 
        q->credits = max_t(s64, credits, q->locredit);
-       q->last = now;
+       /* Estimate of when the last byte of the packet will have been transmitted, in ns */
+       if (unlikely(atomic64_read(&q->port_rate) == 0))
+               q->last = now;
+       else
+               q->last = now + div64_s64(len * NSEC_PER_SEC,
+                                         atomic64_read(&q->port_rate));
 
        return skb;
 }
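
The new q->last computation above pushes the next dequeue out by the estimated wire time of the packet just sent. A worked example of that estimate, under the assumption that port_rate is expressed in bytes per second (values below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
        int64_t now = 0, len = 1500, port_rate = 125000000; /* 1 Gb/s in bytes/s */
        int64_t last = now + len * NSEC_PER_SEC / port_rate;

        /* a 1500-byte frame at 125,000,000 B/s finishes 12,000 ns after "now" */
        printf("last byte leaves the wire at +%lld ns\n", (long long)last);
        return 0;
}
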
index b79a05d..2eecf15 100644 (file)
@@ -1707,7 +1707,8 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 
 int __sys_accept4_file(struct file *file, unsigned file_flags,
                       struct sockaddr __user *upeer_sockaddr,
-                      int __user *upeer_addrlen, int flags)
+                      int __user *upeer_addrlen, int flags,
+                      unsigned long nofile)
 {
        struct socket *sock, *newsock;
        struct file *newfile;
@@ -1738,7 +1739,7 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
         */
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(flags);
+       newfd = __get_unused_fd_flags(flags, nofile);
        if (unlikely(newfd < 0)) {
                err = newfd;
                sock_release(newsock);
@@ -1807,7 +1808,8 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
        f = fdget(fd);
        if (f.file) {
                ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
-                                               upeer_addrlen, flags);
+                                               upeer_addrlen, flags,
+                                               rlimit(RLIMIT_NOFILE));
                if (f.flags)
                        fput(f.file);
        }
index ec5d677..f0af23c 100644 (file)
@@ -16416,7 +16416,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
                goto nla_put_failure;
 
        if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) &&
-           nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
+           nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
                goto nla_put_failure;
 
        if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) &&
index aef240f..328402a 100644 (file)
@@ -2022,7 +2022,11 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev,
 
        spin_lock_bh(&rdev->bss_lock);
 
-       if (WARN_ON(cbss->pub.channel == chan))
+       /*
+        * Some APs use CSA also for bandwidth changes, i.e., without actually
+        * changing the control channel, so no need to update in such a case.
+        */
+       if (cbss->pub.channel == chan)
                goto done;
 
        /* use transmitting bss */
index 50f567a..e2db468 100644 (file)
@@ -78,8 +78,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        int err;
        unsigned long flags;
        struct xfrm_state *x;
-       struct sk_buff *skb2, *nskb;
        struct softnet_data *sd;
+       struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;
@@ -168,14 +168,14 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                } else {
                        if (skb == skb2)
                                skb = nskb;
-
-                       if (!skb)
-                               return NULL;
+                       else
+                               pskb->next = nskb;
 
                        continue;
                }
 
                skb_push(skb2, skb2->data - skb_mac_header(skb2));
+               pskb = skb2;
        }
 
        return skb;
@@ -383,6 +383,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
                return xfrm_dev_feat_change(dev);
 
        case NETDEV_DOWN:
+       case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
index dbda08e..8a4af86 100644 (file)
@@ -434,7 +434,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
 
 static void xfrm_policy_kill(struct xfrm_policy *policy)
 {
+       write_lock_bh(&policy->lock);
        policy->walk.dead = 1;
+       write_unlock_bh(&policy->lock);
 
        atomic_inc(&policy->genid);
 
index b88ba45..e6cfaa6 100644 (file)
@@ -110,7 +110,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
                return 0;
 
        uctx = nla_data(rt);
-       if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
+       if (uctx->len > nla_len(rt) ||
+           uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
                return -EINVAL;
 
        return 0;
@@ -2273,6 +2274,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
        xfrm_mark_get(attrs, &mark);
 
        err = verify_newpolicy_info(&ua->policy);
+       if (err)
+               goto free_state;
+       err = verify_sec_ctx_len(attrs);
        if (err)
                goto free_state;
 
index 5c6c3fd..b3b7270 100644 (file)
@@ -23,7 +23,6 @@ LINECOMMENT   "//".*\n
 #include "srcpos.h"
 #include "dtc-parser.tab.h"
 
-YYLTYPE yylloc;
 extern bool treesource_error;
 
 /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
index 255cef1..2ca4eb3 100755 (executable)
@@ -8,13 +8,14 @@ my $input_file = "MAINTAINERS";
 my $output_file = "MAINTAINERS.new";
 my $output_section = "SECTION.new";
 my $help = 0;
-
+my $order = 0;
 my $P = $0;
 
 if (!GetOptions(
                'input=s' => \$input_file,
                'output=s' => \$output_file,
                'section=s' => \$output_section,
+               'order!' => \$order,
                'h|help|usage' => \$help,
            )) {
     die "$P: invalid argument - use --help if necessary\n";
@@ -32,6 +33,22 @@ usage: $P [options] <pattern matching regexes>
   --input => MAINTAINERS file to read (default: MAINTAINERS)
   --output => sorted MAINTAINERS file to write (default: MAINTAINERS.new)
   --section => new sorted MAINTAINERS file to write to (default: SECTION.new)
+  --order => Use the preferred section content output ordering (default: 0)
+    Preferred ordering of section output is:
+      M:  Person acting as a maintainer
+      R:  Person acting as a patch reviewer
+      L:  Mailing list where patches should be sent
+      S:  Maintenance status
+      W:  URI for general information
+      Q:  URI for patchwork tracking
+      B:  URI for bug tracking/submission
+      C:  URI for chat
+      P:  URI or file for subsystem specific coding styles
+      T:  SCM tree type and location
+      F:  File and directory pattern
+      X:  File and directory exclusion pattern
+      N:  File glob
+      K:  Keyword - patch content regex
 
 If <pattern match regexes> exist, then the sections that match the
 regexes are not written to the output file but are written to the
@@ -56,7 +73,7 @@ sub by_category($$) {
 
 sub by_pattern($$) {
     my ($a, $b) = @_;
-    my $preferred_order = 'MRPLSWTQBCFXNK';
+    my $preferred_order = 'MRLSWQBCPTFXNK';
 
     my $a1 = uc(substr($a, 0, 1));
     my $b1 = uc(substr($b, 0, 1));
@@ -105,8 +122,14 @@ sub alpha_output {
                print $file $separator;
            }
            print $file $key . "\n";
-           foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
-               print $file ($pattern . "\n");
+           if ($order) {
+               foreach my $pattern (sort by_pattern split('\n', %$hashref{$key})) {
+                   print $file ($pattern . "\n");
+               }
+           } else {
+               foreach my $pattern (split('\n', %$hashref{$key})) {
+                   print $file ($pattern . "\n");
+               }
            }
        }
     }
index 1521073..8533bf0 100644 (file)
@@ -74,6 +74,8 @@ enum {
 #define IPPROTO_UDPLITE                IPPROTO_UDPLITE
   IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
 #define IPPROTO_MPLS           IPPROTO_MPLS
+  IPPROTO_ETHERNET = 143,      /* Ethernet-within-IPv6 Encapsulation   */
+#define IPPROTO_ETHERNET       IPPROTO_ETHERNET
   IPPROTO_RAW = 255,           /* Raw IP packets                       */
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MPTCP = 262,         /* Multipath TCP connection             */
index 7902a56..b8fc7d9 100644 (file)
@@ -35,7 +35,7 @@ endif
 # Only pass canonical directory names as the output directory:
 #
 ifneq ($(O),)
-  FULL_O := $(shell readlink -f $(O) || echo $(O))
+  FULL_O := $(shell cd $(PWD); readlink -f $(O) || echo $(O))
 endif
 
 #
index 9542851..b342f74 100644 (file)
@@ -89,7 +89,7 @@ static inline bool replace_android_lib(const char *filename, char *newfilename)
                return true;
        }
 
-       if (!strncmp(filename, "/system/lib/", 11)) {
+       if (!strncmp(filename, "/system/lib/", 12)) {
                char *ndk, *app;
                const char *arch;
                size_t ndk_length;
index a149958..a7dc0b0 100644 (file)
@@ -1213,7 +1213,7 @@ static int config_attr(struct perf_event_attr *attr,
 static int get_config_terms(struct list_head *head_config,
                            struct list_head *head_terms __maybe_unused)
 {
-#define ADD_CONFIG_TERM(__type)                                        \
+#define ADD_CONFIG_TERM(__type, __weak)                                \
        struct perf_evsel_config_term *__t;                     \
                                                                \
        __t = zalloc(sizeof(*__t));                             \
@@ -1222,18 +1222,18 @@ static int get_config_terms(struct list_head *head_config,
                                                                \
        INIT_LIST_HEAD(&__t->list);                             \
        __t->type       = PERF_EVSEL__CONFIG_TERM_ ## __type;   \
-       __t->weak       = term->weak;                           \
+       __t->weak       = __weak;                               \
        list_add_tail(&__t->list, head_terms)
 
-#define ADD_CONFIG_TERM_VAL(__type, __name, __val)             \
+#define ADD_CONFIG_TERM_VAL(__type, __name, __val, __weak)     \
 do {                                                           \
-       ADD_CONFIG_TERM(__type);                                \
+       ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.__name = __val;                                \
 } while (0)
 
-#define ADD_CONFIG_TERM_STR(__type, __val)                     \
+#define ADD_CONFIG_TERM_STR(__type, __val, __weak)             \
 do {                                                           \
-       ADD_CONFIG_TERM(__type);                                \
+       ADD_CONFIG_TERM(__type, __weak);                        \
        __t->val.str = strdup(__val);                           \
        if (!__t->val.str) {                                    \
                zfree(&__t);                                    \
@@ -1247,62 +1247,62 @@ do {                                                            \
        list_for_each_entry(term, head_config, list) {
                switch (term->type_term) {
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
-                       ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num);
+                       ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
-                       ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num);
+                       ADD_CONFIG_TERM_VAL(FREQ, freq, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_TIME:
-                       ADD_CONFIG_TERM_VAL(TIME, time, term->val.num);
+                       ADD_CONFIG_TERM_VAL(TIME, time, term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
-                       ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str);
+                       ADD_CONFIG_TERM_STR(CALLGRAPH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
-                       ADD_CONFIG_TERM_STR(BRANCH, term->val.str);
+                       ADD_CONFIG_TERM_STR(BRANCH, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
                        ADD_CONFIG_TERM_VAL(STACK_USER, stack_user,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_INHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
-                                           term->val.num ? 1 : 0);
+                                           term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                        ADD_CONFIG_TERM_VAL(INHERIT, inherit,
-                                           term->val.num ? 0 : 1);
+                                           term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                        ADD_CONFIG_TERM_VAL(MAX_STACK, max_stack,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
                        ADD_CONFIG_TERM_VAL(MAX_EVENTS, max_events,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
-                                           term->val.num ? 1 : 0);
+                                           term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                        ADD_CONFIG_TERM_VAL(OVERWRITE, overwrite,
-                                           term->val.num ? 0 : 1);
+                                           term->val.num ? 0 : 1, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
-                       ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str);
+                       ADD_CONFIG_TERM_STR(DRV_CFG, term->val.str, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_PERCORE:
                        ADD_CONFIG_TERM_VAL(PERCORE, percore,
-                                           term->val.num ? true : false);
+                                           term->val.num ? true : false, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
                        ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
-                                           term->val.num ? 1 : 0);
+                                           term->val.num ? 1 : 0, term->weak);
                        break;
                case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
                        ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
-                                           term->val.num);
+                                           term->val.num, term->weak);
                        break;
                default:
                        break;
@@ -1339,7 +1339,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
        }
 
        if (bits)
-               ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits);
+               ADD_CONFIG_TERM_VAL(CFG_CHG, cfg_chg, bits, false);
 
 #undef ADD_CONFIG_TERM
        return 0;
index 0f5fda1..8c85294 100644 (file)
@@ -206,6 +206,9 @@ static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
                } else
                        ret = strlist__add(sl, tev.event);
                clear_probe_trace_event(&tev);
+               /* Skip if a multi-probe event with the same name is already in the list */
+               if (ret == -EEXIST)
+                       ret = 0;
                if (ret < 0)
                        break;
        }
index 1c817ad..e4cff49 100644 (file)
@@ -637,14 +637,19 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
                return -EINVAL;
        }
 
-       /* Try to get actual symbol name from symtab */
-       symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+       if (dwarf_entrypc(sp_die, &eaddr) == 0) {
+               /* If the DIE has entrypc, use it. */
+               symbol = dwarf_diename(sp_die);
+       } else {
+               /* Try to get actual symbol name and address from symtab */
+               symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
+               eaddr = sym.st_value;
+       }
        if (!symbol) {
                pr_warning("Failed to find symbol at 0x%lx\n",
                           (unsigned long)paddr);
                return -ENOENT;
        }
-       eaddr = sym.st_value;
 
        tp->offset = (unsigned long)(paddr - eaddr);
        tp->address = (unsigned long)paddr;
index aa344a1..8a065a6 100644 (file)
@@ -2,11 +2,13 @@ from os import getenv
 from subprocess import Popen, PIPE
 from re import sub
 
+cc = getenv("CC")
+cc_is_clang = b"clang version" in Popen([cc, "-v"], stderr=PIPE).stderr.readline()
+
 def clang_has_option(option):
-    return [o for o in Popen(['clang', option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
+    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
 
-cc = getenv("CC")
-if cc == "clang":
+if cc_is_clang:
     from distutils.sysconfig import get_config_vars
     vars = get_config_vars()
     for var in ('CFLAGS', 'OPT'):
@@ -40,7 +42,7 @@ class install_lib(_install_lib):
 cflags = getenv('CFLAGS', '').split()
 # switch off several checks (need to be at the end of cflags list)
 cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
-if cc != "clang":
+if not cc_is_clang:
     cflags += ['-Wno-cast-function-type' ]
 
 src_perf  = getenv('srctree') + '/tools/perf'
index 13f1e8b..2b65512 100644 (file)
@@ -16,7 +16,7 @@ override CFLAGS +=    -D_FORTIFY_SOURCE=2
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
-       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS)
+       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) -lcap
 
 .PHONY : clean
 clean :
index 31c1ca0..33b3708 100644 (file)
@@ -30,7 +30,7 @@
 #include <sched.h>
 #include <time.h>
 #include <cpuid.h>
-#include <linux/capability.h>
+#include <sys/capability.h>
 #include <errno.h>
 #include <math.h>
 
@@ -304,6 +304,10 @@ int *irqs_per_cpu;         /* indexed by cpu_num */
 
 void setup_all_buffers(void);
 
+char *sys_lpi_file;
+char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
+char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec";
+
 int cpu_is_not_present(int cpu)
 {
        return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
@@ -2916,8 +2920,6 @@ int snapshot_gfx_mhz(void)
  *
  * record snapshot of
  * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
- *
- * return 1 if config change requires a restart, else return 0
  */
 int snapshot_cpu_lpi_us(void)
 {
@@ -2941,17 +2943,14 @@ int snapshot_cpu_lpi_us(void)
 /*
  * snapshot_sys_lpi()
  *
- * record snapshot of
- * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
- *
- * return 1 if config change requires a restart, else return 0
+ * record snapshot of sys_lpi_file
  */
 int snapshot_sys_lpi_us(void)
 {
        FILE *fp;
        int retval;
 
-       fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
+       fp = fopen_or_die(sys_lpi_file, "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
        if (retval != 1) {
@@ -3151,28 +3150,42 @@ void check_dev_msr()
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
 
-void check_permissions()
+/*
+ * check for CAP_SYS_RAWIO
+ * return 0 on success
+ * return 1 on failure
+ */
+int check_for_cap_sys_rawio(void)
 {
-       struct __user_cap_header_struct cap_header_data;
-       cap_user_header_t cap_header = &cap_header_data;
-       struct __user_cap_data_struct cap_data_data;
-       cap_user_data_t cap_data = &cap_data_data;
-       extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
-       int do_exit = 0;
-       char pathname[32];
+       cap_t caps;
+       cap_flag_value_t cap_flag_value;
 
-       /* check for CAP_SYS_RAWIO */
-       cap_header->pid = getpid();
-       cap_header->version = _LINUX_CAPABILITY_VERSION;
-       if (capget(cap_header, cap_data) < 0)
-               err(-6, "capget(2) failed");
+       caps = cap_get_proc();
+       if (caps == NULL)
+               err(-6, "cap_get_proc\n");
 
-       if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
-               do_exit++;
+       if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value))
+               err(-6, "cap_get\n");
+
+       if (cap_flag_value != CAP_SET) {
                warnx("capget(CAP_SYS_RAWIO) failed,"
                        " try \"# setcap cap_sys_rawio=ep %s\"", progname);
+               return 1;
        }
 
+       if (cap_free(caps) == -1)
+               err(-6, "cap_free\n");
+
+       return 0;
+}
+void check_permissions(void)
+{
+       int do_exit = 0;
+       char pathname[32];
+
+       /* check for CAP_SYS_RAWIO */
+       do_exit += check_for_cap_sys_rawio();
+
        /* test file permissions */
        sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
        if (euidaccess(pathname, R_OK)) {
@@ -3265,6 +3278,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT:  /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
                pkg_cstate_limits = glm_pkg_cstate_limits;
                break;
        default:
@@ -3336,6 +3350,17 @@ int is_skx(unsigned int family, unsigned int model)
        }
        return 0;
 }
+int is_ehl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case INTEL_FAM6_ATOM_TREMONT:
+               return 1;
+       }
+       return 0;
+}
 
 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
 {
@@ -3478,6 +3503,23 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
        dump_nhm_cst_cfg();
 }
 
+static void dump_sysfs_file(char *path)
+{
+       FILE *input;
+       char cpuidle_buf[64];
+
+       input = fopen(path, "r");
+       if (input == NULL) {
+               if (debug)
+                       fprintf(outf, "NSFOD %s\n", path);
+               return;
+       }
+       if (!fgets(cpuidle_buf, sizeof(cpuidle_buf), input))
+               err(1, "%s: failed to read file", path);
+       fclose(input);
+
+       fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf);
+}
 static void
 dump_sysfs_cstate_config(void)
 {
@@ -3491,6 +3533,15 @@ dump_sysfs_cstate_config(void)
        if (!DO_BIC(BIC_sysfs))
                return;
 
+       if (access("/sys/devices/system/cpu/cpuidle", R_OK)) {
+               fprintf(outf, "cpuidle not loaded\n");
+               return;
+       }
+
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_driver");
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor");
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor_ro");
+
        for (state = 0; state < 10; ++state) {
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
@@ -3894,6 +3945,20 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
                else
                        BIC_PRESENT(BIC_PkgWatt);
                break;
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+                       BIC_PRESENT(BIC_RAM_J);
+                       BIC_PRESENT(BIC_GFX_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+                       BIC_PRESENT(BIC_RAMWatt);
+                       BIC_PRESENT(BIC_GFXWatt);
+               }
+               break;
        case INTEL_FAM6_SKYLAKE_L:      /* SKL */
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
@@ -4295,6 +4360,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT:          /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
+       case INTEL_FAM6_ATOM_TREMONT:           /* EHL */
                return 1;
        }
        return 0;
@@ -4324,6 +4390,7 @@ int has_c8910_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
        case INTEL_FAM6_ATOM_GOLDMONT:  /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
                return 1;
        }
        return 0;
@@ -4610,14 +4677,24 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
                return INTEL_FAM6_SKYLAKE_L;
 
        case INTEL_FAM6_ICELAKE_L:
        case INTEL_FAM6_ICELAKE_NNPI:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                return INTEL_FAM6_CANNONLAKE_L;
 
        case INTEL_FAM6_ATOM_TREMONT_D:
                return INTEL_FAM6_ATOM_GOLDMONT_D;
+
+       case INTEL_FAM6_ATOM_TREMONT_L:
+               return INTEL_FAM6_ATOM_TREMONT;
+
+       case INTEL_FAM6_ICELAKE_X:
+               return INTEL_FAM6_SKYLAKE_X;
        }
        return model;
 }
@@ -4872,7 +4949,8 @@ void process_cpuid()
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
 
-       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) ||
+           is_ehl(family, model))
                BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
@@ -4907,10 +4985,16 @@ void process_cpuid()
        else
                BIC_NOT_PRESENT(BIC_CPU_LPI);
 
-       if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK))
+       if (!access(sys_lpi_file_sysfs, R_OK)) {
+               sys_lpi_file = sys_lpi_file_sysfs;
                BIC_PRESENT(BIC_SYS_LPI);
-       else
+       } else if (!access(sys_lpi_file_debugfs, R_OK)) {
+               sys_lpi_file = sys_lpi_file_debugfs;
+               BIC_PRESENT(BIC_SYS_LPI);
+       } else {
+               sys_lpi_file_sysfs = NULL;
                BIC_NOT_PRESENT(BIC_SYS_LPI);
+       }
 
        if (!quiet)
                decode_misc_feature_control();
@@ -5306,7 +5390,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 19.08.31"
+       fprintf(outf, "turbostat version 20.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5323,9 +5407,9 @@ int add_counter(unsigned int msr_num, char *path, char *name,
        }
 
        msrp->msr_num = msr_num;
-       strncpy(msrp->name, name, NAME_BYTES);
+       strncpy(msrp->name, name, NAME_BYTES - 1);
        if (path)
-               strncpy(msrp->path, path, PATH_BYTES);
+               strncpy(msrp->path, path, PATH_BYTES - 1);
        msrp->width = width;
        msrp->type = type;
        msrp->format = format;
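The new copy length matters because strncpy() leaves the destination without a terminating NUL whenever the source is at least as long as the limit; copying NAME_BYTES - 1 (and PATH_BYTES - 1) bytes keeps the last byte untouched, which stays NUL as long as the msr_counter allocation is zeroed (the allocation is not shown in this hunk, so that is an assumption here). A minimal standalone sketch of the difference, with an invented 8-byte NAME_BYTES purely for illustration:

/* Illustration only: NAME_BYTES and the counter name are made up. */
#include <stdio.h>
#include <string.h>

#define NAME_BYTES 8

int main(void)
{
	char name[NAME_BYTES] = { 0 };	/* zero-filled, like a calloc()'d counter */

	/* copying NAME_BYTES bytes could fill the buffer with no terminator;
	 * NAME_BYTES - 1 always leaves the final NUL byte in place
	 */
	strncpy(name, "a-very-long-counter-name", NAME_BYTES - 1);
	printf("%s\n", name);		/* prints "a-very-" */
	return 0;
}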
index ded7a95..6d2f3a1 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 ifneq ($(O),)
 ifeq ($(origin O), command line)
-       dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
-       ABSOLUTE_O := $(shell cd $(O) ; pwd)
+       dummy := $(if $(shell cd $(PWD); test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+       ABSOLUTE_O := $(shell cd $(PWD); cd $(O) ; pwd)
        OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
        COMMAND_O := O=$(ABSOLUTE_O)
 ifeq ($(objtree),)
index 6ec5039..b93fa64 100644 (file)
@@ -33,6 +33,7 @@ TARGETS += memory-hotplug
 TARGETS += mount
 TARGETS += mqueue
 TARGETS += net
+TARGETS += net/forwarding
 TARGETS += net/mptcp
 TARGETS += netfilter
 TARGETS += networking/timestamping
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c b/tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c
new file mode 100644 (file)
index 0000000..189a34a
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "test_send_signal_kern.skel.h"
+
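+/* empty handler: SIGUSR1 only needs to be caught so its default action does
+ * not terminate the test process
+ */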
+static void sigusr1_handler(int signum)
+{
+}
+
+#define THREAD_COUNT 100
+
+static void *worker(void *p)
+{
+       int i;
+
+       for (i = 0; i < 1000; i++)
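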
+               usleep(1);
+
+       return NULL;
+}
+
+void test_send_signal_sched_switch(void)
+{
+       struct test_send_signal_kern *skel;
+       pthread_t threads[THREAD_COUNT];
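+       /* "duration" is referenced by the CHECK() macro from test_progs.h */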
+       u32 duration = 0;
+       int i, err;
+
+       signal(SIGUSR1, sigusr1_handler);
+
+       skel = test_send_signal_kern__open_and_load();
+       if (CHECK(!skel, "skel_open_and_load", "skeleton open_and_load failed\n"))
+               return;
+
+       skel->bss->pid = getpid();
+       skel->bss->sig = SIGUSR1;
+
+       err = test_send_signal_kern__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed\n"))
+               goto destroy_skel;
+
+       for (i = 0; i < THREAD_COUNT; i++) {
+               err = pthread_create(threads + i, NULL, worker, NULL);
+               err = pthread_create(threads + i, NULL, worker, NULL);
+               if (CHECK(err, "pthread_create", "Error creating thread, %s\n",
+                         strerror(err)))
+                       goto destroy_skel;
+       }
+
+       for (i = 0; i < THREAD_COUNT; i++)
+               pthread_join(threads[i], NULL);
+
+destroy_skel:
+       test_send_signal_kern__destroy(skel);
+}
index 1acc91e..b4233d3 100644 (file)
@@ -31,6 +31,12 @@ int send_signal_tp(void *ctx)
        return bpf_send_signal_test(ctx);
 }
 
+SEC("tracepoint/sched/sched_switch")
+int send_signal_tp_sched(void *ctx)
+{
+       return bpf_send_signal_test(ctx);
+}
+
 SEC("perf_event")
 int send_signal_perf(void *ctx)
 {
index 93040ca..8da77cd 100644 (file)
@@ -1062,6 +1062,48 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "Member exceeds struct_size",
 },
 
+/* Test a member that does not exceed the size of the struct
+ *
+ * enum E {
+ *     E0,
+ *     E1,
+ * };
+ *
+ * struct A {
+ *     char m;
+ *     enum E __attribute__((packed)) n;
+ * };
+ */
+{
+       .descr = "size check test #5",
+       .raw_types = {
+               /* int */                       /* [1] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, sizeof(int)),
+               /* char */                      /* [2] */
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 8, 1),
+               /* enum E { */                  /* [3] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 1),
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_ENUM_ENC(NAME_TBD, 1),
+               /* } */
+               /* struct A { */                /* [4] */
+               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 2),
+               BTF_MEMBER_ENC(NAME_TBD, 2, 0), /* char m; */
+               BTF_MEMBER_ENC(NAME_TBD, 3, 8), /* enum E __attribute__((packed)) n; */
+               /* } */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0E\0E0\0E1\0A\0m\0n",
+       .str_sec_size = sizeof("\0E\0E0\0E1\0A\0m\0n"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "size_check5_map",
+       .key_size = sizeof(int),
+       .value_size = 2,
+       .key_type_id = 1,
+       .value_type_id = 4,
+       .max_entries = 4,
+},
+
 /* typedef const void * const_void_ptr;
  * struct A {
  *     const_void_ptr m;
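Size check test #5 above encodes an enum of size 1 inside a 2-byte struct. For reference, real compilers produce that exact layout when the packed attribute is applied to the enum definition (a GCC/Clang extension); the sketch below is illustrative only and is not part of the selftest:

/* Sketch of the layout that the raw BTF above describes. The attribute is
 * placed on the enum definition so the compiler really uses one byte.
 */
#include <assert.h>
#include <stddef.h>

enum E { E0, E1 } __attribute__((packed));

struct A {
	char m;		/* BTF member at bit offset 0 */
	enum E n;	/* BTF member at bit offset 8, one byte wide */
};

int main(void)
{
	_Static_assert(sizeof(enum E) == 1, "packed enum shrinks to 1 byte");
	_Static_assert(sizeof(struct A) == 2, "matches .value_size = 2");
	assert(offsetof(struct A, n) == 1);
	return 0;
}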
index bf0322e..bd5cae4 100644 (file)
        },
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
+{
+       "jset32: ignores upper bits",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_LD_IMM64(BPF_REG_7, 0x8000000000000000),
+       BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000),
+       BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+       BPF_EXIT_INSN(),
+       BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 2,
+},
 {
        "jset32: min/max deduction",
        .insns = {
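The "jset32: ignores upper bits" program above returns 2 only if the 64-bit JSET branches (both operands have bit 63 set) while the following 32-bit JSET falls through (their low 32 bits are all zero). A plain user-space sketch of the two comparisons, not a BPF program:

/* Illustration of the two conditional jumps above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r7 = 0x8000000000000000ULL;
	uint64_t r8 = 0x8000000000000000ULL;

	/* BPF_JMP_REG(BPF_JSET, ...): tests all 64 bits -> branch taken */
	printf("64-bit jset taken: %d\n", (r7 & r8) != 0);

	/* BPF_JMP32_REG(BPF_JSET, ...): tests only the low 32 bits -> not taken,
	 * so execution falls through to "r0 = 2" and the program returns 2.
	 */
	printf("32-bit jset taken: %d\n", ((uint32_t)r7 & (uint32_t)r8) != 0);

	return 0;
}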
index 287ae91..4c1bd03 100644 (file)
@@ -11,7 +11,9 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
 TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
 TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
 TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
-TEST_PROGS += fin_ack_lat.sh
+TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh
+TEST_PROGS += altnames.sh icmp_redirect.sh ip6_gre_headroom.sh
+TEST_PROGS += route_localnet.sh
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile
new file mode 100644 (file)
index 0000000..250fbb2
--- /dev/null
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0+ OR MIT
+
+TEST_PROGS = bridge_igmp.sh \
+       bridge_port_isolation.sh \
+       bridge_sticky_fdb.sh \
+       bridge_vlan_aware.sh \
+       bridge_vlan_unaware.sh \
+       ethtool.sh \
+       gre_inner_v4_multipath.sh \
+       gre_inner_v6_multipath.sh \
+       gre_multipath.sh \
+       ip6gre_inner_v4_multipath.sh \
+       ip6gre_inner_v6_multipath.sh \
+       ipip_flat_gre_key.sh \
+       ipip_flat_gre_keys.sh \
+       ipip_flat_gre.sh \
+       ipip_hier_gre_key.sh \
+       ipip_hier_gre_keys.sh \
+       ipip_hier_gre.sh \
+       loopback.sh \
+       mirror_gre_bound.sh \
+       mirror_gre_bridge_1d.sh \
+       mirror_gre_bridge_1d_vlan.sh \
+       mirror_gre_bridge_1q_lag.sh \
+       mirror_gre_bridge_1q.sh \
+       mirror_gre_changes.sh \
+       mirror_gre_flower.sh \
+       mirror_gre_lag_lacp.sh \
+       mirror_gre_neigh.sh \
+       mirror_gre_nh.sh \
+       mirror_gre.sh \
+       mirror_gre_vlan_bridge_1q.sh \
+       mirror_gre_vlan.sh \
+       mirror_vlan.sh \
+       router_bridge.sh \
+       router_bridge_vlan.sh \
+       router_broadcast.sh \
+       router_mpath_nh.sh \
+       router_multicast.sh \
+       router_multipath.sh \
+       router.sh \
+       router_vid_1.sh \
+       sch_ets.sh \
+       sch_tbf_ets.sh \
+       sch_tbf_prio.sh \
+       sch_tbf_root.sh \
+       tc_actions.sh \
+       tc_chains.sh \
+       tc_flower_router.sh \
+       tc_flower.sh \
+       tc_shblocks.sh \
+       tc_vlan_modify.sh \
+       vxlan_asymmetric.sh \
+       vxlan_bridge_1d_port_8472.sh \
+       vxlan_bridge_1d.sh \
+       vxlan_bridge_1q_port_8472.sh \
+       vxlan_bridge_1q.sh \
+       vxlan_symmetric.sh
+
+TEST_PROGS_EXTENDED := devlink_lib.sh \
+       ethtool_lib.sh \
+       fib_offload_lib.sh \
+       forwarding.config.sample \
+       ipip_lib.sh \
+       lib.sh \
+       mirror_gre_lib.sh \
+       mirror_gre_topo_lib.sh \
+       mirror_lib.sh \
+       mirror_topo_lib.sh \
+       sch_ets_core.sh \
+       sch_ets_tests.sh \
+       sch_tbf_core.sh \
+       sch_tbf_etsprio.sh \
+       tc_common.sh
+
+include ../../lib.mk
index c623393..b8475cb 100644 (file)
 #include <sys/socket.h>
 #include <unistd.h>
 
+#ifndef SOL_DCCP
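+/* socket-option level for DCCP, as defined in include/linux/socket.h;
+ * provided here for toolchains whose headers do not export it
+ */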
+#define SOL_DCCP 269
+#endif
+
 static const char *IP4_ADDR = "127.0.0.1";
 static const char *IP6_ADDR = "::1";
 static const char *IP4_MAPPED6 = "::ffff:127.0.0.1";
index 08194aa..9c0f758 100644 (file)
@@ -3,6 +3,10 @@
 
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
-       nft_concat_range.sh
+       nft_concat_range.sh \
+       nft_queue.sh
+
+LDLIBS = -lmnl
+TEST_GEN_FILES =  nf-queue
 
 include ../lib.mk
index 59caa8f..4faf2ce 100644 (file)
@@ -1,2 +1,8 @@
 CONFIG_NET_NS=y
 CONFIG_NF_TABLES_INET=y
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NF_CT_NETLINK=m
diff --git a/tools/testing/selftests/netfilter/nf-queue.c b/tools/testing/selftests/netfilter/nf-queue.c
new file mode 100644 (file)
index 0000000..29c73bc
--- /dev/null
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <time.h>
+#include <arpa/inet.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_queue.h>
+
+struct options {
+       bool count_packets;
+       int verbose;
+       unsigned int queue_num;
+       unsigned int timeout;
+};
+
+static unsigned int queue_stats[5];
+static struct options opts;
+
+static void help(const char *p)
+{
+       printf("Usage: %s [-c|-v [-vv] ] [-t timeout] [-q queue_num]\n", p);
+}
+
+static int parse_attr_cb(const struct nlattr *attr, void *data)
+{
+       const struct nlattr **tb = data;
+       int type = mnl_attr_get_type(attr);
+
+       /* skip unsupported attribute in user-space */
+       if (mnl_attr_type_valid(attr, NFQA_MAX) < 0)
+               return MNL_CB_OK;
+
+       switch (type) {
+       case NFQA_MARK:
+       case NFQA_IFINDEX_INDEV:
+       case NFQA_IFINDEX_OUTDEV:
+       case NFQA_IFINDEX_PHYSINDEV:
+       case NFQA_IFINDEX_PHYSOUTDEV:
+               if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) {
+                       perror("mnl_attr_validate");
+                       return MNL_CB_ERROR;
+               }
+               break;
+       case NFQA_TIMESTAMP:
+               if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC,
+                   sizeof(struct nfqnl_msg_packet_timestamp)) < 0) {
+                       perror("mnl_attr_validate2");
+                       return MNL_CB_ERROR;
+               }
+               break;
+       case NFQA_HWADDR:
+               if (mnl_attr_validate2(attr, MNL_TYPE_UNSPEC,
+                   sizeof(struct nfqnl_msg_packet_hw)) < 0) {
+                       perror("mnl_attr_validate2");
+                       return MNL_CB_ERROR;
+               }
+               break;
+       case NFQA_PAYLOAD:
+               break;
+       }
+       tb[type] = attr;
+       return MNL_CB_OK;
+}
+
+static int queue_cb(const struct nlmsghdr *nlh, void *data)
+{
+       struct nlattr *tb[NFQA_MAX+1] = { 0 };
+       struct nfqnl_msg_packet_hdr *ph = NULL;
+       uint32_t id = 0;
+
+       (void)data;
+
+       mnl_attr_parse(nlh, sizeof(struct nfgenmsg), parse_attr_cb, tb);
+       if (tb[NFQA_PACKET_HDR]) {
+               ph = mnl_attr_get_payload(tb[NFQA_PACKET_HDR]);
+               id = ntohl(ph->packet_id);
+
+               if (opts.verbose > 0)
+                       printf("packet hook=%u, hwproto 0x%x",
+                               ph->hook, ntohs(ph->hw_protocol));
+
+               if (ph->hook >= 5) {
+                       fprintf(stderr, "Unknown hook %d\n", ph->hook);
+                       return MNL_CB_ERROR;
+               }
+
+               if (opts.verbose > 0) {
+                       uint32_t skbinfo = 0;
+
+                       if (tb[NFQA_SKB_INFO])
+                               skbinfo = ntohl(mnl_attr_get_u32(tb[NFQA_SKB_INFO]));
+                       if (skbinfo & NFQA_SKB_CSUMNOTREADY)
+                               printf(" csumnotready");
+                       if (skbinfo & NFQA_SKB_GSO)
+                               printf(" gso");
+                       if (skbinfo & NFQA_SKB_CSUM_NOTVERIFIED)
+                               printf(" csumnotverified");
+                       puts("");
+               }
+
+               if (opts.count_packets)
+                       queue_stats[ph->hook]++;
+       }
+
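+       /* fold the packet id into the return value; mainloop() recovers it as
+        * ret - MNL_CB_OK and uses it to build the accept verdict
+        */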
+       return MNL_CB_OK + id;
+}
+
+static struct nlmsghdr *
+nfq_build_cfg_request(char *buf, uint8_t command, int queue_num)
+{
+       struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
+       struct nfqnl_msg_config_cmd cmd = {
+               .command = command,
+               .pf = htons(AF_INET),
+       };
+       struct nfgenmsg *nfg;
+
+       nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+
+       nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+
+       nfg->nfgen_family = AF_UNSPEC;
+       nfg->version = NFNETLINK_V0;
+       nfg->res_id = htons(queue_num);
+
+       mnl_attr_put(nlh, NFQA_CFG_CMD, sizeof(cmd), &cmd);
+
+       return nlh;
+}
+
+static struct nlmsghdr *
+nfq_build_cfg_params(char *buf, uint8_t mode, int range, int queue_num)
+{
+       struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
+       struct nfqnl_msg_config_params params = {
+               .copy_range = htonl(range),
+               .copy_mode = mode,
+       };
+       struct nfgenmsg *nfg;
+
+       nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+
+       nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+       nfg->nfgen_family = AF_UNSPEC;
+       nfg->version = NFNETLINK_V0;
+       nfg->res_id = htons(queue_num);
+
+       mnl_attr_put(nlh, NFQA_CFG_PARAMS, sizeof(params), &params);
+
+       return nlh;
+}
+
+static struct nlmsghdr *
+nfq_build_verdict(char *buf, int id, int queue_num, int verd)
+{
+       struct nfqnl_msg_verdict_hdr vh = {
+               .verdict = htonl(verd),
+               .id = htonl(id),
+       };
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfg;
+
+       nlh = mnl_nlmsg_put_header(buf);
+       nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_VERDICT;
+       nlh->nlmsg_flags = NLM_F_REQUEST;
+       nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
+       nfg->nfgen_family = AF_UNSPEC;
+       nfg->version = NFNETLINK_V0;
+       nfg->res_id = htons(queue_num);
+
+       mnl_attr_put(nlh, NFQA_VERDICT_HDR, sizeof(vh), &vh);
+
+       return nlh;
+}
+
+static void print_stats(void)
+{
+       unsigned int total = 0;
+       int i;
+
+       if (!opts.count_packets)
+               return;
+
+       for (i = 0; i < 5; i++) {
+               printf("hook %d packets %08u\n", i, queue_stats[i]);
+               total += queue_stats[i];
+       }
+
+       printf("%u packets total\n", total);
+}
+
+struct mnl_socket *open_queue(void)
+{
+       char buf[MNL_SOCKET_BUFFER_SIZE];
+       unsigned int queue_num;
+       struct mnl_socket *nl;
+       struct nlmsghdr *nlh;
+       struct timeval tv;
+       uint32_t flags;
+
+       nl = mnl_socket_open(NETLINK_NETFILTER);
+       if (nl == NULL) {
+               perror("mnl_socket_open");
+               exit(EXIT_FAILURE);
+       }
+
+       if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
+               perror("mnl_socket_bind");
+               exit(EXIT_FAILURE);
+       }
+
+       queue_num = opts.queue_num;
+       nlh = nfq_build_cfg_request(buf, NFQNL_CFG_CMD_BIND, queue_num);
+
+       if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+               perror("mnl_socket_sendto");
+               exit(EXIT_FAILURE);
+       }
+
+       nlh = nfq_build_cfg_params(buf, NFQNL_COPY_PACKET, 0xFFFF, queue_num);
+
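+       /* NFQA_CFG_MASK selects which feature bits to update, NFQA_CFG_FLAGS
+        * their values: deliver GSO packets unsegmented and include UID/GID
+        */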
+       flags = NFQA_CFG_F_GSO | NFQA_CFG_F_UID_GID;
+       mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(flags));
+       mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(flags));
+
+       if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+               perror("mnl_socket_sendto");
+               exit(EXIT_FAILURE);
+       }
+
+       memset(&tv, 0, sizeof(tv));
+       tv.tv_sec = opts.timeout;
+       if (opts.timeout && setsockopt(mnl_socket_get_fd(nl),
+                                      SOL_SOCKET, SO_RCVTIMEO,
+                                      &tv, sizeof(tv))) {
+               perror("setsockopt(SO_RCVTIMEO)");
+               exit(EXIT_FAILURE);
+       }
+
+       return nl;
+}
+
+static int mainloop(void)
+{
+       unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE;
+       struct mnl_socket *nl;
+       struct nlmsghdr *nlh;
+       unsigned int portid;
+       char *buf;
+       int ret;
+
+       buf = malloc(buflen);
+       if (!buf) {
+               perror("malloc");
+               exit(EXIT_FAILURE);
+       }
+
+       nl = open_queue();
+       portid = mnl_socket_get_portid(nl);
+
+       for (;;) {
+               uint32_t id;
+
+               ret = mnl_socket_recvfrom(nl, buf, buflen);
+               if (ret == -1) {
+                       if (errno == ENOBUFS)
+                               continue;
+
+                       if (errno == EAGAIN) {
+                               errno = 0;
+                               ret = 0;
+                               break;
+                       }
+
+                       perror("mnl_socket_recvfrom");
+                       exit(EXIT_FAILURE);
+               }
+
+               ret = mnl_cb_run(buf, ret, 0, portid, queue_cb, NULL);
+               if (ret < 0) {
+                       perror("mnl_cb_run");
+                       exit(EXIT_FAILURE);
+               }
+
+               id = ret - MNL_CB_OK;
+               nlh = nfq_build_verdict(buf, id, opts.queue_num, NF_ACCEPT);
+               if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
+                       perror("mnl_socket_sendto");
+                       exit(EXIT_FAILURE);
+               }
+       }
+
+       mnl_socket_close(nl);
+
+       return ret;
+}
+
+static void parse_opts(int argc, char **argv)
+{
+       int c;
+
+       while ((c = getopt(argc, argv, "chvt:q:")) != -1) {
+               switch (c) {
+               case 'c':
+                       opts.count_packets = true;
+                       break;
+               case 'h':
+                       help(argv[0]);
+                       exit(0);
+                       break;
+               case 'q':
+                       opts.queue_num = atoi(optarg);
+                       if (opts.queue_num > 0xffff)
+                               opts.queue_num = 0;
+                       break;
+               case 't':
+                       opts.timeout = atoi(optarg);
+                       break;
+               case 'v':
+                       opts.verbose++;
+                       break;
+               }
+       }
+}
+
+int main(int argc, char *argv[])
+{
+       int ret;
+
+       parse_opts(argc, argv);
+
+       ret = mainloop();
+       if (opts.count_packets)
+               print_stats();
+
+       return ret;
+}
diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh
new file mode 100755 (executable)
index 0000000..6898448
--- /dev/null
@@ -0,0 +1,332 @@
+#!/bin/bash
+#
+# This tests nf_queue:
+# 1. can process packets from all hooks
+# 2. support running nfqueue from more than one base chain
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+nsrouter="nsrouter-$sfx"
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+       ip netns del ${nsrouter}
+       rm -f "$TMPFILE0"
+       rm -f "$TMPFILE1"
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+ip netns add ${nsrouter}
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not create net namespace"
+       exit $ksft_skip
+fi
+
+TMPFILE0=$(mktemp)
+TMPFILE1=$(mktemp)
+trap cleanup EXIT
+
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+ip link add veth0 netns ${nsrouter} type veth peer name eth0 netns ${ns1} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+ip link add veth1 netns ${nsrouter} type veth peer name eth0 netns ${ns2}
+
+ip -net ${nsrouter} link set lo up
+ip -net ${nsrouter} link set veth0 up
+ip -net ${nsrouter} addr add 10.0.1.1/24 dev veth0
+ip -net ${nsrouter} addr add dead:1::1/64 dev veth0
+
+ip -net ${nsrouter} link set veth1 up
+ip -net ${nsrouter} addr add 10.0.2.1/24 dev veth1
+ip -net ${nsrouter} addr add dead:2::1/64 dev veth1
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set eth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set eth0 up
+
+ip -net ${ns1} addr add 10.0.1.99/24 dev eth0
+ip -net ${ns1} addr add dead:1::99/64 dev eth0
+ip -net ${ns1} route add default via 10.0.1.1
+ip -net ${ns1} route add default via dead:1::1
+
+ip -net ${ns2} addr add 10.0.2.99/24 dev eth0
+ip -net ${ns2} addr add dead:2::99/64 dev eth0
+ip -net ${ns2} route add default via 10.0.2.1
+ip -net ${ns2} route add default via dead:2::1
+
+load_ruleset() {
+       local name=$1
+       local prio=$2
+
+ip netns exec ${nsrouter} nft -f - <<EOF
+table inet $name {
+       chain nfq {
+               ip protocol icmp queue bypass
+               icmpv6 type { "echo-request", "echo-reply" } queue num 1 bypass
+       }
+       chain pre {
+               type filter hook prerouting priority $prio; policy accept;
+               jump nfq
+       }
+       chain input {
+               type filter hook input priority $prio; policy accept;
+               jump nfq
+       }
+       chain forward {
+               type filter hook forward priority $prio; policy accept;
+               tcp dport 12345 queue num 2
+               jump nfq
+       }
+       chain output {
+               type filter hook output priority $prio; policy accept;
+               tcp dport 12345 queue num 3
+               jump nfq
+       }
+       chain post {
+               type filter hook postrouting priority $prio; policy accept;
+               jump nfq
+       }
+}
+EOF
+}
+
+load_counter_ruleset() {
+       local prio=$1
+
+ip netns exec ${nsrouter} nft -f - <<EOF
+table inet countrules {
+       chain pre {
+               type filter hook prerouting priority $prio; policy accept;
+               counter
+       }
+       chain input {
+               type filter hook input priority $prio; policy accept;
+               counter
+       }
+       chain forward {
+               type filter hook forward priority $prio; policy accept;
+               counter
+       }
+       chain output {
+               type filter hook output priority $prio; policy accept;
+               counter
+       }
+       chain post {
+               type filter hook postrouting priority $prio; policy accept;
+               counter
+       }
+}
+EOF
+}
+
+test_ping() {
+  ip netns exec ${ns1} ping -c 1 -q 10.0.2.99 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  return 0
+}
+
+test_ping_router() {
+  ip netns exec ${ns1} ping -c 1 -q 10.0.2.1 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  ip netns exec ${ns1} ping -c 1 -q dead:2::1 > /dev/null
+  if [ $? -ne 0 ];then
+       return 1
+  fi
+
+  return 0
+}
+
+test_queue_blackhole() {
+       local proto=$1
+
+ip netns exec ${nsrouter} nft -f - <<EOF
+table $proto blackh {
+       chain forward {
+       type filter hook forward priority 0; policy accept;
+               queue num 600
+       }
+}
+EOF
+       if [ $proto = "ip" ] ;then
+               ip netns exec ${ns1} ping -c 1 -q 10.0.2.99 > /dev/null
+               lret=$?
+       elif [ $proto = "ip6" ]; then
+               ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null
+               lret=$?
+       else
+               lret=111
+       fi
+
+       # queue without bypass keyword should drop traffic if no listener exists.
+       if [ $lret -eq 0 ];then
+               echo "FAIL: $proto expected failure, got $lret" 1>&2
+               exit 1
+       fi
+
+       ip netns exec ${nsrouter} nft delete table $proto blackh
+       if [ $? -ne 0 ] ;then
+               echo "FAIL: $proto: Could not delete blackh table"
+               exit 1
+       fi
+
+       echo "PASS: $proto: statement with no listener results in packet drop"
+}
+
+test_queue()
+{
+       local expected=$1
+       local last=""
+
+       # spawn nf-queue listeners
+       ip netns exec ${nsrouter} ./nf-queue -c -q 0 -t 3 > "$TMPFILE0" &
+       ip netns exec ${nsrouter} ./nf-queue -c -q 1 -t 3 > "$TMPFILE1" &
+       sleep 1
+       test_ping
+       ret=$?
+       if [ $ret -ne 0 ];then
+               echo "FAIL: netns routing/connectivity with active listeners (queues 0 and 1): $ret" 1>&2
+               exit $ret
+       fi
+
+       test_ping_router
+       ret=$?
+       if [ $ret -ne 0 ];then
+               echo "FAIL: netns router unreachable with active listeners (queues 0 and 1): $ret" 1>&2
+               exit $ret
+       fi
+
+       wait
+       ret=$?
+
+       for file in $TMPFILE0 $TMPFILE1; do
+               last=$(tail -n1 "$file")
+               if [ x"$last" != x"$expected packets total" ]; then
+                       echo "FAIL: Expected $expected packets total, but got $last" 1>&2
+                       cat "$file" 1>&2
+
+                       ip netns exec ${nsrouter} nft list ruleset
+                       exit 1
+               fi
+       done
+
+       echo "PASS: Expected and received $last"
+}
+
+test_tcp_forward()
+{
+       ip netns exec ${nsrouter} ./nf-queue -q 2 -t 10 &
+       local nfqpid=$!
+
+       tmpfile=$(mktemp) || exit 1
+       dd conv=sparse status=none if=/dev/zero bs=1M count=100 of=$tmpfile
+       ip netns exec ${ns2} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
+       local rpid=$!
+
+       sleep 1
+       ip netns exec ${ns1} nc -w 5 10.0.2.99 12345 <"$tmpfile" >/dev/null &
+       local lpid=$!
+
+       rm -f "$tmpfile"
+
+       wait $rpid
+       wait $lpid
+       [ $? -eq 0 ] && echo "PASS: tcp and nfqueue in forward chain"
+}
+
+test_tcp_localhost()
+{
+       tc -net "${nsrouter}" qdisc add dev lo root netem loss random 1%
+
+       tmpfile=$(mktemp) || exit 1
+
+       dd conv=sparse status=none if=/dev/zero bs=1M count=900 of=$tmpfile
+       ip netns exec ${nsrouter} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
+       local rpid=$!
+
+       ip netns exec ${nsrouter} ./nf-queue -q 3 -t 30 &
+       local nfqpid=$!
+
+       sleep 1
+       ip netns exec ${nsrouter} nc -w 5 127.0.0.1 12345 <"$tmpfile" > /dev/null
+       rm -f "$tmpfile"
+
+       wait $rpid
+       [ $? -eq 0 ] && echo "PASS: tcp via loopback"
+}
+
+ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+load_ruleset "filter" 0
+
+sleep 3
+
+test_ping
+ret=$?
+if [ $ret -eq 0 ];then
+       # queue bypass works (rules were skipped, no listener)
+       echo "PASS: ${ns1} can reach ${ns2}"
+else
+       echo "FAIL: ${ns1} cannot reach ${ns2}: $ret" 1>&2
+       exit $ret
+fi
+
+test_queue_blackhole ip
+test_queue_blackhole ip6
+
+               sys_lpi_file = NULL;
+# queueing rules.  We don't want the second reinject
+# to re-execute the old hooks.
+load_counter_ruleset 10
+
+# we are hooking all: prerouting/input/forward/output/postrouting.
+# we ping ${ns2} from ${ns1} via ${nsrouter} using ipv4 and ipv6, so:
+# 1x icmp prerouting,forward,postrouting -> 3 queue events (6 incl. reply).
+# 1x icmp prerouting,input,output,postrouting -> 4 queue events incl. reply.
+# so we expect that userspace program receives 10 packets.
+test_queue 10
+
+# same.  We queue to a second program as well.
+load_ruleset "filter2" 20
+test_queue 20
+
+test_tcp_forward
+test_tcp_localhost
+
+exit $ret
index 138d46b..936e1ca 100755 (executable)
@@ -527,11 +527,16 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75
 n0 wg set wg0 peer "$pub2" allowed-ips ::/0
 n0 wg set wg0 peer "$pub2" remove
-low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= )
-n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer }
-[[ -z $(n0 wg show wg0 peers) ]]
-n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer }
-[[ -z $(n0 wg show wg0 peers) ]]
+for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do
+       n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111
+done
+[[ -n $(n0 wg show wg0 peers) ]]
+exec 4< <(n0 ncat -l -u -p 1111)
+ncat_pid=$!
+waitncatudp $netns0 $ncat_pid
+ip0 link set wg0 up
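+# a peer set to a low-order point must never trigger a handshake, so the
+# 2-second read from the UDP listener is expected to time out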
+! read -r -n 1 -t 2 <&4 || false
+kill $ncat_pid
 ip0 link del wg0
 
 declare -A objects
index 28d4776..90598a4 100644 (file)
@@ -41,7 +41,7 @@ $(DISTFILES_PATH)/$(1):
        flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3)  $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi'
 endef
 
-$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3))
+$(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8))
 $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c))
 $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d))
 $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae))
index 90bc981..c969812 100644 (file)
@@ -13,7 +13,6 @@
 #include <fcntl.h>
 #include <sys/wait.h>
 #include <sys/mount.h>
-#include <sys/types.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/io.h>
index af9323a..d531de1 100644 (file)
@@ -56,7 +56,6 @@ CONFIG_NO_HZ_IDLE=y
 CONFIG_NO_HZ_FULL=n
 CONFIG_HZ_PERIODIC=n
 CONFIG_HIGH_RES_TIMERS=y
-CONFIG_COMPAT_32BIT_TIME=y
 CONFIG_ARCH_RANDOM=y
 CONFIG_FILE_LOCKING=y
 CONFIG_POSIX_TIMERS=y