Merge branch 'i2c/for-current' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Jun 2021 22:45:11 +0000 (15:45 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 5 Jun 2021 22:45:11 +0000 (15:45 -0700)
Pull i2c fixes from Wolfram Sang:
 "Some more bugfixes from I2C for v5.13. Usual stuff"

* 'i2c/for-current' of git://git.kernel.org/pub/scm/linux/kernel/git/wsa/linux:
  i2c: qcom-geni: Suspend and resume the bus during SYSTEM_SLEEP_PM ops
  i2c: qcom-geni: Add shutdown callback for i2c
  i2c: tegra-bpmp: Demote kernel-doc abuses
  i2c: altera: Fix formatting issue in struct and demote unworthy kernel-doc headers

231 files changed:
.mailmap
MAINTAINERS
arch/mips/mm/cache.c
arch/riscv/Makefile
arch/riscv/errata/sifive/Makefile
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
drivers/acpi/acpica/utdelete.c
drivers/base/memory.c
drivers/bluetooth/btusb.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/fdtparams.c
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/memattr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_mm.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/host1x/bus.c
drivers/hid/Kconfig
drivers/hid/Makefile
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.c
drivers/hid/hid-a4tech.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-ft260.c
drivers/hid/hid-gt683r.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-magicmouse.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/hid-semitek.c [new file with mode: 0644]
drivers/hid/hid-sensor-custom.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-thrustmaster.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/surface-hid/surface_hid_core.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/hid-pidff.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/pmbus/fsp-3y.c
drivers/hwmon/pmbus/isl68137.c
drivers/hwmon/pmbus/q54sj108a2.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/virtio_net.c
drivers/net/wireguard/Makefile
drivers/net/wireguard/allowedips.c
drivers/net/wireguard/allowedips.h
drivers/net/wireguard/main.c
drivers/net/wireguard/peer.c
drivers/net/wireguard/peer.h
drivers/net/wireguard/selftest/allowedips.c
drivers/net/wireguard/socket.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt7615/init.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
drivers/net/wireless/mediatek/mt76/mt7921/init.c
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
drivers/net/wireless/mediatek/mt76/mt7921/main.c
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/pci/of.c
drivers/pci/probe.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/platform/vfio_platform_common.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/core/fb_defio.c
drivers/video/fbdev/core/fbmem.c
fs/btrfs/compression.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/reflink.c
fs/btrfs/tree-log.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/lops.c
fs/gfs2/lops.h
fs/gfs2/util.c
fs/io_uring.c
fs/notify/fanotify/fanotify_user.c
fs/notify/fdinfo.c
fs/ocfs2/file.c
include/linux/avf/virtchnl.h
include/linux/fanotify.h
include/linux/fb.h
include/linux/hid.h
include/linux/host1x.h
include/linux/mlx5/mlx5_ifc.h
include/linux/pci.h
include/linux/pgtable.h
include/net/caif/caif_dev.h
include/net/caif/cfcnfg.h
include/net/caif/cfserl.h
include/net/netfilter/nf_tables.h
include/net/tls.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/virtio_ids.h
init/main.c
kernel/bpf/helpers.c
kernel/trace/bpf_trace.c
lib/crc64.c
mm/debug_vm_pgtable.c
mm/hugetlb.c
mm/kasan/init.c
mm/kfence/core.c
mm/memory.c
mm/page_alloc.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/caif/cfcnfg.c
net/caif/cfserl.c
net/compat.c
net/core/devlink.c
net/core/fib_rules.c
net/core/rtnetlink.c
net/core/sock.c
net/dsa/tag_8021q.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ipv4/ipconfig.c
net/ipv6/route.c
net/ipv6/sit.c
net/kcm/kcmsock.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_ct.c
net/nfc/llcp_sock.c
net/sched/act_ct.c
net/sched/sch_htb.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/x25/af_x25.c
samples/vfio-mdev/mdpy-fb.c
scripts/Makefile.modfinal
scripts/link-vmlinux.sh
sound/core/control_led.c
sound/core/timer.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
tools/arch/mips/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/perf/Makefile.config
tools/perf/builtin-record.c
tools/perf/check-headers.sh
tools/perf/tests/attr/base-record
tools/perf/util/bpf_counter.c
tools/perf/util/dwarf-aux.c
tools/perf/util/env.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/perf_api_probe.c
tools/perf/util/perf_api_probe.h
tools/perf/util/probe-finder.c
tools/perf/util/stat-display.c
tools/perf/util/symbol-elf.c
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/proc/.gitignore
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/kernel.config

index ce6c497..c79a787 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -243,6 +243,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
+Michel Lespinasse <michel@lespinasse.org>
+Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
+Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
 Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
 Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
 Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
index 503fd21..b706dd2 100644 (file)
@@ -3877,6 +3877,7 @@ L:        linux-btrfs@vger.kernel.org
 S:     Maintained
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
+C:     irc://irc.libera.chat/btrfs
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
@@ -6945,6 +6946,7 @@ F:        net/core/failover.c
 FANOTIFY
 M:     Jan Kara <jack@suse.cz>
 R:     Amir Goldstein <amir73il@gmail.com>
+R:     Matthew Bobrowski <repnop@google.com>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
 F:     fs/notify/fanotify/
@@ -12903,7 +12905,7 @@ F:      net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/
@@ -12916,7 +12918,7 @@ F:      net/nfc/
 NFC VIRTUAL NCI DEVICE DRIVER
 M:     Bongsu Jeon <bongsu.jeon@samsung.com>
 L:     netdev@vger.kernel.org
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/virtual_ncidev.c
 F:     tools/testing/selftests/nci/
@@ -13214,7 +13216,7 @@ F:      sound/soc/codecs/tfa9879*
 
 NXP-NCI NFC DRIVER
 R:     Charles Gorand <charles.gorand@effinnov.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     drivers/nfc/nxp-nci
 
@@ -14117,6 +14119,7 @@ F:      drivers/pci/controller/pci-v3-semi.c
 PCI ENDPOINT SUBSYSTEM
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 F:     Documentation/PCI/endpoint/*
@@ -14165,6 +14168,7 @@ F:      drivers/pci/controller/pci-xgene-msi.c
 PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
 M:     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 R:     Rob Herring <robh@kernel.org>
+R:     Krzysztof Wilczyński <kw@linux.com>
 L:     linux-pci@vger.kernel.org
 S:     Supported
 Q:     http://patchwork.ozlabs.org/project/linux-pci/list/
@@ -16143,7 +16147,7 @@ F:      include/media/drv-intf/s3c_camif.h
 SAMSUNG S3FWRN5 NFC DRIVER
 M:     Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F:     drivers/nfc/s3fwrn5
@@ -18333,7 +18337,7 @@ F:      sound/soc/codecs/tas571x*
 TI TRF7970A NFC DRIVER
 M:     Mark Greer <mgreer@animalcreek.com>
 L:     linux-wireless@vger.kernel.org
-L:     linux-nfc@lists.01.org (moderated for non-subscribers)
+L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Supported
 F:     Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:     drivers/nfc/trf7970a.c
index a7bf0c8..830ab91 100644 (file)
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);
 
 #define PM(p)  __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
 
 static inline void setup_protection_map(void)
 {
        protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[4]  = PVA(_PAGE_PRESENT);
-       protection_map[5]  = PVA(_PAGE_PRESENT);
-       protection_map[6]  = PVA(_PAGE_PRESENT);
-       protection_map[7]  = PVA(_PAGE_PRESENT);
+       protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+       protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[4]  = PM(_PAGE_PRESENT);
+       protection_map[5]  = PM(_PAGE_PRESENT);
+       protection_map[6]  = PM(_PAGE_PRESENT);
+       protection_map[7]  = PM(_PAGE_PRESENT);
 
        protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
-       protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
-       protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+       protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+       protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
                                _PAGE_NO_READ);
-       protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-       protection_map[12] = PVA(_PAGE_PRESENT);
-       protection_map[13] = PVA(_PAGE_PRESENT);
-       protection_map[14] = PVA(_PAGE_PRESENT);
-       protection_map[15] = PVA(_PAGE_PRESENT);
+       protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+       protection_map[12] = PM(_PAGE_PRESENT);
+       protection_map[13] = PM(_PAGE_PRESENT);
+       protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+       protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
 }
 
-#undef _PVA
 #undef PM
 
 void cpu_cache_init(void)
index 3eb9590..4be0206 100644 (file)
@@ -38,6 +38,15 @@ else
        KBUILD_LDFLAGS += -melf32lriscv
 endif
 
+ifeq ($(CONFIG_LD_IS_LLD),y)
+       KBUILD_CFLAGS += -mno-relax
+       KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+       KBUILD_CFLAGS += -Wa,-mno-relax
+       KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
 # ISA string setting
 riscv-march-$(CONFIG_ARCH_RV32I)       := rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
index bdd5fc8..2fde48d 100644 (file)
@@ -1,2 +1,2 @@
-obj-y += errata_cip_453.o
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
 obj-y += errata.o
index 03901d3..9a1b7a0 100644 (file)
@@ -231,13 +231,13 @@ static void __init init_resources(void)
 
        /* Clean-up any unused pre-allocated resources */
        mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
        return;
 
  error:
        /* Better an empty resource tree than an inconsistent one */
        release_child_resources(&iomem_resource);
-       memblock_free((phys_addr_t) mem_res, mem_res_sz);
+       memblock_free(__pa(mem_res), mem_res_sz);
 }
 
 
index 4faf8bd..4c4c92c 100644 (file)
@@ -746,14 +746,18 @@ void __init protect_kernel_text_data(void)
        unsigned long init_data_start = (unsigned long)__init_data_begin;
        unsigned long rodata_start = (unsigned long)__start_rodata;
        unsigned long data_start = (unsigned long)_data;
-       unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+       unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+       unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
 
        set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
        set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
        set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
        /* rodata section is marked readonly in mark_rodata_ro */
        set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-       set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+       set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
 }
 
 void mark_rodata_ro(void)
index 624a267..e5ba979 100644 (file)
@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                }
                break;
 
+       case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
+
+               ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+                                 "***** Address handler %p\n", object));
+
+               acpi_os_delete_mutex(object->address_space.context_mutex);
+               break;
+
        default:
 
                break;
index b31b3af..d5ffaab 100644 (file)
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
        struct zone *zone;
        int ret;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-
        /*
         * Unaccount before offlining, such that unpopulated zone and kthreads
         * can properly be torn down in offline_pages().
         */
-       if (nr_vmemmap_pages)
+       if (nr_vmemmap_pages) {
+               zone = page_zone(pfn_to_page(start_pfn));
                adjust_present_page_count(zone, -nr_vmemmap_pages);
+       }
 
        ret = offline_pages(start_pfn + nr_vmemmap_pages,
                            nr_pages - nr_vmemmap_pages);
index 5d603ef..7f6ba2c 100644 (file)
@@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = {
        /* Realtek 8822CE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
                                                     BTUSB_WIDEBAND_SPEECH },
+       { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK |
+                                                    BTUSB_WIDEBAND_SPEECH },
 
        /* Realtek 8852AE Bluetooth devices */
        { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
@@ -2527,10 +2529,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
        }
 
        btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
-       err = request_firmware(&fw, fwname, &hdev->dev);
+       err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
                           fwname, err);
+
                return err;
        }
 
@@ -2680,12 +2689,24 @@ download:
        err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
                                                sizeof(fwname), "sfi");
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Unsupported Intel firmware naming");
                return -EINVAL;
        }
 
-       err = request_firmware(&fw, fwname, &hdev->dev);
+       err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
        if (err < 0) {
+               if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+                       /* Firmware has already been loaded */
+                       set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+                       return 0;
+               }
+
                bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
                           fwname, err);
                return err;
index e15d484..ea7ca74 100644 (file)
@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
        if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
                return 0;
 
-       n = 0;
-       len = CPER_REC_LEN - 1;
+       len = CPER_REC_LEN;
        dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
        if (bank && device)
                n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
                             "DIMM location: not present. DMI handle: 0x%.4x ",
                             mem->mem_dev_handle);
 
-       msg[n] = '\0';
        return n;
 }
 
index bb042ab..e901f85 100644 (file)
@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
        BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
 
+       if (!fdt)
+               return 0;
+
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                node = fdt_path_offset(fdt, dt_params[i].path);
                if (node < 0)
index 4e81c60..dd95f33 100644 (file)
@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
                return 0;
 
        /* Skip any leading slashes */
-       while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+       while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
                i++;
 
        while (--result_len > 0 && i < cmdline_len) {
index 5737cb0..0a9aba5 100644 (file)
@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
                return false;
        }
 
-       if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
-               pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
-               return false;
-       }
-
        if (PAGE_SIZE > EFI_PAGE_SIZE &&
            (!PAGE_ALIGNED(in->phys_addr) ||
             !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
index 0350205..6819fe5 100644 (file)
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
-       unsigned long ras_counter;
 
        if (!fpriv)
                return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-       /*query ue count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, false);
-       /*ras counter is monotonic increasing*/
-       if (ras_counter != ctx->ras_counter_ue) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-               ctx->ras_counter_ue = ras_counter;
-       }
-
-       /*query ce count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, true);
-       if (ras_counter != ctx->ras_counter_ce) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-               ctx->ras_counter_ce = ras_counter;
-       }
-
        mutex_unlock(&mgr->lock);
        return 0;
 }
index 66ddfe4..57ec108 100644 (file)
@@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-       if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+       if (amdgpu_sriov_vf(adev) || 
+           adev->enable_virtual_display ||
+           (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
                return false;
 
        return amdgpu_device_asic_has_dc_support(adev->asic_type);
index 8f4a8f8..39b6c6b 100644 (file)
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
        unsigned char buff[34];
-       int addrptr = 0, size = 0;
+       int addrptr, size;
+       int len;
 
        if (!is_fru_eeprom_supported(adev))
                return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        /* If algo exists, it means that the i2c_adapter's initialized */
        if (!adev->pm.smu_i2c.algo) {
                DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-               return 0;
+               return -ENODEV;
        }
 
        /* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        /* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product name should only be 32 characters. Any more,
         * and something could be wrong. Cap it at 32 to be safe
         */
-       if (size > 32) {
+       if (len >= sizeof(adev->product_name)) {
                DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-               size = 32;
+               len = sizeof(adev->product_name) - 1;
        }
        /* Start at 2 due to buff using fields 0 and 1 for the address */
-       memcpy(adev->product_name, &buff[2], size);
-       adev->product_name[size] = '\0';
+       memcpy(adev->product_name, &buff[2], len);
+       adev->product_name[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->product_number)) {
                DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->product_number) - 1;
        }
-       memcpy(adev->product_number, &buff[2], size);
-       adev->product_number[size] = '\0';
+       memcpy(adev->product_number, &buff[2], len);
+       adev->product_number[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Serial number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->serial)) {
                DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->serial) - 1;
        }
-       memcpy(adev->serial, &buff[2], size);
-       adev->serial[size] = '\0';
+       memcpy(adev->serial, &buff[2], len);
+       adev->serial[len] = '\0';
 
        return 0;
 }
index 46a5328..60aa99a 100644 (file)
@@ -76,6 +76,7 @@ struct psp_ring
        uint64_t                        ring_mem_mc_addr;
        void                            *ring_mem_handle;
        uint32_t                        ring_size;
+       uint32_t                        ring_wptr;
 };
 
 /* More registers may will be supported */
index 589410c..02bba1f 100644 (file)
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
        if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index f2e725f..908664a 100644 (file)
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
        return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                        GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index 2bab9c7..cf3803f 100644 (file)
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
index 389eff9..652cc1a 100644 (file)
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }
 
-       adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+       if (!adev->dm.dc->ctx->dmub_srv)
+               adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
 
        amdgpu_dm_irq_suspend(adev);
 
-
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
-       bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool recalculate_timing = false;
+       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing |= amdgpu_freesync_vid_mode &&
+               recalculate_timing = amdgpu_freesync_vid_mode &&
                                 is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        mode = *freesync_mode;
                } else {
                        decide_crtc_timing_for_drm_display_mode(
-                               &mode, preferred_mode,
-                               dm_state ? (dm_state->scaling != RMX_OFF) : false);
-               }
+                               &mode, preferred_mode, scale);
 
-               preferred_refresh = drm_mode_vrefresh(preferred_mode);
+                       preferred_refresh = drm_mode_vrefresh(preferred_mode);
+               }
        }
 
        if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        * If scaling is enabled and refresh rate didn't change
        * we copy the vic and polarities of the old timings
        */
-       if (!recalculate_timing || mode_refresh != preferred_refresh)
+       if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
                        requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        if (cursor_scale_w != primary_scale_w ||
            cursor_scale_h != primary_scale_h) {
-               DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+               drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
                return -EINVAL;
        }
 
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
        int i;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *primary_state, *overlay_state = NULL;
+       struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
        /* Check if primary plane is contained inside overlay */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
        if (!primary_state->crtc)
                return 0;
 
+       /* check if cursor plane is enabled */
+       cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+       if (IS_ERR(cursor_state))
+               return PTR_ERR(cursor_state);
+
+       if (drm_atomic_plane_disabling(plane->state, cursor_state))
+               return 0;
+
        /* Perform the bounds check to ensure the overlay plane covers the primary */
        if (primary_state->crtc_x < overlay_state->crtc_x ||
            primary_state->crtc_y < overlay_state->crtc_y ||
index 527e56c..8357aa3 100644 (file)
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
        voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
        dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (voltage_supported && dummy_pstate_supported) {
+       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
                context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
                goto restore_dml_state;
        }
index 93f4d05..1e1cb24 100644 (file)
@@ -20,7 +20,6 @@ config DRM_I915
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
-       select IO_MAPPING
        select SYNC_FILE
        select IOSF_MBI
        select CRC32
index f6fe5cb..8598a1c 100644 (file)
@@ -367,10 +367,11 @@ retry:
                goto err_unpin;
 
        /* Finally, remap it using the new GTT offset */
-       ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
-                       (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-                       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-                       min_t(u64, vma->size, area->vm_end - area->vm_start));
+       ret = remap_io_mapping(area,
+                              area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+                              (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+                              min_t(u64, vma->size, area->vm_end - area->vm_start),
+                              &ggtt->iomap);
        if (ret)
                goto err_fence;
 
index 9ec9277..69e43bf 100644 (file)
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
 /* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
                unsigned long addr, unsigned long size,
                struct scatterlist *sgl, resource_size_t iobase);
index 9a777b0..666808c 100644 (file)
@@ -37,6 +37,17 @@ struct remap_pfn {
        resource_size_t iobase;
 };
 
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+
+       /* Special PTE are not associated with any struct page */
+       set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+       r->pfn++;
+
+       return 0;
+}
+
 #define use_dma(io) ((io) != -1)
 
 static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
        return 0;
 }
 
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ *  Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+                    unsigned long addr, unsigned long pfn, unsigned long size,
+                    struct io_mapping *iomap)
+{
+       struct remap_pfn r;
+       int err;
+
 #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+       GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+       /* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+       r.mm = vma->vm_mm;
+       r.pfn = pfn;
+       r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+                         (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+       err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+       if (unlikely(err)) {
+               zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+               return err;
+       }
+
+       return 0;
+}
 
 /**
  * remap_io_sg - remap an IO mapping to userspace
index ee8e753..eae0abd 100644 (file)
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
 
        for (n = 0; n < smoke[0].ncontexts; n++) {
                smoke[0].contexts[n] = live_context(i915, file);
-               if (!smoke[0].contexts[n]) {
-                       ret = -ENOMEM;
+               if (IS_ERR(smoke[0].contexts[n])) {
+                       ret = PTR_ERR(smoke[0].contexts[n]);
                        goto out_contexts;
                }
        }
index 87df251..0cb8680 100644 (file)
@@ -25,7 +25,7 @@
 #include "trace.h"
 
 /* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
 
 struct reset_control;
 
index 79bff8b..bfae8a0 100644 (file)
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
         * dGPU sector layout.
         */
        if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
-               base |= BIT(39);
+               base |= BIT_ULL(39);
 #endif
 
        tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
index 7b88261..0ea320c 100644 (file)
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
 
                err = reset_control_assert(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to assert SOR reset: %d\n",
                                err);
-                       return err;
+                       goto rpm_put;
                }
        }
 
        err = clk_prepare_enable(sor->clk);
        if (err < 0) {
                dev_err(sor->dev, "failed to enable clock: %d\n", err);
-               return err;
+               goto rpm_put;
        }
 
        usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
                        dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
                                err);
                        clk_disable_unprepare(sor->clk);
-                       return err;
+                       goto rpm_put;
                }
 
                reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
        }
 
        return 0;
+
+rpm_put:
+       if (sor->rst)
+               pm_runtime_put(sor->dev);
+
+       return err;
 }
 
 static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
                if (!sor->aux)
                        return -EPROBE_DEFER;
 
-               if (get_device(&sor->aux->ddc.dev)) {
-                       if (try_module_get(sor->aux->ddc.owner))
-                               sor->output.ddc = &sor->aux->ddc;
-                       else
-                               put_device(&sor->aux->ddc.dev);
-               }
+               if (get_device(sor->aux->dev))
+                       sor->output.ddc = &sor->aux->ddc;
        }
 
        if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
        err = tegra_sor_parse_dt(sor);
        if (err < 0)
-               return err;
+               goto put_aux;
 
        err = tegra_output_probe(&sor->output);
-       if (err < 0)
-               return dev_err_probe(&pdev->dev, err,
-                                    "failed to probe output\n");
+       if (err < 0) {
+               dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+               goto put_aux;
+       }
 
        if (sor->ops && sor->ops->probe) {
                err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sor);
        pm_runtime_enable(&pdev->dev);
 
-       INIT_LIST_HEAD(&sor->client.list);
+       host1x_client_init(&sor->client);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;
 
-       err = host1x_client_register(&sor->client);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-                       err);
-               goto rpm_disable;
-       }
-
        /*
         * On Tegra210 and earlier, provide our own implementation for the
         * pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
                                      sor->index);
                if (!name) {
                        err = -ENOMEM;
-                       goto unregister;
+                       goto uninit;
                }
 
                err = host1x_client_resume(&sor->client);
                if (err < 0) {
                        dev_err(sor->dev, "failed to resume: %d\n", err);
-                       goto unregister;
+                       goto uninit;
                }
 
                sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
                err = PTR_ERR(sor->clk_pad);
                dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
                        err);
-               goto unregister;
+               goto uninit;
+       }
+
+       err = __host1x_client_register(&sor->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               goto uninit;
        }
 
        return 0;
 
-unregister:
-       host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+       host1x_client_exit(&sor->client);
        pm_runtime_disable(&pdev->dev);
 remove:
+       if (sor->aux)
+               sor->output.ddc = NULL;
+
        tegra_output_remove(&sor->output);
+put_aux:
+       if (sor->aux)
+               put_device(sor->aux->dev);
+
        return err;
 }
 
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
        pm_runtime_disable(&pdev->dev);
 
+       if (sor->aux) {
+               put_device(sor->aux->dev);
+               sor->output.ddc = NULL;
+       }
+
        tegra_output_remove(&sor->output);
 
        return 0;
index 46f69c5..218e371 100644 (file)
@@ -735,6 +735,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 }
 EXPORT_SYMBOL(host1x_driver_unregister);
 
+/**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+       INIT_LIST_HEAD(&client->list);
+       __mutex_init(&client->lock, "host1x client lock", key);
+       client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+       mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
 /**
  * __host1x_client_register() - register a host1x client
  * @client: host1x client
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
 {
        struct host1x *host1x;
        int err;
 
-       INIT_LIST_HEAD(&client->list);
-       __mutex_init(&client->lock, "host1x client lock", key);
-       client->usecount = 0;
-
        mutex_lock(&devices_lock);
 
        list_for_each_entry(host1x, &devices, list) {
index 4bf263c..1605549 100644 (file)
@@ -93,11 +93,11 @@ menu "Special HID drivers"
        depends on HID
 
 config HID_A4TECH
-       tristate "A4 tech mice"
+       tristate "A4TECH mice"
        depends on HID
        default !EXPERT
        help
-       Support for A4 tech X5 and WOP-35 / Trust 450L mice.
+       Support for some A4TECH mice with two scroll wheels.
 
 config HID_ACCUTOUCH
        tristate "Accutouch touch device"
@@ -922,6 +922,21 @@ config HID_SAMSUNG
        help
        Support for Samsung InfraRed remote control or keyboards.
 
+config HID_SEMITEK
+       tristate "Semitek USB keyboards"
+       depends on HID
+       help
+       Support for Semitek USB keyboards that are not fully compliant
+       with the HID standard.
+
+       There are many variants, including:
+       - GK61, GK64, GK68, GK84, GK96, etc.
+       - SK61, SK64, SK68, SK84, SK96, etc.
+       - Dierya DK61/DK66
+       - Tronsmart TK09R
+       - Woo-dy
+       - X-Bows Nature/Knight
+
 config HID_SONY
        tristate "Sony PS2/3/4 accessories"
        depends on USB_HID
index 193431e..1ea1a7c 100644 (file)
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT)    += hid-roccat.o hid-roccat-common.o \
 obj-$(CONFIG_HID_RMI)          += hid-rmi.o
 obj-$(CONFIG_HID_SAITEK)       += hid-saitek.o
 obj-$(CONFIG_HID_SAMSUNG)      += hid-samsung.o
+obj-$(CONFIG_HID_SEMITEK)      += hid-semitek.o
 obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
 obj-$(CONFIG_HID_SONY)         += hid-sony.o
 obj-$(CONFIG_HID_SPEEDLINK)    += hid-speedlink.o
index 2ab38b7..3589d99 100644 (file)
@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
        sensor_index = req_node->sensor_idx;
        report_id = req_node->report_id;
        node_type = req_node->report_type;
+       kfree(req_node);
 
        if (node_type == HID_FEATURE_REPORT) {
                report_size = get_feature_report(sensor_index, report_id,
@@ -142,7 +143,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
        int rc, i;
 
        dev = &privdata->pdev->dev;
-       cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL);
+       cl_data = devm_kzalloc(dev, sizeof(*cl_data), GFP_KERNEL);
        if (!cl_data)
                return -ENOMEM;
 
@@ -175,12 +176,12 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                        rc = -EINVAL;
                        goto cleanup;
                }
-               cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL);
+               cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL);
                if (!cl_data->feature_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
                }
-               cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL);
+               cl_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL);
                if (!cl_data->input_report[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -189,7 +190,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                info.sensor_idx = cl_idx;
                info.dma_address = cl_data->sensor_dma_addr[i];
 
-               cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
+               cl_data->report_descr[i] =
+                       devm_kzalloc(dev, cl_data->report_descr_sz[i], GFP_KERNEL);
                if (!cl_data->report_descr[i]) {
                        rc = -ENOMEM;
                        goto cleanup;
@@ -214,11 +216,11 @@ cleanup:
                                          cl_data->sensor_virt_addr[i],
                                          cl_data->sensor_dma_addr[i]);
                }
-               kfree(cl_data->feature_report[i]);
-               kfree(cl_data->input_report[i]);
-               kfree(cl_data->report_descr[i]);
+               devm_kfree(dev, cl_data->feature_report[i]);
+               devm_kfree(dev, cl_data->input_report[i]);
+               devm_kfree(dev, cl_data->report_descr[i]);
        }
-       kfree(cl_data);
+       devm_kfree(dev, cl_data);
        return rc;
 }
 
@@ -241,6 +243,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
                                          cl_data->sensor_dma_addr[i]);
                }
        }
-       kfree(cl_data);
        return 0;
 }
index 4f98948..5ad1e7a 100644 (file)
@@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data)
        int i;
 
        for (i = 0; i < cli_data->num_hid_devices; ++i) {
-               kfree(cli_data->feature_report[i]);
-               kfree(cli_data->input_report[i]);
-               kfree(cli_data->report_descr[i]);
                if (cli_data->hid_sensor_hubs[i]) {
                        kfree(cli_data->hid_sensor_hubs[i]->driver_data);
                        hid_destroy_device(cli_data->hid_sensor_hubs[i]);
index 3a8c4a5..2cbc32d 100644 (file)
@@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = {
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
                .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95),
+               .driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
        { }
 };
 MODULE_DEVICE_TABLE(hid, a4_devices);
index 2ab22b9..fca8fc7 100644 (file)
@@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_T100_KEYBOARD            BIT(6)
 #define QUIRK_T100CHI                  BIT(7)
 #define QUIRK_G752_KEYBOARD            BIT(8)
-#define QUIRK_T101HA_DOCK              BIT(9)
-#define QUIRK_T90CHI                   BIT(10)
-#define QUIRK_MEDION_E1239T            BIT(11)
-#define QUIRK_ROG_NKEY_KEYBOARD                BIT(12)
+#define QUIRK_T90CHI                   BIT(9)
+#define QUIRK_MEDION_E1239T            BIT(10)
+#define QUIRK_ROG_NKEY_KEYBOARD                BIT(11)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev,
        if (drvdata->quirks & QUIRK_MEDION_E1239T)
                return asus_e1239t_event(drvdata, data, size);
 
-       if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+       if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
                /*
                 * Skip these report ID, the device emits a continuous stream associated
                 * with the AURA mode it is in which looks like an 'echo'.
@@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev,
                                return -1;
                        }
                }
+               if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
+                       /*
+                        * G713 and G733 send these codes on some keypresses, depending on
+                        * the key pressed it can trigger a shutdown event if not caught.
+                       */
+                       if(data[0] == 0x02 && data[1] == 0x30) {
+                               return -1;
+                       }
+               }
+
        }
 
        return 0;
@@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
-       /* use hid-multitouch for T101HA touchpad */
-       if (id->driver_data & QUIRK_T101HA_DOCK &&
-           hdev->collection->usage == HID_GD_MOUSE)
-               return -ENODEV;
-
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
-       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
-               USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI },
        { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T),
                QUIRK_MEDION_E1239T },
+       /*
+        * Note bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard
+        * part, while letting hid-multitouch.c handle the touchpad.
+        */
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+               USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, asus_devices);
index 0ae9f6d..0de2788 100644 (file)
@@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
        case BUS_I2C:
                bus = "I2C";
                break;
+       case BUS_VIRTUAL:
+               bus = "VIRTUAL";
+               break;
        default:
                bus = "<UNKNOWN>";
        }
@@ -2588,7 +2591,6 @@ int hid_check_keys_pressed(struct hid_device *hid)
 
        return 0;
 }
-
 EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
 
 static int __init hid_init(void)
index 59f8d71..a311fb8 100644 (file)
@@ -930,6 +930,9 @@ static const char *keys[KEY_MAX + 1] = {
        [KEY_APPSELECT] = "AppSelect",
        [KEY_SCREENSAVER] = "ScreenSaver",
        [KEY_VOICECOMMAND] = "VoiceCommand",
+       [KEY_ASSISTANT] = "Assistant",
+       [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
+       [KEY_EMOJI_PICKER] = "EmojiPicker",
        [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
        [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
        [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
index a575160..f43a840 100644 (file)
@@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report {
        u8 address;             /* 7-bit I2C address */
        u8 flag;                /* I2C transaction condition */
        u8 length;              /* data payload length */
-       u8 data[60];            /* data payload */
+       u8 data[FT260_WR_DATA_MAX]; /* data payload */
 } __packed;
 
 struct ft260_i2c_read_request_report {
@@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev,
 
        ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT,
                                 HID_REQ_GET_REPORT);
-       memcpy(data, buf, len);
+       if (likely(ret == len))
+               memcpy(data, buf, len);
+       else if (ret >= 0)
+               ret = -EIO;
        kfree(buf);
        return ret;
 }
@@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS,
                                           (u8 *)&report, sizeof(report));
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                hid_err(hdev, "failed to retrieve status: %d\n", ret);
                return ret;
        }
@@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd,
        struct ft260_i2c_write_request_report *rep =
                (struct ft260_i2c_write_request_report *)dev->write_buf;
 
+       if (data_len >= sizeof(rep->data))
+               return -EINVAL;
+
        rep->address = addr;
        rep->data[0] = cmd;
        rep->length = data_len + 1;
@@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev,
 
        ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS,
                                           (u8 *)cfg, len);
-       if (ret != len) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve system status\n");
-               if (ret >= 0)
-                       return -EIO;
+               return ret;
        }
        return 0;
 }
@@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", *field);
 }
@@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len,
        int ret;
 
        ret = ft260_hid_feature_report_get(hdev, id, cfg, len);
-       if (ret != len && ret >= 0)
-               return -EIO;
+       if (ret < 0)
+               return ret;
 
        return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field));
 }
@@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
 
        ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION,
                                           (u8 *)&version, sizeof(version));
-       if (ret != sizeof(version)) {
+       if (ret < 0) {
                hid_err(hdev, "failed to retrieve chip version\n");
-               if (ret >= 0)
-                       ret = -EIO;
                goto err_hid_close;
        }
 
index 898871c..29ccb0a 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
        { }
 };
+MODULE_DEVICE_TABLE(hid, gt683r_led_id);
 
 static void gt683r_brightness_set(struct led_classdev *led_cdev,
                                enum led_brightness brightness)
index 84b8da3..b84a0a1 100644 (file)
@@ -26,6 +26,7 @@
 #define USB_DEVICE_ID_A4TECH_WCP32PU   0x0006
 #define USB_DEVICE_ID_A4TECH_X5_005D   0x000a
 #define USB_DEVICE_ID_A4TECH_RP_649    0x001a
+#define USB_DEVICE_ID_A4TECH_NB_95     0x022b
 
 #define USB_VENDOR_ID_AASHIMA          0x06d6
 #define USB_DEVICE_ID_AASHIMA_GAMEPAD  0x0025
 
 #define USB_VENDOR_ID_CORSAIR          0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
-
-#define USB_VENDOR_ID_CORSAIR           0x1b1c
 #define USB_DEVICE_ID_CORSAIR_K70R      0x1b09
 #define USB_DEVICE_ID_CORSAIR_K95RGB    0x1b11
 #define USB_DEVICE_ID_CORSAIR_M65RGB    0x1b12
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3   0x60b5
+#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E    0x600e
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019     0x6019
 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E     0x602e
 #define USB_DEVICE_ID_SAITEK_X52       0x075c
 #define USB_DEVICE_ID_SAITEK_X52_2     0x0255
 #define USB_DEVICE_ID_SAITEK_X52_PRO   0x0762
+#define USB_DEVICE_ID_SAITEK_X65       0x0b6a
 
 #define USB_VENDOR_ID_SAMSUNG          0x0419
 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE        0x0001
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD      0x0023
 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2     0x0027
 
+#define USB_VENDOR_ID_SEMITEK  0x1ea7
+#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907
+
 #define USB_VENDOR_ID_SENNHEISER       0x1395
 #define USB_DEVICE_ID_SENNHEISER_BTD500USB     0x002c
 
 #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A      0x2819
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A      0x6e21
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4
 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
index 18f5e28..abbfa91 100644 (file)
@@ -964,6 +964,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                case 0x0cd: map_key_clear(KEY_PLAYPAUSE);       break;
                case 0x0cf: map_key_clear(KEY_VOICECOMMAND);    break;
+
+               case 0x0d9: map_key_clear(KEY_EMOJI_PICKER);    break;
+
                case 0x0e0: map_abs_clear(ABS_VOLUME);          break;
                case 0x0e2: map_key_clear(KEY_MUTE);            break;
                case 0x0e5: map_key_clear(KEY_BASSBOOST);       break;
index d598094..fee4e54 100644 (file)
@@ -1263,6 +1263,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
        int status;
 
        long flags = (long) data[2];
+       *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
 
        if (flags & 0x80)
                switch (flags & 0x07) {
index 2bb473d..8bcaee4 100644 (file)
@@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev,
        if (id->vendor == USB_VENDOR_ID_APPLE &&
            id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
            hdev->type != HID_TYPE_USBMOUSE)
-               return 0;
+               return -ENODEV;
 
        msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
        if (msc == NULL) {
@@ -779,7 +779,10 @@ err_stop_hw:
 static void magicmouse_remove(struct hid_device *hdev)
 {
        struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-       cancel_delayed_work_sync(&msc->work);
+
+       if (msc)
+               cancel_delayed_work_sync(&msc->work);
+
        hid_hw_stop(hdev);
 }
 
index 9d9f3e1..2e4fb76 100644 (file)
@@ -70,6 +70,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_WIN8_PTP_BUTTONS      BIT(18)
 #define MT_QUIRK_SEPARATE_APP_REPORT   BIT(19)
 #define MT_QUIRK_FORCE_MULTI_INPUT     BIT(20)
+#define MT_QUIRK_DISABLE_WAKEUP                BIT(21)
 
 #define MT_INPUTMODE_TOUCHSCREEN       0x02
 #define MT_INPUTMODE_TOUCHPAD          0x03
@@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
 #define MT_CLS_EXPORT_ALL_INPUTS               0x0013
 /* reserved                                    0x0014 */
 #define MT_CLS_WIN_8_FORCE_MULTI_INPUT         0x0015
+#define MT_CLS_WIN_8_DISABLE_WAKEUP            0x0016
 
 /* vendor specific classes */
 #define MT_CLS_3M                              0x0101
@@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = {
                        MT_QUIRK_WIN8_PTP_BUTTONS |
                        MT_QUIRK_FORCE_MULTI_INPUT,
                .export_all_inputs = true },
+       { .name = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               .quirks = MT_QUIRK_ALWAYS_VALID |
+                       MT_QUIRK_IGNORE_DUPLICATES |
+                       MT_QUIRK_HOVERING |
+                       MT_QUIRK_CONTACT_CNT_ACCURATE |
+                       MT_QUIRK_STICKY_FINGERS |
+                       MT_QUIRK_WIN8_PTP_BUTTONS |
+                       MT_QUIRK_DISABLE_WAKEUP,
+               .export_all_inputs = true },
 
        /*
         * vendor specific classes
@@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
                if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
                        continue;
 
-               for (n = 0; n < field->report_count; n++) {
-                       if (field->usage[n].hid == HID_DG_CONTACTID)
-                               rdata->is_mt_collection = true;
+               if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+                       for (n = 0; n < field->report_count; n++) {
+                               if (field->usage[n].hid == HID_DG_CONTACTID) {
+                                       rdata->is_mt_collection = true;
+                                       break;
+                               }
+                       }
                }
        }
 
@@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        return 1;
                case HID_DG_CONFIDENCE:
                        if ((cls->name == MT_CLS_WIN_8 ||
-                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
+                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT ||
+                            cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) &&
                                (field->application == HID_DG_TOUCHPAD ||
                                 field->application == HID_DG_TOUCHSCREEN))
                                app->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
                /* we do not set suffix = "Touchscreen" */
                hi->input->name = hdev->name;
                break;
-       case HID_DG_STYLUS:
-               /* force BTN_STYLUS to allow tablet matching in udev */
-               __set_bit(BTN_STYLUS, hi->input->keybit);
-               break;
        case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
                suffix = "Custom Media Keys";
                break;
+       case HID_DG_STYLUS:
+               /* force BTN_STYLUS to allow tablet matching in udev */
+               __set_bit(BTN_STYLUS, hi->input->keybit);
+               fallthrough;
        case HID_DG_PEN:
                suffix = "Stylus";
                break;
@@ -1749,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
 #ifdef CONFIG_PM
 static int mt_suspend(struct hid_device *hdev, pm_message_t state)
 {
+       struct mt_device *td = hid_get_drvdata(hdev);
+
        /* High latency is desirable for power savings during S3/S0ix */
-       mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+       if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP)
+               mt_set_modes(hdev, HID_LATENCY_HIGH, false, false);
+       else
+               mt_set_modes(hdev, HID_LATENCY_HIGH, true, true);
+
        return 0;
 }
 
@@ -1809,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
                        USB_DEVICE_ID_ANTON_TOUCH_PAD) },
 
+       /* Asus T101HA */
+       { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP,
+               HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
+                          USB_VENDOR_ID_ASUSTEK,
+                          USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
+
        /* Asus T304UA */
        { .driver_data = MT_CLS_ASUS,
                HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
index 3dd6f15..51b39bd 100644 (file)
@@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL },
@@ -158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
@@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
@@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) },
 #endif
 #if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
        { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c
new file mode 100644 (file)
index 0000000..ba6607d
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  HID driver for Semitek keyboards
+ *
+ *  Copyright (c) 2021 Benjamin Moody
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+                                  unsigned int *rsize)
+{
+       /* In the report descriptor for interface 2, fix the incorrect
+          description of report ID 0x04 (the report contains a
+          bitmask, not an array of keycodes.) */
+       if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) {
+               hid_info(hdev, "fixing up Semitek report descriptor\n");
+               rdesc[0x84] = 0x02;
+       }
+       return rdesc;
+}
+
+static const struct hid_device_id semitek_devices[] = {
+       { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) },
+       { }
+};
+MODULE_DEVICE_TABLE(hid, semitek_devices);
+
+static struct hid_driver semitek_driver = {
+       .name = "semitek",
+       .id_table = semitek_devices,
+       .report_fixup = semitek_report_fixup,
+};
+module_hid_driver(semitek_driver);
+
+MODULE_LICENSE("GPL");
index 2e66621..32c2306 100644 (file)
@@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
        struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev);
        int index, field_index, usage;
        char name[HID_CUSTOM_NAME_LENGTH];
-       int value;
+       int value, ret;
 
        if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage,
                   name) == 3) {
@@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr,
 
                report_id = sensor_inst->fields[field_index].attribute.
                                                                report_id;
-               sensor_hub_set_feature(sensor_inst->hsdev, report_id,
-                                      index, sizeof(value), &value);
+               ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id,
+                                            index, sizeof(value), &value);
+               if (ret)
+                       return ret;
        } else
                return -EINVAL;
 
index 95cf88f..6abd3e2 100644 (file)
@@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        buffer_size = buffer_size / sizeof(__s32);
        if (buffer_size) {
                for (i = 0; i < buffer_size; ++i) {
-                       hid_set_field(report->field[field_index], i,
-                                     (__force __s32)cpu_to_le32(*buf32));
+                       ret = hid_set_field(report->field[field_index], i,
+                                           (__force __s32)cpu_to_le32(*buf32));
+                       if (ret)
+                               goto done_proc;
+
                        ++buf32;
                }
        }
        if (remaining_bytes) {
                value = 0;
                memcpy(&value, (u8 *)buf32, remaining_bytes);
-               hid_set_field(report->field[field_index], i,
-                             (__force __s32)cpu_to_le32(value));
+               ret = hid_set_field(report->field[field_index], i,
+                                   (__force __s32)cpu_to_le32(value));
+               if (ret)
+                       goto done_proc;
        }
        hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
        hid_hw_wait(hsdev->hdev);
index 2e452c6..f643b1c 100644 (file)
@@ -312,7 +312,7 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
        }
 
        tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
-       if (!tm_wheel->model_request) {
+       if (!tm_wheel->change_request) {
                ret = -ENOMEM;
                goto error5;
        }
index 9993133..4647461 100644 (file)
@@ -45,6 +45,7 @@
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME          BIT(5)
 #define I2C_HID_QUIRK_BAD_INPUT_SIZE           BIT(6)
+#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET    BIT(7)
 
 
 /* flags */
@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
+       /*
+        * Sending the wakeup after reset actually break ELAN touchscreen controller
+        */
+       { USB_VENDOR_ID_ELAN, HID_ANY_ID,
+                I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
        { 0, 0 }
 };
 
@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        }
 
        /* At least some SIS devices need this after reset */
-       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
+               ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 
 out_unlock:
        mutex_unlock(&ihid->reset_lock);
@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
        hid->product = le16_to_cpu(ihid->hdesc.wProductID);
 
-       snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
-                client->name, hid->vendor, hid->product);
+       snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+                client->name, (u16)hid->vendor, (u16)hid->product);
        strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
 
        ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
index 21b87e4..07e3cbc 100644 (file)
@@ -28,6 +28,8 @@
 #define EHL_Ax_DEVICE_ID       0x4BB3
 #define TGL_LP_DEVICE_ID       0xA0FC
 #define TGL_H_DEVICE_ID                0x43FC
+#define ADL_S_DEVICE_ID                0x7AF8
+#define ADL_P_DEVICE_ID                0x51FC
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 06081cf..a6d5173 100644 (file)
@@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index 7b27ec3..5571e74 100644 (file)
@@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid)
 
        shid->hid->dev.parent = shid->dev;
        shid->hid->bus = BUS_HOST;
-       shid->hid->vendor = cpu_to_le16(shid->attrs.vendor);
-       shid->hid->product = cpu_to_le16(shid->attrs.product);
-       shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version);
+       shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor);
+       shid->hid->product = get_unaligned_le16(&shid->attrs.product);
+       shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version);
        shid->hid->country = shid->hid_desc.country_code;
 
        snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X",
index 86257ce..4e90773 100644 (file)
@@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
        raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
        dir = usbhid->ctrl[usbhid->ctrltail].dir;
 
-       len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       len = hid_report_len(report);
        if (dir == USB_DIR_OUT) {
                usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
                usbhid->urbctrl->transfer_buffer_length = len;
index ea126c5..3b4ee21 100644 (file)
@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
 
        if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
            pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+               error = -EPERM;
                hid_notice(hid,
                           "device does not support device managed pool\n");
                goto fail;
index 2970892..f2221ca 100644 (file)
@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
-       if (disallow_fan_support && index >= 8)
+       if (disallow_fan_support && index >= 20)
                return 0;
        if (disallow_fan_type_call &&
-           (index == 9 || index == 12 || index == 15))
+           (index == 21 || index == 25 || index == 28))
                return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
index e248424..aec294c 100644 (file)
@@ -37,6 +37,8 @@ struct fsp3y_data {
        struct pmbus_driver_info info;
        int chip;
        int page;
+
+       bool vout_linear_11;
 };
 
 #define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info)
@@ -108,11 +110,9 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
        int rv;
 
        /*
-        * YH5151-E outputs vout in linear11. The conversion is done when
-        * reading. Here, we have to inject pmbus_core with the correct
-        * exponent (it is -6).
+        * Inject an exponent for non-compliant YH5151-E.
         */
-       if (data->chip == yh5151e && reg == PMBUS_VOUT_MODE)
+       if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE)
                return 0x1A;
 
        rv = set_page(client, page);
@@ -161,10 +161,9 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
                return rv;
 
        /*
-        * YH-5151E is non-compliant and outputs output voltages in linear11
-        * instead of linear16.
+        * Handle YH-5151E non-compliant linear11 vout voltage.
         */
-       if (data->chip == yh5151e && reg == PMBUS_READ_VOUT)
+       if (data->vout_linear_11 && reg == PMBUS_READ_VOUT)
                rv = sign_extend32(rv, 10) & 0xffff;
 
        return rv;
@@ -256,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client)
 
        data->info = fsp3y_info[data->chip];
 
+       /*
+        * YH-5151E sometimes reports vout in linear11 and sometimes in
+        * linear16. This depends on the exact individual piece of hardware. One
+        * YH-5151E can use linear16 and another might use linear11 instead.
+        *
+        * The format can be recognized by reading VOUT_MODE - if it doesn't
+        * report a valid exponent, then vout uses linear11. Otherwise, the
+        * device is compliant and uses linear16.
+        */
+       data->vout_linear_11 = false;
+       if (data->chip == yh5151e) {
+               rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+               if (rv < 0)
+                       return rv;
+
+               if (rv == 0xFF)
+                       data->vout_linear_11 = true;
+       }
+
        return pmbus_do_probe(client, &data->info);
 }
 
index 40597a9..1a8caff 100644 (file)
@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
                info->read_word_data = raa_dmpvr2_read_word_data;
                break;
        case raa_dmpvr2_2rail_nontc:
-               info->func[0] &= ~PMBUS_HAVE_TEMP;
-               info->func[1] &= ~PMBUS_HAVE_TEMP;
+               info->func[0] &= ~PMBUS_HAVE_TEMP3;
+               info->func[1] &= ~PMBUS_HAVE_TEMP3;
                fallthrough;
        case raa_dmpvr2_2rail:
                info->pages = 2;
index b6e8b20..fa298b4 100644 (file)
@@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client)
                dev_err(&client->dev, "Failed to read Manufacturer ID\n");
                return ret;
        }
-       if (ret != 5 || strncmp(buf, "DELTA", 5)) {
+       if (ret != 6 || strncmp(buf, "DELTA", 5)) {
                buf[ret] = '\0';
                dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf);
                return -ENODEV;
index 314f8d8..9058f09 100644 (file)
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
                          bool persistent, u8 *smt_idx);
 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
 void cxgb4_quiesce_rx(struct sge_rspq *q);
 int cxgb4_port_mirror_alloc(struct net_device *dev);
index 421bd9b..1f601de 100644 (file)
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
 /*
  * net_device operations
  */
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
        return err;
 }
 
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
index 1b88bd1..dd9be22 100644 (file)
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
        if (!ch_flower)
                return -ENOENT;
 
+       rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+                              adap->flower_ht_params);
+
        ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
                                      &ch_flower->fs, ch_flower->filter_id);
        if (ret)
-               goto err;
+               netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+                          ch_flower->filter_id, ret);
 
-       ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
-                                    adap->flower_ht_params);
-       if (ret) {
-               netdev_err(dev, "Flow remove from rhashtable failed");
-               goto err;
-       }
        kfree_rcu(ch_flower, rcu);
-
-err:
        return ret;
 }
 
index 6c259de..338b04f 100644 (file)
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
         * down before configuring tc params.
         */
        if (netif_running(dev)) {
-               cxgb_close(dev);
+               netif_tx_stop_all_queues(dev);
+               netif_carrier_off(dev);
                needs_bring_up = true;
        }
 
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
        }
 
 out:
-       if (needs_bring_up)
-               cxgb_open(dev);
+       if (needs_bring_up) {
+               netif_tx_start_all_queues(dev);
+               netif_carrier_on(dev);
+       }
 
        mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
        return ret;
index 1e5f2ed..6a099cb 100644 (file)
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
        if (!eosw_txq)
                return -ENOMEM;
 
+       if (!(adap->flags & CXGB4_FW_OK)) {
+               /* Don't stall caller when access to FW is lost */
+               complete(&eosw_txq->completion);
+               return -EIO;
+       }
+
        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
index de70c16..b883ab8 100644 (file)
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+               if (result == I40E_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = I40E_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 46d8844..68f177a 100644 (file)
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return I40E_XDP_REDIR;
        }
 
        switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
                result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+               if (result == I40E_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index e35db3f..2924c67 100644 (file)
@@ -335,6 +335,7 @@ struct ice_vsi {
        struct ice_tc_cfg tc_cfg;
        struct bpf_prog *xdp_prog;
        struct ice_ring **xdp_rings;     /* XDP ring array */
+       unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
        u16 num_xdp_txq;                 /* Used XDP queues */
        u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -547,15 +548,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
  */
 static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
 {
+       struct ice_vsi *vsi = ring->vsi;
        u16 qid = ring->q_index;
 
        if (ice_ring_is_xdp(ring))
-               qid -= ring->vsi->num_xdp_txq;
+               qid -= vsi->num_xdp_txq;
 
-       if (!ice_is_xdp_ena_vsi(ring->vsi))
+       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
                return NULL;
 
-       return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+       return xsk_get_pool_from_qid(vsi->netdev, qid);
 }
 
 /**
index d9ddd0b..99301ad 100644 (file)
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
                ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
                                                100000baseKR4_Full);
        }
-
-       /* Autoneg PHY types */
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
-               ethtool_link_ksettings_add_link_mode(ks, supported,
-                                                    Autoneg);
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    Autoneg);
-       }
 }
 
 #define TEST_SET_BITS_TIMEOUT  50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
                ks->base.port = PORT_TP;
                break;
        case ICE_MEDIA_BACKPLANE:
-               ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
-               ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     Backplane);
                ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
        if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
                ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
 
+       /* Set supported and advertised autoneg */
+       if (ice_is_phy_caps_an_enabled(caps)) {
+               ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+               ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+       }
+
 done:
        kfree(caps);
        return err;
index de38a0f..9b8300d 100644 (file)
@@ -31,6 +31,7 @@
 #define PF_FW_ATQLEN_ATQOVFL_M                 BIT(29)
 #define PF_FW_ATQLEN_ATQCRIT_M                 BIT(30)
 #define VF_MBX_ARQLEN(_VF)                     (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF)                     (0x0022A800 + ((_VF) * 4))
 #define PF_FW_ATQLEN_ATQENABLE_M               BIT(31)
 #define PF_FW_ATQT                             0x00080400
 #define PF_MBX_ARQBAH                          0x0022E400
index 82e2ce2..d70ee57 100644 (file)
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->q_vectors)
                goto err_vectors;
 
+       vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+       if (!vsi->af_xdp_zc_qps)
+               goto err_zc_qps;
+
        return 0;
 
+err_zc_qps:
+       devm_kfree(dev, vsi->q_vectors);
 err_vectors:
        devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -194,6 +200,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
                break;
        case ICE_VSI_VF:
                vf = &pf->vf[vsi->vf_id];
+               if (vf->num_req_qs)
+                       vf->num_vf_qs = vf->num_req_qs;
                vsi->alloc_txq = vf->num_vf_qs;
                vsi->alloc_rxq = vf->num_vf_qs;
                /* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +296,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
        dev = ice_pf_to_dev(pf);
 
+       if (vsi->af_xdp_zc_qps) {
+               bitmap_free(vsi->af_xdp_zc_qps);
+               vsi->af_xdp_zc_qps = NULL;
+       }
        /* free the ring and vector containers */
        if (vsi->q_vectors) {
                devm_kfree(dev, vsi->q_vectors);
index e2b4b29..04748aa 100644 (file)
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
            struct bpf_prog *xdp_prog)
 {
        struct ice_ring *xdp_ring;
-       int err;
+       int err, result;
        u32 act;
 
        act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
                return ICE_XDP_PASS;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
-               return ice_xmit_xdp_buff(xdp, xdp_ring);
+               result = ice_xmit_xdp_buff(xdp, xdp_ring);
+               if (result == ICE_XDP_CONSUMED)
+                       goto out_failure;
+               return result;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               return ICE_XDP_REDIR;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
@@ -2143,6 +2149,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
        struct ice_tx_offload_params offload = { 0 };
        struct ice_vsi *vsi = tx_ring->vsi;
        struct ice_tx_buf *first;
+       struct ethhdr *eth;
        unsigned int count;
        int tso, csum;
 
@@ -2189,7 +2196,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
                goto out_drop;
 
        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
-       if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+       eth = (struct ethhdr *)skb_mac_header(skb);
+       if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+                     eth->h_proto == htons(ETH_P_LLDP)) &&
                     vsi->type == ICE_VSI_PF &&
                     vsi->port_info->qos_cfg.is_sw_lldp))
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
index a1d22d2..97a46c6 100644 (file)
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
         */
        clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
 
-       /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
-        * in the case of VFR. If this is done for PFR, it can mess up VF
-        * resets because the VF driver may already have started cleanup
-        * by the time we get here.
+       /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+        * needs to clear them in the case of VFR/VFLR. If this is done for
+        * PFR, it can mess up VF resets because the VF driver may already
+        * have started cleanup by the time we get here.
         */
-       if (!is_pfr)
+       if (!is_pfr) {
                wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+               wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+       }
 
        /* In the case of a VFLR, the HW has already reset the VF and we
         * just need to clean up, so don't hit the VFRTRIG register.
@@ -1698,7 +1700,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
                ice_vf_ctrl_vsi_release(vf);
 
        ice_vf_pre_vsi_rebuild(vf);
-       ice_vf_rebuild_vsi_with_release(vf);
+
+       if (ice_vf_rebuild_vsi_with_release(vf)) {
+               dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+               return false;
+       }
+
        ice_vf_post_vsi_rebuild(vf);
 
        /* if the VF has been reset allow it to come up again */
index faa7b8d..a1f89ea 100644 (file)
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
        if (!pool)
                return -EINVAL;
 
+       clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
        return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
        if (err)
                return err;
 
+       set_bit(qid, vsi->af_xdp_zc_qps);
+
        return 0;
 }
 
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return ICE_XDP_REDIR;
        }
 
        switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
+               if (result == ICE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
index 7bda8c5..2d3daf0 100644 (file)
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                       struct sk_buff *skb);
+                       ktime_t *timestamp);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
index 038a9fd..b2a042f 100644 (file)
@@ -8280,7 +8280,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
 static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
                                         struct igb_rx_buffer *rx_buffer,
                                         struct xdp_buff *xdp,
-                                        union e1000_adv_rx_desc *rx_desc)
+                                        ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8300,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
        if (unlikely(!skb))
                return NULL;
 
-       if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
-                       xdp->data += IGB_TS_HDR_LEN;
-                       size -= IGB_TS_HDR_LEN;
-               }
-       }
+       if (timestamp)
+               skb_hwtstamps(skb)->hwtstamp = timestamp;
 
        /* Determine available headroom for copy */
        headlen = size;
@@ -8336,7 +8332,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
                                     struct igb_rx_buffer *rx_buffer,
                                     struct xdp_buff *xdp,
-                                    union e1000_adv_rx_desc *rx_desc)
+                                    ktime_t timestamp)
 {
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8359,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
        if (metasize)
                skb_metadata_set(skb, metasize);
 
-       /* pull timestamp out of packet data */
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
-                       __skb_pull(skb, IGB_TS_HDR_LEN);
-       }
+       if (timestamp)
+               skb_hwtstamps(skb)->hwtstamp = timestamp;
 
        /* update buffer offset */
 #if (PAGE_SIZE < 8192)
@@ -8401,18 +8394,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
                break;
        case XDP_TX:
                result = igb_xdp_xmit_back(adapter, xdp);
+               if (result == IGB_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-               if (!err)
-                       result = IGB_XDP_REDIR;
-               else
-                       result = IGB_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = IGB_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
@@ -8682,7 +8677,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        while (likely(total_packets < budget)) {
                union e1000_adv_rx_desc *rx_desc;
                struct igb_rx_buffer *rx_buffer;
+               ktime_t timestamp = 0;
+               int pkt_offset = 0;
                unsigned int size;
+               void *pktbuf;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8700,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                dma_rmb();
 
                rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+               pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+               /* pull rx packet timestamp if available and valid */
+               if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+                       int ts_hdr_len;
+
+                       ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+                                                        pktbuf, &timestamp);
+
+                       pkt_offset += ts_hdr_len;
+                       size -= ts_hdr_len;
+               }
 
                /* retrieve a buffer from the ring */
                if (!skb) {
-                       unsigned int offset = igb_rx_offset(rx_ring);
-                       unsigned char *hard_start;
+                       unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+                       unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
 
-                       hard_start = page_address(rx_buffer->page) +
-                                    rx_buffer->page_offset - offset;
                        xdp_prepare_buff(&xdp, hard_start, offset, size, true);
 #if (PAGE_SIZE > 4096)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8740,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                } else if (skb)
                        igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
                else if (ring_uses_build_skb(rx_ring))
-                       skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+                       skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+                                           timestamp);
                else
                        skb = igb_construct_skb(rx_ring, rx_buffer,
-                                               &xdp, rx_desc);
+                                               &xdp, timestamp);
 
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
index ba61fe9..d68cd44 100644 (file)
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        dev_kfree_skb_any(skb);
 }
 
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame.  The value is stored in little endian format starting on
  * byte 8
  *
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
  **/
 int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                       struct sk_buff *skb)
+                       ktime_t *timestamp)
 {
        struct igb_adapter *adapter = q_vector->adapter;
+       struct skb_shared_hwtstamps ts;
        __le64 *regval = (__le64 *)va;
        int adjust = 0;
 
        if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
-               return IGB_RET_PTP_DISABLED;
+               return 0;
 
        /* The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 
        /* check reserved dwords are zero, be/le doesn't matter for zero */
        if (regval[0])
-               return IGB_RET_PTP_INVALID;
+               return 0;
 
-       igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-                                  le64_to_cpu(regval[1]));
+       igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
 
        /* adjust timestamp for the RX latency based on link speed */
        if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
                        break;
                }
        }
-       skb_hwtstamps(skb)->hwtstamp =
-               ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 
-       return 0;
+       *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+       return IGB_TS_HDR_LEN;
 }
 
 /**
index 069471b..f1adf15 100644 (file)
@@ -2047,20 +2047,19 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
                break;
        case XDP_TX:
                if (igc_xdp_xmit_back(adapter, xdp) < 0)
-                       res = IGC_XDP_CONSUMED;
-               else
-                       res = IGC_XDP_TX;
+                       goto out_failure;
+               res = IGC_XDP_TX;
                break;
        case XDP_REDIRECT:
                if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
-                       res = IGC_XDP_CONSUMED;
-               else
-                       res = IGC_XDP_REDIRECT;
+                       goto out_failure;
+               res = IGC_XDP_REDIRECT;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(adapter->netdev, prog, act);
                fallthrough;
        case XDP_DROP:
index c5ec17d..2ac5b82 100644 (file)
@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
                break;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf)) {
-                       result = IXGBE_XDP_CONSUMED;
-                       break;
-               }
+               if (unlikely(!xdpf))
+                       goto out_failure;
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               if (result == IXGBE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
-               if (!err)
-                       result = IXGBE_XDP_REDIR;
-               else
-                       result = IXGBE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
+               result = IXGBE_XDP_REDIR;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 91ad5b9..f72d297 100644 (file)
@@ -106,9 +106,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 
        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+               if (err)
+                       goto out_failure;
                rcu_read_unlock();
-               return result;
+               return IXGBE_XDP_REDIR;
        }
 
        switch (act) {
@@ -116,16 +117,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                break;
        case XDP_TX:
                xdpf = xdp_convert_buff_to_frame(xdp);
-               if (unlikely(!xdpf)) {
-                       result = IXGBE_XDP_CONSUMED;
-                       break;
-               }
+               if (unlikely(!xdpf))
+                       goto out_failure;
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+               if (result == IXGBE_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index ba2ed8a..0e733cc 100644 (file)
@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
        case XDP_TX:
                xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
                result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+               if (result == IXGBEVF_XDP_CONSUMED)
+                       goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
+out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
index 8360289..d6513ae 100644 (file)
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       unsigned long fec_bitmap;
        u16 fec_policy = 0;
        int mode;
        int err;
 
-       if (bitmap_weight((unsigned long *)&fecparam->fec,
-                         ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+       bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+       if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
                return -EOPNOTSUPP;
 
        for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
        if (curr_val == new_val)
                return 0;
 
+       if (new_val && !priv->profile->rx_ptp_support &&
+           priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+               netdev_err(priv->netdev,
+                          "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+               return -EINVAL;
+       }
+
        new_params = priv->channels.params;
        MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
        if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
index ad0f694..ec6bafe 100644 (file)
@@ -3858,6 +3858,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
                        netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
        }
 
+       if (mlx5e_is_uplink_rep(priv)) {
+               features &= ~NETIF_F_HW_TLS_RX;
+               if (netdev->features & NETIF_F_HW_TLS_RX)
+                       netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+               features &= ~NETIF_F_HW_TLS_TX;
+               if (netdev->features & NETIF_F_HW_TLS_TX)
+                       netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+       }
+
        mutex_unlock(&priv->state_lock);
 
        return features;
@@ -3974,11 +3984,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
        return mlx5e_ptp_rx_manage_fs(priv, set);
 }
 
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+       bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+       int err;
+
+       if (!rx_filter)
+               /* Reset CQE compression to Admin default */
+               return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+       if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+               return 0;
+
+       /* Disable CQE compression */
+       netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+       if (err)
+               netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+       return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
 {
        struct mlx5e_params new_params;
+
+       if (ptp_rx == priv->channels.params.ptp_rx)
+               return 0;
+
+       new_params = priv->channels.params;
+       new_params.ptp_rx = ptp_rx;
+       return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+                                       &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
        struct hwtstamp_config config;
        bool rx_cqe_compress_def;
+       bool ptp_rx;
        int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3998,13 +4042,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
        }
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->channels.params;
        rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
 
        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               new_params.ptp_rx = false;
+               ptp_rx = false;
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
@@ -4021,24 +4064,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
-               new_params.ptp_rx = rx_cqe_compress_def;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
+               /* ptp_rx is set if both HW TS is set and CQE
+                * compression is set
+                */
+               ptp_rx = rx_cqe_compress_def;
                break;
        default:
-               mutex_unlock(&priv->state_lock);
-               return -ERANGE;
+               err = -ERANGE;
+               goto err_unlock;
        }
 
-       if (new_params.ptp_rx == priv->channels.params.ptp_rx)
-               goto out;
+       if (!priv->profile->rx_ptp_support)
+               err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+                                                    config.rx_filter != HWTSTAMP_FILTER_NONE);
+       else
+               err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+       if (err)
+               goto err_unlock;
 
-       err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
-                                      &new_params.ptp_rx, true);
-       if (err) {
-               mutex_unlock(&priv->state_lock);
-               return err;
-       }
-out:
        memcpy(&priv->tstamp, &config, sizeof(config));
        mutex_unlock(&priv->state_lock);
 
@@ -4047,6 +4091,9 @@ out:
 
        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+       mutex_unlock(&priv->state_lock);
+       return err;
 }
 
 int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
index 2c776e7..dd64878 100644 (file)
@@ -2015,11 +2015,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                    misc_parameters_3);
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
+       enum fs_flow_table_type fs_type;
        u16 addr_type = 0;
        u8 ip_proto = 0;
        u8 *match_level;
        int err;
 
+       fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
        match_level = outer_match_level;
 
        if (dissector->used_keys &
@@ -2145,6 +2147,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                if (match.mask->vlan_id ||
                    match.mask->vlan_priority ||
                    match.mask->vlan_tpid) {
+                       if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+                                                    fs_type)) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Matching on CVLAN is not supported");
+                               return -EOPNOTSUPP;
+                       }
+
                        if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
                                MLX5_SET(fte_match_set_misc, misc_c,
                                         outer_second_svlan_tag, 1);
index db1e742..d18a28a 100644 (file)
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
                         struct mlx5_fs_chains *chains,
                         int i)
 {
-       flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+       if (mlx5_chains_ignore_flow_level_supported(chains))
+               flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
 }
index d5d5763..106b50e 100644 (file)
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
                                                      reset_abort_work);
        struct mlx5_core_dev *dev = fw_reset->dev;
 
+       if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+               return;
+
        mlx5_sync_reset_clear_reset_requested(dev, true);
        mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
 }
index 00ef10a..20a4047 100644 (file)
@@ -107,7 +107,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
        return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
 }
 
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
 {
        return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
 }
index e96f345..d50bdb2 100644 (file)
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
 
 bool
 mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
 bool
 mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
 u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
 
 #else /* CONFIG_MLX5_CLS_ACT */
 
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
 static inline struct mlx5_flow_table *
 mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
                      u32 level) { return ERR_PTR(-EOPNOTSUPP); }
index 1fbcd01..7ccfd40 100644 (file)
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
        int ret;
 
        ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
-       ft_attr.level = dmn->info.caps.max_ft_level - 2;
+       ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+                             MLX5_FT_MAX_MULTIPATH_LEVEL);
        ft_attr.reformat_en = reformat_req;
        ft_attr.decap_en = reformat_req;
 
index c84c8bf..fc99ad8 100644 (file)
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_err(&pdev->dev,
                        "invalid sram_size %dB or board span %ldB\n",
                        mgp->sram_size, mgp->board_span);
+               status = -EINVAL;
                goto abort_with_ioremap;
        }
        memcpy_fromio(mgp->eeprom_strings,
index 5d956a5..c87202c 100644 (file)
@@ -1240,8 +1240,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
        priv->phylink_config.dev = &priv->dev->dev;
        priv->phylink_config.type = PHYLINK_NETDEV;
        priv->phylink_config.pcs_poll = true;
-       priv->phylink_config.ovr_an_inband =
-               priv->plat->mdio_bus_data->xpcs_an_inband;
+       if (priv->plat->mdio_bus_data)
+               priv->phylink_config.ovr_an_inband =
+                       priv->plat->mdio_bus_data->xpcs_an_inband;
 
        if (!fwnode)
                fwnode = dev_fwnode(priv->device);
@@ -7048,7 +7049,6 @@ error_mdio_register:
        stmmac_napi_del(ndev);
 error_hw_init:
        destroy_workqueue(priv->wq);
-       stmmac_bus_clks_config(priv, false);
        bitmap_free(priv->af_xdp_zc_qps);
 
        return ret;
index b9be530..ff83e00 100644 (file)
@@ -8,8 +8,8 @@
 
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
 #include <linux/regmap.h>
 #include <linux/ieee802154.h>
 #include <linux/irq.h>
@@ -1388,7 +1388,7 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
 
 static struct spi_driver mrf24j40_driver = {
        .driver = {
-               .of_match_table = of_match_ptr(mrf24j40_of_match),
+               .of_match_table = mrf24j40_of_match,
                .name = "mrf24j40",
        },
        .id_table = mrf24j40_ids,
index 9b6a4a8..78a01c7 100644 (file)
@@ -401,18 +401,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        /* If headroom is not 0, there is an offset between the beginning of the
         * data and the allocated space, otherwise the data and the allocated
         * space are aligned.
+        *
+        * Buffers with headroom use PAGE_SIZE as alloc size, see
+        * add_recvbuf_mergeable() + get_mergeable_buf_len()
         */
-       if (headroom) {
-               /* Buffers with headroom use PAGE_SIZE as alloc size,
-                * see add_recvbuf_mergeable() + get_mergeable_buf_len()
-                */
-               truesize = PAGE_SIZE;
-               tailroom = truesize - len - offset;
-               buf = page_address(page);
-       } else {
-               tailroom = truesize - len;
-               buf = p;
-       }
+       truesize = headroom ? PAGE_SIZE : truesize;
+       tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
+       buf = p - headroom;
 
        len -= hdr_len;
        offset += hdr_padded_len;
@@ -958,7 +953,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page, offset,
                                                       len, PAGE_SIZE, false,
-                                                      metasize, headroom);
+                                                      metasize,
+                                                      VIRTIO_XDP_HEADROOM);
                                return head_skb;
                        }
                        break;
index fc52b2c..dbe1f85 100644 (file)
@@ -1,5 +1,4 @@
-ccflags-y := -O3
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
 ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
 wireguard-y := main.o
 wireguard-y += noise.o
index 3725e9c..b7197e8 100644 (file)
@@ -6,6 +6,8 @@
 #include "allowedips.h"
 #include "peer.h"
 
+static struct kmem_cache *node_cache;
+
 static void swap_endian(u8 *dst, const u8 *src, u8 bits)
 {
        if (bits == 32) {
@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
        node->bitlen = bits;
        memcpy(node->bits, src, bits / 8U);
 }
-#define CHOOSE_NODE(parent, key) \
-       parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static inline u8 choose(struct allowedips_node *node, const u8 *key)
+{
+       return (key[node->bit_at_a] >> node->bit_at_b) & 1;
+}
 
 static void push_rcu(struct allowedips_node **stack,
                     struct allowedips_node __rcu *p, unsigned int *len)
@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
        }
 }
 
+static void node_free_rcu(struct rcu_head *rcu)
+{
+       kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
+}
+
 static void root_free_rcu(struct rcu_head *rcu)
 {
        struct allowedips_node *node, *stack[128] = {
@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
        while (len > 0 && (node = stack[--len])) {
                push_rcu(stack, node->bit[0], &len);
                push_rcu(stack, node->bit[1], &len);
-               kfree(node);
+               kmem_cache_free(node_cache, node);
        }
 }
 
@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
        }
 }
 
-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
-                               struct wg_peer *peer, struct mutex *lock)
-{
-#define REF(p) rcu_access_pointer(p)
-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
-#define PUSH(p) ({                                                             \
-               WARN_ON(IS_ENABLED(DEBUG) && len >= 128);                      \
-               stack[len++] = p;                                              \
-       })
-
-       struct allowedips_node __rcu **stack[128], **nptr;
-       struct allowedips_node *node, *prev;
-       unsigned int len;
-
-       if (unlikely(!peer || !REF(*top)))
-               return;
-
-       for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
-               nptr = stack[len - 1];
-               node = DEREF(nptr);
-               if (!node) {
-                       --len;
-                       continue;
-               }
-               if (!prev || REF(prev->bit[0]) == node ||
-                   REF(prev->bit[1]) == node) {
-                       if (REF(node->bit[0]))
-                               PUSH(&node->bit[0]);
-                       else if (REF(node->bit[1]))
-                               PUSH(&node->bit[1]);
-               } else if (REF(node->bit[0]) == prev) {
-                       if (REF(node->bit[1]))
-                               PUSH(&node->bit[1]);
-               } else {
-                       if (rcu_dereference_protected(node->peer,
-                               lockdep_is_held(lock)) == peer) {
-                               RCU_INIT_POINTER(node->peer, NULL);
-                               list_del_init(&node->peer_list);
-                               if (!node->bit[0] || !node->bit[1]) {
-                                       rcu_assign_pointer(*nptr, DEREF(
-                                              &node->bit[!REF(node->bit[0])]));
-                                       kfree_rcu(node, rcu);
-                                       node = DEREF(nptr);
-                               }
-                       }
-                       --len;
-               }
-       }
-
-#undef REF
-#undef DEREF
-#undef PUSH
-}
-
 static unsigned int fls128(u64 a, u64 b)
 {
        return a ? fls64(a) + 64U : fls64(b);
@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
                        found = node;
                if (node->cidr == bits)
                        break;
-               node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+               node = rcu_dereference_bh(node->bit[choose(node, key)]);
        }
        return found;
 }
@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
                           u8 cidr, u8 bits, struct allowedips_node **rnode,
                           struct mutex *lock)
 {
-       struct allowedips_node *node = rcu_dereference_protected(trie,
-                                               lockdep_is_held(lock));
+       struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
        struct allowedips_node *parent = NULL;
        bool exact = false;
 
@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
                        exact = true;
                        break;
                }
-               node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
-                                                lockdep_is_held(lock));
+               node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
        }
        *rnode = parent;
        return exact;
 }
 
+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+{
+       node->parent_bit_packed = (unsigned long)parent | bit;
+       rcu_assign_pointer(*parent, node);
+}
+
+static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
+{
+       u8 bit = choose(parent, node->bits);
+       connect_node(&parent->bit[bit], bit, node);
+}
+
 static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
               u8 cidr, struct wg_peer *peer, struct mutex *lock)
 {
@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
                return -EINVAL;
 
        if (!rcu_access_pointer(*trie)) {
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
+               node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
                if (unlikely(!node))
                        return -ENOMEM;
                RCU_INIT_POINTER(node->peer, peer);
                list_add_tail(&node->peer_list, &peer->allowedips_list);
                copy_and_assign_cidr(node, key, cidr, bits);
-               rcu_assign_pointer(*trie, node);
+               connect_node(trie, 2, node);
                return 0;
        }
        if (node_placement(*trie, key, cidr, bits, &node, lock)) {
@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
                return 0;
        }
 
-       newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+       newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
        if (unlikely(!newnode))
                return -ENOMEM;
        RCU_INIT_POINTER(newnode->peer, peer);
@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
        if (!node) {
                down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
        } else {
-               down = rcu_dereference_protected(CHOOSE_NODE(node, key),
-                                                lockdep_is_held(lock));
+               const u8 bit = choose(node, key);
+               down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
                if (!down) {
-                       rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+                       connect_node(&node->bit[bit], bit, newnode);
                        return 0;
                }
        }
@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
        parent = node;
 
        if (newnode->cidr == cidr) {
-               rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+               choose_and_connect_node(newnode, down);
                if (!parent)
-                       rcu_assign_pointer(*trie, newnode);
+                       connect_node(trie, 2, newnode);
                else
-                       rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
-                                          newnode);
-       } else {
-               node = kzalloc(sizeof(*node), GFP_KERNEL);
-               if (unlikely(!node)) {
-                       list_del(&newnode->peer_list);
-                       kfree(newnode);
-                       return -ENOMEM;
-               }
-               INIT_LIST_HEAD(&node->peer_list);
-               copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+                       choose_and_connect_node(parent, newnode);
+               return 0;
+       }
 
-               rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
-               rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
-               if (!parent)
-                       rcu_assign_pointer(*trie, node);
-               else
-                       rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
-                                          node);
+       node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+       if (unlikely(!node)) {
+               list_del(&newnode->peer_list);
+               kmem_cache_free(node_cache, newnode);
+               return -ENOMEM;
        }
+       INIT_LIST_HEAD(&node->peer_list);
+       copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+       choose_and_connect_node(node, down);
+       choose_and_connect_node(node, newnode);
+       if (!parent)
+               connect_node(trie, 2, node);
+       else
+               choose_and_connect_node(parent, node);
        return 0;
 }
 
@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
 void wg_allowedips_remove_by_peer(struct allowedips *table,
                                  struct wg_peer *peer, struct mutex *lock)
 {
+       struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
+       bool free_parent;
+
+       if (list_empty(&peer->allowedips_list))
+               return;
        ++table->seq;
-       walk_remove_by_peer(&table->root4, peer, lock);
-       walk_remove_by_peer(&table->root6, peer, lock);
+       list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
+               list_del_init(&node->peer_list);
+               RCU_INIT_POINTER(node->peer, NULL);
+               if (node->bit[0] && node->bit[1])
+                       continue;
+               child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+                                                 lockdep_is_held(lock));
+               if (child)
+                       child->parent_bit_packed = node->parent_bit_packed;
+               parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+               *parent_bit = child;
+               parent = (void *)parent_bit -
+                        offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+               free_parent = !rcu_access_pointer(node->bit[0]) &&
+                             !rcu_access_pointer(node->bit[1]) &&
+                             (node->parent_bit_packed & 3) <= 1 &&
+                             !rcu_access_pointer(parent->peer);
+               if (free_parent)
+                       child = rcu_dereference_protected(
+                                       parent->bit[!(node->parent_bit_packed & 1)],
+                                       lockdep_is_held(lock));
+               call_rcu(&node->rcu, node_free_rcu);
+               if (!free_parent)
+                       continue;
+               if (child)
+                       child->parent_bit_packed = parent->parent_bit_packed;
+               *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+               call_rcu(&parent->rcu, node_free_rcu);
+       }
 }
 
 int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
        return NULL;
 }
 
+int __init wg_allowedips_slab_init(void)
+{
+       node_cache = KMEM_CACHE(allowedips_node, 0);
+       return node_cache ? 0 : -ENOMEM;
+}
+
+void wg_allowedips_slab_uninit(void)
+{
+       rcu_barrier();
+       kmem_cache_destroy(node_cache);
+}
+
 #include "selftest/allowedips.c"
index e5c83ca..2346c79 100644 (file)
@@ -15,14 +15,11 @@ struct wg_peer;
 struct allowedips_node {
        struct wg_peer __rcu *peer;
        struct allowedips_node __rcu *bit[2];
-       /* While it may seem scandalous that we waste space for v4,
-        * we're alloc'ing to the nearest power of 2 anyway, so this
-        * doesn't actually make a difference.
-        */
-       u8 bits[16] __aligned(__alignof(u64));
        u8 cidr, bit_at_a, bit_at_b, bitlen;
+       u8 bits[16] __aligned(__alignof(u64));
 
-       /* Keep rarely used list at bottom to be beyond cache line. */
+       /* Keep rarely used members at bottom to be beyond cache line. */
+       unsigned long parent_bit_packed;
        union {
                struct list_head peer_list;
                struct rcu_head rcu;
@@ -33,7 +30,7 @@ struct allowedips {
        struct allowedips_node __rcu *root4;
        struct allowedips_node __rcu *root6;
        u64 seq;
-};
+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
 
 void wg_allowedips_init(struct allowedips *table);
 void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
 bool wg_allowedips_selftest(void);
 #endif
 
+int wg_allowedips_slab_init(void);
+void wg_allowedips_slab_uninit(void);
+
 #endif /* _WG_ALLOWEDIPS_H */
index 7a7d5f1..75dbe77 100644 (file)
@@ -21,13 +21,22 @@ static int __init mod_init(void)
 {
        int ret;
 
+       ret = wg_allowedips_slab_init();
+       if (ret < 0)
+               goto err_allowedips;
+
 #ifdef DEBUG
+       ret = -ENOTRECOVERABLE;
        if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
            !wg_ratelimiter_selftest())
-               return -ENOTRECOVERABLE;
+               goto err_peer;
 #endif
        wg_noise_init();
 
+       ret = wg_peer_init();
+       if (ret < 0)
+               goto err_peer;
+
        ret = wg_device_init();
        if (ret < 0)
                goto err_device;
@@ -44,6 +53,10 @@ static int __init mod_init(void)
 err_netlink:
        wg_device_uninit();
 err_device:
+       wg_peer_uninit();
+err_peer:
+       wg_allowedips_slab_uninit();
+err_allowedips:
        return ret;
 }
 
@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
 {
        wg_genetlink_uninit();
        wg_device_uninit();
+       wg_peer_uninit();
+       wg_allowedips_slab_uninit();
 }
 
 module_init(mod_init);
index cd5cb02..1acd00a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
+static struct kmem_cache *peer_cache;
 static atomic64_t peer_counter = ATOMIC64_INIT(0);
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
        if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
                return ERR_PTR(ret);
 
-       peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+       peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
        if (unlikely(!peer))
                return ERR_PTR(ret);
-       if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+       if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
                goto err;
 
        peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
        return peer;
 
 err:
-       kfree(peer);
+       kmem_cache_free(peer_cache, peer);
        return ERR_PTR(ret);
 }
 
@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
        /* Mark as dead, so that we don't allow jumping contexts after. */
        WRITE_ONCE(peer->is_dead, true);
 
-       /* The caller must now synchronize_rcu() for this to take effect. */
+       /* The caller must now synchronize_net() for this to take effect. */
 }
 
 static void peer_remove_after_dead(struct wg_peer *peer)
@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
        lockdep_assert_held(&peer->device->device_update_lock);
 
        peer_make_dead(peer);
-       synchronize_rcu();
+       synchronize_net();
        peer_remove_after_dead(peer);
 }
 
@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
                peer_make_dead(peer);
                list_add_tail(&peer->peer_list, &dead_peers);
        }
-       synchronize_rcu();
+       synchronize_net();
        list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
                peer_remove_after_dead(peer);
 }
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
        /* The final zeroing takes care of clearing any remaining handshake key
         * material and other potentially sensitive information.
         */
-       kfree_sensitive(peer);
+       memzero_explicit(peer, sizeof(*peer));
+       kmem_cache_free(peer_cache, peer);
 }
 
 static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
                return;
        kref_put(&peer->refcount, kref_release);
 }
+
+int __init wg_peer_init(void)
+{
+       peer_cache = KMEM_CACHE(wg_peer, 0);
+       return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+       kmem_cache_destroy(peer_cache);
+}
index 8d53b68..76e4d31 100644 (file)
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
 void wg_peer_remove(struct wg_peer *peer);
 void wg_peer_remove_all(struct wg_device *wg);
 
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
 #endif /* _WG_PEER_H */
index 846db14..e173204 100644 (file)
 
 #include <linux/siphash.h>
 
-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
-                                             u8 cidr)
-{
-       swap_endian(dst, src, bits);
-       memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
-       if (cidr)
-               dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
-}
-
 static __init void print_node(struct allowedips_node *node, u8 bits)
 {
        char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
-       char *fmt_declaration = KERN_DEBUG
-               "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+       char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+       u8 ip1[16], ip2[16], cidr1, cidr2;
        char *style = "dotted";
-       u8 ip1[16], ip2[16];
        u32 color = 0;
 
+       if (node == NULL)
+               return;
        if (bits == 32) {
                fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
-               fmt_declaration = KERN_DEBUG
-                       "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+               fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
        } else if (bits == 128) {
                fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
-               fmt_declaration = KERN_DEBUG
-                       "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+               fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
        }
        if (node->peer) {
                hsiphash_key_t key = { { 0 } };
@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
                        hsiphash_1u32(0xabad1dea, &key) % 200;
                style = "bold";
        }
-       swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
-       printk(fmt_declaration, ip1, node->cidr, style, color);
+       wg_allowedips_read_node(node, ip1, &cidr1);
+       printk(fmt_declaration, ip1, cidr1, style, color);
        if (node->bit[0]) {
-               swap_endian_and_apply_cidr(ip2,
-                               rcu_dereference_raw(node->bit[0])->bits, bits,
-                               node->cidr);
-               printk(fmt_connection, ip1, node->cidr, ip2,
-                      rcu_dereference_raw(node->bit[0])->cidr);
-               print_node(rcu_dereference_raw(node->bit[0]), bits);
+               wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
+               printk(fmt_connection, ip1, cidr1, ip2, cidr2);
        }
        if (node->bit[1]) {
-               swap_endian_and_apply_cidr(ip2,
-                               rcu_dereference_raw(node->bit[1])->bits,
-                               bits, node->cidr);
-               printk(fmt_connection, ip1, node->cidr, ip2,
-                      rcu_dereference_raw(node->bit[1])->cidr);
-               print_node(rcu_dereference_raw(node->bit[1]), bits);
+               wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
+               printk(fmt_connection, ip1, cidr1, ip2, cidr2);
        }
+       if (node->bit[0])
+               print_node(rcu_dereference_raw(node->bit[0]), bits);
+       if (node->bit[1])
+               print_node(rcu_dereference_raw(node->bit[1]), bits);
 }
 
 static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
 {
        union nf_inet_addr mask;
 
-       memset(&mask, 0x00, 128 / 8);
-       memset(&mask, 0xff, cidr / 8);
+       memset(&mask, 0, sizeof(mask));
+       memset(&mask.all, 0xff, cidr / 8);
        if (cidr % 32)
                mask.all[cidr / 32] = (__force u32)htonl(
                        (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
 }
 
 static __init inline bool
-horrible_match_v4(const struct horrible_allowedips_node *node,
-                 struct in_addr *ip)
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
 {
        return (ip->s_addr & node->mask.ip) == node->ip.ip;
 }
 
 static __init inline bool
-horrible_match_v6(const struct horrible_allowedips_node *node,
-                 struct in6_addr *ip)
+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
 {
-       return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
-                      node->ip.ip6[0] &&
-              (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
-                      node->ip.ip6[1] &&
-              (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
-                      node->ip.ip6[2] &&
+       return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
+              (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
+              (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
               (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
 }
 
 static __init void
-horrible_insert_ordered(struct horrible_allowedips *table,
-                       struct horrible_allowedips_node *node)
+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
 {
        struct horrible_allowedips_node *other = NULL, *where = NULL;
        u8 my_cidr = horrible_mask_to_cidr(node->mask);
 
        hlist_for_each_entry(other, &table->head, table) {
-               if (!memcmp(&other->mask, &node->mask,
-                           sizeof(union nf_inet_addr)) &&
-                   !memcmp(&other->ip, &node->ip,
-                           sizeof(union nf_inet_addr)) &&
-                   other->ip_version == node->ip_version) {
+               if (other->ip_version == node->ip_version &&
+                   !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
+                   !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
                        other->value = node->value;
                        kfree(node);
                        return;
                }
+       }
+       hlist_for_each_entry(other, &table->head, table) {
                where = other;
                if (horrible_mask_to_cidr(other->mask) <= my_cidr)
                        break;
@@ -201,8 +181,7 @@ static __init int
 horrible_allowedips_insert_v4(struct horrible_allowedips *table,
                              struct in_addr *ip, u8 cidr, void *value)
 {
-       struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
-                                                       GFP_KERNEL);
+       struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 
        if (unlikely(!node))
                return -ENOMEM;
@@ -219,8 +198,7 @@ static __init int
 horrible_allowedips_insert_v6(struct horrible_allowedips *table,
                              struct in6_addr *ip, u8 cidr, void *value)
 {
-       struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
-                                                       GFP_KERNEL);
+       struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 
        if (unlikely(!node))
                return -ENOMEM;
@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
 }
 
 static __init void *
-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
-                             struct in_addr *ip)
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
 {
        struct horrible_allowedips_node *node;
-       void *ret = NULL;
 
        hlist_for_each_entry(node, &table->head, table) {
-               if (node->ip_version != 4)
-                       continue;
-               if (horrible_match_v4(node, ip)) {
-                       ret = node->value;
-                       break;
-               }
+               if (node->ip_version == 4 && horrible_match_v4(node, ip))
+                       return node->value;
        }
-       return ret;
+       return NULL;
 }
 
 static __init void *
-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
-                             struct in6_addr *ip)
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
 {
        struct horrible_allowedips_node *node;
-       void *ret = NULL;
 
        hlist_for_each_entry(node, &table->head, table) {
-               if (node->ip_version != 6)
+               if (node->ip_version == 6 && horrible_match_v6(node, ip))
+                       return node->value;
+       }
+       return NULL;
+}
+
+
+static __init void
+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
+{
+       struct horrible_allowedips_node *node;
+       struct hlist_node *h;
+
+       hlist_for_each_entry_safe(node, h, &table->head, table) {
+               if (node->value != value)
                        continue;
-               if (horrible_match_v6(node, ip)) {
-                       ret = node->value;
-                       break;
-               }
+               hlist_del(&node->table);
+               kfree(node);
        }
-       return ret;
+
 }
 
 static __init bool randomized_test(void)
@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
                        goto free;
                }
                kref_init(&peers[i]->refcount);
+               INIT_LIST_HEAD(&peers[i]->allowedips_list);
        }
 
        mutex_lock(&mutex);
@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
                        if (wg_allowedips_insert_v4(&t,
                                                    (struct in_addr *)mutated,
                                                    cidr, peer, &mutex) < 0) {
-                               pr_err("allowedips random malloc: FAIL\n");
+                               pr_err("allowedips random self-test malloc: FAIL\n");
                                goto free_locked;
                        }
                        if (horrible_allowedips_insert_v4(&h,
@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
                print_tree(t.root6, 128);
        }
 
-       for (i = 0; i < NUM_QUERIES; ++i) {
-               prandom_bytes(ip, 4);
-               if (lookup(t.root4, 32, ip) !=
-                   horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
-                       pr_err("allowedips random self-test: FAIL\n");
-                       goto free;
+       for (j = 0;; ++j) {
+               for (i = 0; i < NUM_QUERIES; ++i) {
+                       prandom_bytes(ip, 4);
+                       if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+                               horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
+                               pr_err("allowedips random v4 self-test: FAIL\n");
+                               goto free;
+                       }
+                       prandom_bytes(ip, 16);
+                       if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+                               pr_err("allowedips random v6 self-test: FAIL\n");
+                               goto free;
+                       }
                }
+               if (j >= NUM_PEERS)
+                       break;
+               mutex_lock(&mutex);
+               wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
+               mutex_unlock(&mutex);
+               horrible_allowedips_remove_by_value(&h, peers[j]);
        }
 
-       for (i = 0; i < NUM_QUERIES; ++i) {
-               prandom_bytes(ip, 16);
-               if (lookup(t.root6, 128, ip) !=
-                   horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
-                       pr_err("allowedips random self-test: FAIL\n");
-                       goto free;
-               }
+       if (t.root4 || t.root6) {
+               pr_err("allowedips random self-test removal: FAIL\n");
+               goto free;
        }
+
        ret = true;
 
 free:
index d9ad850..8c496b7 100644 (file)
@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
        if (new4)
                wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
        mutex_unlock(&wg->socket_update_lock);
-       synchronize_rcu();
+       synchronize_net();
        sock_free(old4);
        sock_free(old6);
 }
index 977acab..03fe628 100644 (file)
@@ -514,10 +514,36 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
 {
        struct sk_buff *skb = phy->rx_amsdu[q].head;
+       struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
        struct mt76_dev *dev = phy->dev;
 
        phy->rx_amsdu[q].head = NULL;
        phy->rx_amsdu[q].tail = NULL;
+
+       /*
+        * Validate if the amsdu has a proper first subframe.
+        * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
+        * flag of the QoS header gets flipped. In such cases, the first
+        * subframe has a LLC/SNAP header in the location of the destination
+        * address.
+        */
+       if (skb_shinfo(skb)->frag_list) {
+               int offset = 0;
+
+               if (!(status->flag & RX_FLAG_8023)) {
+                       offset = ieee80211_get_hdrlen_from_skb(skb);
+
+                       if ((status->flag &
+                            (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
+                           RX_FLAG_DECRYPTED)
+                               offset += 8;
+               }
+
+               if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+                       dev_kfree_skb(skb);
+                       return;
+               }
+       }
        __skb_queue_tail(&dev->rx_skb[q], skb);
 }
 
index 86341d1..d20f05a 100644 (file)
@@ -510,7 +510,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
        mutex_init(&dev->pm.mutex);
        init_waitqueue_head(&dev->pm.wait);
        spin_lock_init(&dev->pm.txq_lock);
-       set_bit(MT76_STATE_PM, &dev->mphy.state);
        INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
        INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
        INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
index f81a17d..e2dcfee 100644 (file)
@@ -1912,8 +1912,9 @@ void mt7615_pm_wake_work(struct work_struct *work)
                        napi_schedule(&dev->mt76.napi[i]);
                mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
                mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
-               ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                            MT7615_WATCHDOG_TIME);
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+                       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+                                                    MT7615_WATCHDOG_TIME);
        }
 
        ieee80211_wake_queues(mphy->hw);
index 17fe418..d1be78b 100644 (file)
@@ -51,16 +51,13 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        return ret;
 }
 
-static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
        struct mt76_phy *mphy = &dev->mt76.phy;
        u32 status;
        int ret;
 
-       if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
-               goto out;
-
        sdio_claim_host(func);
 
        sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
@@ -76,13 +73,21 @@ static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
        }
 
        sdio_release_host(func);
-
-out:
        dev->pm.last_activity = jiffies;
 
        return 0;
 }
 
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+       struct mt76_phy *mphy = &dev->mt76.phy;
+
+       if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+               return __mt7663s_mcu_drv_pmctrl(dev);
+
+       return 0;
+}
+
 static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
 {
        struct sdio_func *func = dev->mt76.sdio.func;
@@ -123,7 +128,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
        struct mt7615_mcu_ops *mcu_ops;
        int ret;
 
-       ret = mt7663s_mcu_drv_pmctrl(dev);
+       ret = __mt7663s_mcu_drv_pmctrl(dev);
        if (ret)
                return ret;
 
index c55698f..028ff43 100644 (file)
@@ -55,10 +55,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
 
        dev->mt76.mcu_ops = &mt7663u_mcu_ops,
 
-       /* usb does not support runtime-pm */
-       clear_bit(MT76_STATE_PM, &dev->mphy.state);
        mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
-
        if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
                mt7615_mcu_restart(&dev->mt76);
                if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
index fe0ab5e..6195616 100644 (file)
@@ -721,6 +721,10 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
        phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
        phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
        phy->rcpi = rcpi;
+       phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
+                               sta->ht_cap.ampdu_factor) |
+                    FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
+                               sta->ht_cap.ampdu_density);
 
        tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
        ra_info = (struct sta_rec_ra_info *)tlv;
index 5847f94..b795e72 100644 (file)
@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
        .reconfig_complete = mt76x02_reconfig_complete,
 };
 
-static int mt76x0e_register_device(struct mt76x02_dev *dev)
+static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
 {
        int err;
 
@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
        if (err < 0)
                return err;
 
-       err = mt76x02_dma_init(dev);
-       if (err < 0)
-               return err;
+       if (!resume) {
+               err = mt76x02_dma_init(dev);
+               if (err < 0)
+                       return err;
+       }
 
        err = mt76x0_init_hardware(dev);
        if (err < 0)
@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
        mt76_clear(dev, 0x110, BIT(9));
        mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
 
+       return 0;
+}
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+       int err;
+
+       err = mt76x0e_init_hardware(dev, false);
+       if (err < 0)
+               return err;
+
        err = mt76x0_register_device(dev);
        if (err < 0)
                return err;
@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                return ret;
 
+       mt76_pci_disable_aspm(pdev);
+
        mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
                                 &drv_ops);
        if (!mdev)
@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
        mt76_free_device(mdev);
 }
 
+#ifdef CONFIG_PM
+static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int i;
+
+       mt76_worker_disable(&mdev->tx_worker);
+       for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
+               mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
+       for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
+               mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
+       napi_disable(&mdev->tx_napi);
+
+       mt76_for_each_q_rx(mdev, i)
+               napi_disable(&mdev->napi[i]);
+
+       mt76x02_dma_disable(dev);
+       mt76x02_mcu_cleanup(dev);
+       mt76x0_chip_onoff(dev, false, false);
+
+       pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+       pci_save_state(pdev);
+
+       return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int mt76x0e_resume(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int err, i;
+
+       err = pci_set_power_state(pdev, PCI_D0);
+       if (err)
+               return err;
+
+       pci_restore_state(pdev);
+
+       mt76_worker_enable(&mdev->tx_worker);
+
+       mt76_for_each_q_rx(mdev, i) {
+               mt76_queue_rx_reset(dev, i);
+               napi_enable(&mdev->napi[i]);
+               napi_schedule(&mdev->napi[i]);
+       }
+
+       napi_enable(&mdev->tx_napi);
+       napi_schedule(&mdev->tx_napi);
+
+       return mt76x0e_init_hardware(dev, true);
+}
+#endif /* CONFIG_PM */
+
 static const struct pci_device_id mt76x0e_device_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
        { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
        .id_table       = mt76x0e_device_table,
        .probe          = mt76x0e_probe,
        .remove         = mt76x0e_remove,
+#ifdef CONFIG_PM
+       .suspend        = mt76x0e_suspend,
+       .resume         = mt76x0e_resume,
+#endif /* CONFIG_PM */
 };
 
 module_pci_driver(mt76x0e_driver);
index fe28bf4..1763ea0 100644 (file)
@@ -76,8 +76,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
        struct wiphy *wiphy = hw->wiphy;
 
        hw->queues = 4;
-       hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
-       hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+       hw->max_rx_aggregation_subframes = 64;
+       hw->max_tx_aggregation_subframes = 128;
 
        hw->radiotap_timestamp.units_pos =
                IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
index 214bd18..decf2d5 100644 (file)
@@ -1404,8 +1404,9 @@ void mt7921_pm_wake_work(struct work_struct *work)
                        napi_schedule(&dev->mt76.napi[i]);
                mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
                mt7921_tx_cleanup(dev);
-               ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
-                                            MT7921_WATCHDOG_TIME);
+               if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+                       ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+                                                    MT7921_WATCHDOG_TIME);
        }
 
        ieee80211_wake_queues(mphy->hw);
index f4c27aa..97a0ef3 100644 (file)
@@ -74,8 +74,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
                                IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
                else if (band == NL80211_BAND_5GHZ)
                        he_cap_elem->phy_cap_info[0] =
-                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
-                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
 
                he_cap_elem->phy_cap_info[1] =
                        IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
index 5f3d56d..67dc4b4 100644 (file)
@@ -402,20 +402,22 @@ static void
 mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
                          u16 wlan_idx)
 {
-       struct mt7921_mcu_wlan_info_event *wtbl_info =
-               (struct mt7921_mcu_wlan_info_event *)(skb->data);
-       struct rate_info rate = {};
-       u8 curr_idx = wtbl_info->rate_info.rate_idx;
-       u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
-       struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
+       struct mt7921_mcu_wlan_info_event *wtbl_info;
        struct mt76_phy *mphy = &dev->mphy;
        struct mt7921_sta_stats *stats;
+       struct rate_info rate = {};
        struct mt7921_sta *msta;
        struct mt76_wcid *wcid;
+       u8 idx;
 
        if (wlan_idx >= MT76_N_WCIDS)
                return;
 
+       wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
+       idx = wtbl_info->rate_info.rate_idx;
+       if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
+               return;
+
        rcu_read_lock();
 
        wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
@@ -426,7 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
        stats = &msta->stats;
 
        /* current rate */
-       mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
+       mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
+                                le16_to_cpu(wtbl_info->rate_info.rate[idx]));
        stats->tx_rate = rate;
 out:
        rcu_read_unlock();
index 37943dc..4697a94 100644 (file)
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
                int count)
 {
        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-       struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
        struct ib_sge *sge = &req->sge[1];
+       struct scatterlist *sgl;
        u32 len = 0;
        int i;
 
-       for (i = 0; i < count; i++, sgl++, sge++) {
+       for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
                sge->addr = sg_dma_address(sgl);
                sge->length = sg_dma_len(sgl);
                sge->lkey = queue->device->pd->local_dma_lkey;
                len += sge->length;
+               sge++;
        }
 
        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
index 4b29a5b..b20b8d0 100644 (file)
@@ -1005,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
        return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+               struct nvmet_req *req)
 {
-       req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+       req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
                        nvmet_data_transfer_len(req));
        if (!req->sg)
                goto out_err;
 
        if (req->metadata_len) {
-               req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+               req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
                                &req->metadata_sg_cnt, req->metadata_len);
                if (!req->metadata_sg)
                        goto out_free_sg;
        }
+
+       req->p2p_dev = p2p_dev;
+
        return 0;
 out_free_sg:
        pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
@@ -1025,25 +1029,19 @@ out_err:
        return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-       if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-               return false;
-
-       if (req->sq->ctrl && req->sq->qid && req->ns) {
-               req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-                                                req->ns->nsid);
-               if (req->p2p_dev)
-                       return true;
-       }
-
-       req->p2p_dev = NULL;
-       return false;
+       if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+           !req->sq->ctrl || !req->sq->qid || !req->ns)
+               return NULL;
+       return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-       if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+       struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+       if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
                return 0;
 
        req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1072,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
                pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
                if (req->metadata_sg)
                        pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+               req->p2p_dev = NULL;
        } else {
                sgl_free(req->sg);
                if (req->metadata_sg)
index cb30cb9..a5c4a18 100644 (file)
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+       if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+               return;
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
        }
+       ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        return 0;
 
 out_cleanup_queue:
+       clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_loop_shutdown_ctrl(ctrl);
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-               /* state change failure should never happen */
-               WARN_ON_ONCE(1);
+               if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+                   ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+                       /* state change failure for non-deleted ctrl? */
+                       WARN_ON_ONCE(1);
                return;
        }
 
index da5b414..85dcb70 100644 (file)
@@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
 #endif
 }
 
+bool pci_host_of_has_msi_map(struct device *dev)
+{
+       if (dev && dev->of_node)
+               return of_get_property(dev->of_node, "msi-map", NULL);
+       return false;
+}
+
 static inline int __of_pci_pci_compare(struct device_node *node,
                                       unsigned int data)
 {
index 3a62d09..2752046 100644 (file)
@@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
        device_enable_async_suspend(bus->bridge);
        pci_set_bus_of_node(bus);
        pci_set_bus_msi_domain(bus);
-       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+       if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+           !pci_host_of_has_msi_map(parent))
                bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
 
        if (!parent)
index 53ce78d..5e2e1b9 100644 (file)
@@ -2,6 +2,7 @@
 config VFIO_PCI
        tristate "VFIO support for PCI devices"
        depends on VFIO && PCI && EVENTFD
+       depends on MMU
        select VFIO_VIRQFD
        select IRQ_BYPASS_MANAGER
        help
index d57f037..70e28ef 100644 (file)
@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
                        if (len == 0xFF) {
                                len = vfio_ext_cap_len(vdev, ecap, epos);
                                if (len < 0)
-                                       return ret;
+                                       return len;
                        }
                }
 
index 361e5b5..470fcf7 100644 (file)
@@ -291,7 +291,7 @@ err_irq:
        vfio_platform_regions_cleanup(vdev);
 err_reg:
        mutex_unlock(&driver_lock);
-       module_put(THIS_MODULE);
+       module_put(vdev->parent_module);
        return ret;
 }
 
index a0747c3..a3e925a 100644 (file)
@@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
                return 0;
        }
 
-       size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
+       size = struct_size(cap_iovas, iova_ranges, iovas);
 
        cap_iovas = kzalloc(size, GFP_KERNEL);
        if (!cap_iovas)
index b292887..a591d29 100644 (file)
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        get_page(page);
+
+       if (vmf->vma->vm_file)
+               page->mapping = vmf->vma->vm_file->f_mapping;
+       else
+               printk(KERN_ERR "no mapping available\n");
+
+       BUG_ON(!page->mapping);
        page->index = vmf->pgoff;
 
        vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
        .page_mkwrite   = fb_deferred_io_mkwrite,
 };
 
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+       if (!PageDirty(page))
+               SetPageDirty(page);
+       return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+       .set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
        vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
+void fb_deferred_io_open(struct fb_info *info,
+                        struct inode *inode,
+                        struct file *file)
+{
+       file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
        struct fb_deferred_io *fbdefio = info->fbdefio;
+       struct page *page;
+       int i;
 
        BUG_ON(!fbdefio);
        cancel_delayed_work_sync(&info->deferred_work);
+
+       /* clear out the mapping that we setup */
+       for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+               page = fb_deferred_io_page(info, i);
+               page->mapping = NULL;
+       }
+
        mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
index 072780b..98f1930 100644 (file)
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
                if (res)
                        module_put(info->fbops->owner);
        }
+#ifdef CONFIG_FB_DEFERRED_IO
+       if (info->fbdefio)
+               fb_deferred_io_open(info, inode, file);
+#endif
 out:
        unlock_fb_info(info);
        if (res)
index d17ac30..1346d69 100644 (file)
@@ -457,7 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;
-               int len;
+               int len = 0;
 
                page = compressed_pages[pg_index];
                page->mapping = inode->vfs_inode.i_mapping;
@@ -465,10 +465,17 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);
 
-               if (pg_index == 0 && use_append)
-                       len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0);
-               else
-                       len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               /*
+                * Page can only be added to bio if the current bio fits in
+                * stripe.
+                */
+               if (!submit) {
+                       if (pg_index == 0 && use_append)
+                               len = bio_add_zone_append_page(bio, page,
+                                                              PAGE_SIZE, 0);
+                       else
+                               len = bio_add_page(bio, page, PAGE_SIZE, 0);
+               }
 
                page->mapping = NULL;
                if (submit || len < PAGE_SIZE) {
index f1d15b6..3d5c35e 100644 (file)
@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        trace_run_delayed_ref_head(fs_info, head, 0);
        btrfs_delayed_ref_unlock(head);
        btrfs_put_delayed_ref_head(head);
-       return 0;
+       return ret;
 }
 
 static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
index 294602f..441cee7 100644 (file)
@@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
-       int ret;
+       int ret = 0;
        const u32 csum_size = fs_info->csum_size;
        u32 blocksize_bits = fs_info->sectorsize_bits;
 
@@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
 
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
-                               goto out;
+                               break;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
@@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
-                               goto out;
+                               break;
                        }
+                       ret = 0;
 
                        key.offset = end_byte - 1;
                } else {
@@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                }
                btrfs_release_path(path);
        }
-       ret = 0;
-out:
        btrfs_free_path(path);
        return ret;
 }
 
+static int find_next_csum_offset(struct btrfs_root *root,
+                                struct btrfs_path *path,
+                                u64 *next_offset)
+{
+       const u32 nritems = btrfs_header_nritems(path->nodes[0]);
+       struct btrfs_key found_key;
+       int slot = path->slots[0] + 1;
+       int ret;
+
+       if (nritems == 0 || slot >= nritems) {
+               ret = btrfs_next_leaf(root, path);
+               if (ret < 0) {
+                       return ret;
+               } else if (ret > 0) {
+                       *next_offset = (u64)-1;
+                       return 0;
+               }
+               slot = path->slots[0];
+       }
+
+       btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
+
+       if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
+           found_key.type != BTRFS_EXTENT_CSUM_KEY)
+               *next_offset = (u64)-1;
+       else
+               *next_offset = found_key.offset;
+
+       return 0;
+}
+
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
@@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
-       u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
@@ -981,26 +1011,10 @@ again:
                        goto insert;
                }
        } else {
-               int slot = path->slots[0] + 1;
-               /* we didn't find a csum item, insert one */
-               nritems = btrfs_header_nritems(path->nodes[0]);
-               if (!nritems || (path->slots[0] >= nritems - 1)) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0) {
-                               goto out;
-                       } else if (ret > 0) {
-                               found_next = 1;
-                               goto insert;
-                       }
-                       slot = path->slots[0];
-               }
-               btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
-               if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
-                   found_key.type != BTRFS_EXTENT_CSUM_KEY) {
-                       found_next = 1;
-                       goto insert;
-               }
-               next_offset = found_key.offset;
+               /* We didn't find a csum item, insert one. */
+               ret = find_next_csum_offset(root, path, &next_offset);
+               if (ret < 0)
+                       goto out;
                found_next = 1;
                goto insert;
        }
@@ -1056,8 +1070,48 @@ extend_csum:
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sectorsize_bits;
                WARN_ON(tmp < 1);
+               extend_nr = max_t(int, 1, tmp);
+
+               /*
+                * A log tree can already have checksum items with a subset of
+                * the checksums we are trying to log. This can happen after
+                * doing a sequence of partial writes into prealloc extents and
+                * fsyncs in between, with a full fsync logging a larger subrange
+                * of an extent for which a previous fast fsync logged a smaller
+                * subrange. And this happens in particular due to merging file
+                * extent items when we complete an ordered extent for a range
+                * covered by a prealloc extent - this is done at
+                * btrfs_mark_extent_written().
+                *
+                * So if we try to extend the previous checksum item, which has
+                * a range that ends at the start of the range we want to insert,
+                * make sure we don't extend beyond the start offset of the next
+                * checksum item. If we are at the last item in the leaf, then
+                * forget the optimization of extending and add a new checksum
+                * item - it is not worth the complexity of releasing the path,
+                * getting the first key for the next leaf, repeat the btree
+                * search, etc, because log trees are temporary anyway and it
+                * would only save a few bytes of leaf space.
+                */
+               if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+                       if (path->slots[0] + 1 >=
+                           btrfs_header_nritems(path->nodes[0])) {
+                               ret = find_next_csum_offset(root, path, &next_offset);
+                               if (ret < 0)
+                                       goto out;
+                               found_next = 1;
+                               goto insert;
+                       }
+
+                       ret = find_next_csum_offset(root, path, &next_offset);
+                       if (ret < 0)
+                               goto out;
+
+                       tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits;
+                       if (tmp <= INT_MAX)
+                               extend_nr = min_t(int, extend_nr, tmp);
+               }
 
-               extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
index 33f1457..46f3929 100644 (file)
@@ -3000,6 +3000,18 @@ out:
        if (ret || truncated) {
                u64 unwritten_start = start;
 
+               /*
+                * If we failed to finish this ordered extent for any reason we
+                * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
+                * extent, and mark the inode with the error if it wasn't
+                * already set.  Any error during writeback would have already
+                * set the mapping error, so we need to set it if we're the ones
+                * marking this ordered extent as failed.
+                */
+               if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
+                                            &ordered_extent->flags))
+                       mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+
                if (truncated)
                        unwritten_start += logical_len;
                clear_extent_uptodate(io_tree, unwritten_start, end, NULL);
@@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       bool need_abort = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
@@ -9135,6 +9148,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             old_idx);
                if (ret)
                        goto out_fail;
+               need_abort = true;
        }
 
        /* And now for the dest. */
@@ -9150,8 +9164,11 @@ static int btrfs_rename_exchange(struct inode *old_dir,
                                             new_ino,
                                             btrfs_ino(BTRFS_I(old_dir)),
                                             new_idx);
-               if (ret)
+               if (ret) {
+                       if (need_abort)
+                               btrfs_abort_transaction(trans, ret);
                        goto out_fail;
+               }
        }
 
        /* Update inode version and ctime/mtime. */
index d434dc7..9178da0 100644 (file)
@@ -203,10 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst,
                         * inline extent's data to the page.
                         */
                        ASSERT(key.offset > 0);
-                       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                                 inline_data, size, datal,
-                                                 comp_type);
-                       goto out;
+                       goto copy_to_page;
                }
        } else if (i_size_read(dst) <= datal) {
                struct btrfs_file_extent_item *ei;
@@ -222,13 +219,10 @@ static int clone_copy_inline_extent(struct inode *dst,
                    BTRFS_FILE_EXTENT_INLINE)
                        goto copy_inline_extent;
 
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
 copy_inline_extent:
-       ret = 0;
        /*
         * We have no extent items, or we have an extent at offset 0 which may
         * or may not be inlined. All these cases are dealt the same way.
@@ -240,11 +234,13 @@ copy_inline_extent:
                 * clone. Deal with all these cases by copying the inline extent
                 * data into the respective page at the destination inode.
                 */
-               ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
-                                         inline_data, size, datal, comp_type);
-               goto out;
+               goto copy_to_page;
        }
 
+       /*
+        * Release path before starting a new transaction so we don't hold locks
+        * that would confuse lockdep.
+        */
        btrfs_release_path(path);
        /*
         * If we end up here it means were copy the inline extent into a leaf
@@ -281,11 +277,6 @@ copy_inline_extent:
        ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
 out:
        if (!ret && !trans) {
-               /*
-                * Release path before starting a new transaction so we don't
-                * hold locks that would confuse lockdep.
-                */
-               btrfs_release_path(path);
                /*
                 * No transaction here means we copied the inline extent into a
                 * page of the destination inode.
@@ -306,6 +297,21 @@ out:
                *trans_out = trans;
 
        return ret;
+
+copy_to_page:
+       /*
+        * Release our path because we don't need it anymore and also because
+        * copy_inline_to_page() needs to reserve data and metadata, which may
+        * need to flush delalloc when we are low on available space and
+        * therefore cause a deadlock if writeback of an inline extent needs to
+        * write to the same leaf or an ordered extent completion needs to write
+        * to the same leaf.
+        */
+       btrfs_release_path(path);
+
+       ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
+                                 inline_data, size, datal, comp_type);
+       goto out;
 }
 
 /**
index 326be57..362d14d 100644 (file)
@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                        if (ret)
                                goto out;
 
-                       btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+                       if (ret)
+                               goto out;
                }
 
                ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 
        if (nlink != inode->i_nlink) {
                set_nlink(inode, nlink);
-               btrfs_update_inode(trans, root, BTRFS_I(inode));
+               ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+               if (ret)
+                       goto out;
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
 
@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                        break;
 
                if (ret == 1) {
+                       ret = 0;
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
                ret = btrfs_del_item(trans, root, path);
                if (ret)
-                       goto out;
+                       break;
 
                btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
-               if (!inode)
-                       return -EIO;
+               if (!inode) {
+                       ret = -EIO;
+                       break;
+               }
 
                ret = fixup_inode_link_count(trans, root, inode);
                iput(inode);
                if (ret)
-                       goto out;
+                       break;
 
                /*
                 * fixup on a directory may create new entries,
@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                 */
                key.offset = (u64)-1;
        }
-       ret = 0;
-out:
        btrfs_release_path(path);
        return ret;
 }
index a0b542d..493a83e 100644 (file)
@@ -911,8 +911,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                current->backing_dev_info = inode_to_bdi(inode);
                buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops);
                current->backing_dev_info = NULL;
-               if (unlikely(buffered <= 0))
+               if (unlikely(buffered <= 0)) {
+                       if (!ret)
+                               ret = buffered;
                        goto out_unlock;
+               }
 
                /*
                 * We need to ensure that the page cache pages are written to
index ea7fc5c..d9cb261 100644 (file)
@@ -582,6 +582,16 @@ out_locked:
        spin_unlock(&gl->gl_lockref.lock);
 }
 
+static bool is_system_glock(struct gfs2_glock *gl)
+{
+       struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+       struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
+
+       if (gl == m_ip->i_gl)
+               return true;
+       return false;
+}
+
 /**
  * do_xmote - Calls the DLM to change the state of a lock
  * @gl: The lock state
@@ -671,17 +681,25 @@ skip_inval:
         * to see sd_log_error and withdraw, and in the meantime, requeue the
         * work for later.
         *
+        * We make a special exception for some system glocks, such as the
+        * system statfs inode glock, which needs to be granted before the
+        * gfs2_quotad daemon can exit, and that exit needs to finish before
+        * we can unmount the withdrawn file system.
+        *
         * However, if we're just unlocking the lock (say, for unmount, when
         * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
         * then it's okay to tell dlm to unlock it.
         */
        if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
                gfs2_withdraw_delayed(sdp);
-       if (glock_blocked_by_withdraw(gl)) {
-               if (target != LM_ST_UNLOCKED ||
-                   test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
+       if (glock_blocked_by_withdraw(gl) &&
+           (target != LM_ST_UNLOCKED ||
+            test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
+               if (!is_system_glock(gl)) {
                        gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
                        goto out;
+               } else {
+                       clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
                }
        }
 
@@ -1466,9 +1484,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
            glock_blocked_by_withdraw(gl) &&
            gh->gh_gl != sdp->sd_jinode_gl) {
                sdp->sd_glock_dqs_held++;
+               spin_unlock(&gl->gl_lockref.lock);
                might_sleep();
                wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
                            TASK_UNINTERRUPTIBLE);
+               spin_lock(&gl->gl_lockref.lock);
        }
        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1775,6 +1795,7 @@ __acquires(&lru_lock)
        while(!list_empty(list)) {
                gl = list_first_entry(list, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
+               clear_bit(GLF_LRU, &gl->gl_flags);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
@@ -1820,7 +1841,6 @@ static long gfs2_scan_glock_lru(int nr)
                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
-                       clear_bit(GLF_LRU, &gl->gl_flags);
                        freed++;
                        continue;
                }
index 454095e..54d3fbe 100644 (file)
@@ -396,7 +396,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
        struct timespec64 atime;
        u16 height, depth;
        umode_t mode = be32_to_cpu(str->di_mode);
-       bool is_new = ip->i_inode.i_flags & I_NEW;
+       bool is_new = ip->i_inode.i_state & I_NEW;
 
        if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
                goto corrupt;
index 97d54e5..42c15cf 100644 (file)
@@ -926,10 +926,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
 }
 
 /**
- * ail_drain - drain the ail lists after a withdraw
+ * gfs2_ail_drain - drain the ail lists after a withdraw
  * @sdp: Pointer to GFS2 superblock
  */
-static void ail_drain(struct gfs2_sbd *sdp)
+void gfs2_ail_drain(struct gfs2_sbd *sdp)
 {
        struct gfs2_trans *tr;
 
@@ -956,6 +956,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
                list_del(&tr->tr_list);
                gfs2_trans_free(sdp, tr);
        }
+       gfs2_drain_revokes(sdp);
        spin_unlock(&sdp->sd_ail_lock);
 }
 
@@ -1162,7 +1163,6 @@ out_withdraw:
        if (tr && list_empty(&tr->tr_list))
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);
-       ail_drain(sdp); /* frees all transactions */
        tr = NULL;
        goto out_end;
 }
index eea5801..fc905c2 100644 (file)
@@ -93,5 +93,6 @@ extern int gfs2_logd(void *data);
 extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
 extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
 extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
index 221e711..8ee05d2 100644 (file)
@@ -885,7 +885,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        gfs2_log_write_page(sdp, page);
 }
 
-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+void gfs2_drain_revokes(struct gfs2_sbd *sdp)
 {
        struct list_head *head = &sdp->sd_log_revokes;
        struct gfs2_bufdata *bd;
@@ -900,6 +900,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
        }
 }
 
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+       gfs2_drain_revokes(sdp);
+}
+
 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
                                  struct gfs2_log_header_host *head, int pass)
 {
index 31b6dd0..f707601 100644 (file)
@@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf);
 extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
                           struct gfs2_log_header_host *head, bool keep_cache);
+extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
        return sdp->sd_ldptrs;
index 3e08027..f4325b4 100644 (file)
@@ -131,6 +131,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
                return;
 
+       gfs2_ail_drain(sdp); /* frees all transactions */
        inode = sdp->sd_jdesc->jd_inode;
        ip = GFS2_I(inode);
        i_gl = ip->i_gl;
index 903458a..42380ed 100644 (file)
@@ -8228,6 +8228,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 {
        int i, ret;
 
+       imu->acct_pages = 0;
        for (i = 0; i < nr_pages; i++) {
                if (!PageCompound(pages[i])) {
                        imu->acct_pages++;
index 71fefb3..be5b6d2 100644 (file)
@@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
         * events generated by the listener process itself, without disclosing
         * the pids of other processes.
         */
-       if (!capable(CAP_SYS_ADMIN) &&
+       if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
            task_tgid(current) != event->pid)
                metadata.pid = 0;
 
-       if (path && path->mnt && path->dentry) {
+       /*
+        * For now, fid mode is required for an unprivileged listener and
+        * fid mode does not report fd in events.  Keep this check anyway
+        * for safety in case fid mode requirement is relaxed in the future
+        * to allow unprivileged listener to get events with no fd and no fid.
+        */
+       if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) &&
+           path && path->mnt && path->dentry) {
                fd = create_fd(group, path, &f);
                if (fd < 0)
                        return fd;
@@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
        int f_flags, fd;
        unsigned int fid_mode = flags & FANOTIFY_FID_BITS;
        unsigned int class = flags & FANOTIFY_CLASS_BITS;
+       unsigned int internal_flags = 0;
 
        pr_debug("%s: flags=%x event_f_flags=%x\n",
                 __func__, flags, event_f_flags);
@@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                 */
                if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode)
                        return -EPERM;
+
+               /*
+                * Setting the internal flag FANOTIFY_UNPRIV on the group
+                * prevents setting mount/filesystem marks on this group and
+                * prevents reporting pid and open fd in events.
+                */
+               internal_flags |= FANOTIFY_UNPRIV;
        }
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
                goto out_destroy_group;
        }
 
-       group->fanotify_data.flags = flags;
+       group->fanotify_data.flags = flags | internal_flags;
        group->memcg = get_mem_cgroup_from_mm(current->mm);
 
        group->fanotify_data.merge_hash = fanotify_alloc_merge_hash();
@@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
        group = f.file->private_data;
 
        /*
-        * An unprivileged user is not allowed to watch a mount point nor
-        * a filesystem.
+        * An unprivileged user is not allowed to setup mount nor filesystem
+        * marks.  This also includes setting up such marks by a group that
+        * was initialized by an unprivileged user.
         */
        ret = -EPERM;
-       if (!capable(CAP_SYS_ADMIN) &&
+       if ((!capable(CAP_SYS_ADMIN) ||
+            FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) &&
            mark_type != FAN_MARK_INODE)
                goto fput_and_out;
 
@@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void)
        max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS,
                                     FANOTIFY_DEFAULT_MAX_USER_MARKS);
 
+       BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10);
        BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
 
index a712b2a..57f0d5d 100644 (file)
@@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f)
        struct fsnotify_group *group = f->private_data;
 
        seq_printf(m, "fanotify flags:%x event-flags:%x\n",
-                  group->fanotify_data.flags,
+                  group->fanotify_data.flags & FANOTIFY_INIT_FLAGS,
                   group->fanotify_data.f_flags);
 
        show_fdinfo(m, f, fanotify_fdinfo);
index f17c3d3..7756579 100644 (file)
@@ -1855,6 +1855,45 @@ out:
        return ret;
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+                                       u64 start, u64 len)
+{
+       int ret;
+       u64 start_block, end_block, nr_blocks;
+       u64 p_block, offset;
+       u32 cluster, p_cluster, nr_clusters;
+       struct super_block *sb = inode->i_sb;
+       u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+       if (start + len < end)
+               end = start + len;
+
+       start_block = ocfs2_blocks_for_bytes(sb, start);
+       end_block = ocfs2_blocks_for_bytes(sb, end);
+       nr_blocks = end_block - start_block;
+       if (!nr_blocks)
+               return 0;
+
+       cluster = ocfs2_bytes_to_clusters(sb, start);
+       ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+                               &nr_clusters, NULL);
+       if (ret)
+               return ret;
+       if (!p_cluster)
+               return 0;
+
+       offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+       p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+       return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 {
        int ret;
        s64 llen;
-       loff_t size;
+       loff_t size, orig_isize;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
+       orig_isize = i_size_read(inode);
        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
-               sr->l_start += i_size_read(inode);
+               sr->l_start += orig_isize;
                break;
        default:
                ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
        default:
                ret = -EINVAL;
        }
+
+       /* zeroout eof blocks in the cluster. */
+       if (!ret && change_size && orig_isize < size) {
+               ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+                                       size - orig_isize);
+               if (!ret)
+                       i_size_write(inode, size);
+       }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                goto out_inode_unlock;
        }
 
-       if (change_size && i_size_read(inode) < size)
-               i_size_write(inode, size);
-
        inode->i_ctime = inode->i_mtime = current_time(inode);
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
index 565deea..8612f8f 100644 (file)
@@ -830,6 +830,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
 struct virtchnl_proto_hdrs {
        u8 tunnel_level;
+       u8 pad[3];
        /**
         * specify where protocol header start from.
         * 0 - from the outer layer
index bad41bc..a16dbec 100644 (file)
@@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
 #define FANOTIFY_INIT_FLAGS    (FANOTIFY_ADMIN_INIT_FLAGS | \
                                 FANOTIFY_USER_INIT_FLAGS)
 
+/* Internal group flags */
+#define FANOTIFY_UNPRIV                0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS  (FANOTIFY_UNPRIV)
+
 #define FANOTIFY_MARK_TYPE_BITS        (FAN_MARK_INODE | FAN_MARK_MOUNT | \
                                 FAN_MARK_FILESYSTEM)
 
index a8dccd2..ecfbcc0 100644 (file)
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+                               struct inode *inode,
+                               struct file *file);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
                                loff_t end, int datasync);
index 271021e..10e922c 100644 (file)
@@ -1167,8 +1167,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
  */
 static inline u32 hid_report_len(struct hid_report *report)
 {
-       /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
-       return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+       return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
 }
 
 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
index 232e1bd..9b0487c 100644 (file)
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int __host1x_client_register(struct host1x_client *client,
-                            struct lock_class_key *key);
-#define host1x_client_register(class) \
-       ({ \
-               static struct lock_class_key __key; \
-               __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client)                     \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+       })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client)                 \
+       ({                                              \
+               static struct lock_class_key __key;     \
+               __host1x_client_init(client, &__key);   \
+               __host1x_client_register(client);       \
        })
 
 int host1x_client_unregister(struct host1x_client *client);
index 6d16eed..eb86e80 100644 (file)
@@ -1289,6 +1289,8 @@ enum mlx5_fc_bulk_alloc_bitmask {
 
 #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
 
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
 enum {
        MLX5_STEERING_FORMAT_CONNECTX_5   = 0,
        MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
index c20211e..2430650 100644 (file)
@@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
 struct device_node;
 struct irq_domain;
 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
 
 /* Arch may override this (weak) */
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
 #else  /* CONFIG_OF */
 static inline struct irq_domain *
 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
 #endif  /* CONFIG_OF */
 
 static inline struct device_node *
index 46b1378..a43047b 100644 (file)
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
  * To be differentiate with macro pte_mkyoung, this macro is used on platforms
  * where software maintains page access bit.
  */
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+       return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
+#endif
+
 #ifndef pte_savedwrite
 #define pte_savedwrite pte_write
 #endif
index 48ecca8..b655d86 100644 (file)
@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
  * The link_support layer is used to add any Link Layer specific
  * framing.
  */
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                        struct cflayer *link_support, int head_room,
                        struct cflayer **layer, int (**rcv_func)(
                                struct sk_buff *, struct net_device *,
index 2aa5e91..8819ff4 100644 (file)
@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
  * @fcs:       Specify if checksum is used in CAIF Framing Layer.
  * @head_room: Head space needed by link specific protocol.
  */
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
index 14a55e0..67cce87 100644 (file)
@@ -9,4 +9,5 @@
 #include <net/caif/caif_layer.h>
 
 struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
 #endif
index 27eeb61..0a5655e 100644 (file)
@@ -1506,16 +1506,10 @@ struct nft_trans_chain {
 
 struct nft_trans_table {
        bool                            update;
-       u8                              state;
-       u32                             flags;
 };
 
 #define nft_trans_table_update(trans)  \
        (((struct nft_trans_table *)trans->data)->update)
-#define nft_trans_table_state(trans)   \
-       (((struct nft_trans_table *)trans->data)->state)
-#define nft_trans_table_flags(trans)   \
-       (((struct nft_trans_table *)trans->data)->flags)
 
 struct nft_trans_elem {
        struct nft_set                  *set;
index 3eccb52..8341a8d 100644 (file)
@@ -193,7 +193,11 @@ struct tls_offload_context_tx {
        (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
 
 enum tls_context_flags {
-       TLS_RX_SYNC_RUNNING = 0,
+       /* tls_device_down was called after the netdev went down, device state
+        * was released, and kTLS works in software, even though rx_conf is
+        * still TLS_HW (needed for transition).
+        */
+       TLS_RX_DEV_DEGRADED = 0,
        /* Unlike RX where resync is driven entirely by the core in TX only
         * the driver knows when things went out of sync, so we need the flag
         * to be atomic.
@@ -266,6 +270,7 @@ struct tls_context {
 
        /* cache cold stuff */
        struct proto *sk_proto;
+       struct sock *sk;
 
        void (*sk_destruct)(struct sock *sk);
 
@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx)
 struct sk_buff *
 tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
                      struct sk_buff *skb);
+struct sk_buff *
+tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
+                        struct sk_buff *skb);
 
 static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
 {
index ee93428..225ec87 100644 (file)
 #define KEY_VOICECOMMAND               0x246   /* Listening Voice Command */
 #define KEY_ASSISTANT          0x247   /* AL Context-aware desktop assistant */
 #define KEY_KBD_LAYOUT_NEXT    0x248   /* AC Next Keyboard Layout Select */
+#define KEY_EMOJI_PICKER       0x249   /* Show/hide emoji picker (HUTRR101) */
 
 #define KEY_BRIGHTNESS_MIN             0x250   /* Set Brightness to Minimum */
 #define KEY_BRIGHTNESS_MAX             0x251   /* Set Brightness to Maximum */
index f0c35ce..4fe842c 100644 (file)
@@ -54,7 +54,7 @@
 #define VIRTIO_ID_SOUND                        25 /* virtio sound */
 #define VIRTIO_ID_FS                   26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM                 27 /* virtio pmem */
-#define VIRTIO_ID_BT                   28 /* virtio bluetooth */
 #define VIRTIO_ID_MAC80211_HWSIM       29 /* virtio mac80211-hwsim */
+#define VIRTIO_ID_BT                   40 /* virtio bluetooth */
 
 #endif /* _LINUX_VIRTIO_IDS_H */
index eb01e12..e9c42a1 100644 (file)
@@ -1537,7 +1537,7 @@ static noinline void __init kernel_init_freeable(void)
         */
        set_mems_allowed(node_states[N_MEMORY]);
 
-       cad_pid = task_pid(current);
+       cad_pid = get_pid(task_pid(current));
 
        smp_prepare_cpus(setup_max_cpus);
 
index 7344349..a2f1f15 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/jiffies.h>
 #include <linux/pid_namespace.h>
 #include <linux/proc_ns.h>
+#include <linux/security.h>
 
 #include "../../lib/kstrtox.h"
 
@@ -1069,11 +1070,13 @@ bpf_base_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return &bpf_probe_read_kernel_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return &bpf_probe_read_kernel_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
        case BPF_FUNC_snprintf:
index d2d7cf6..7a52bc1 100644 (file)
@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = {
 static __always_inline int
 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 {
-       int ret = security_locked_down(LOCKDOWN_BPF_READ);
+       int ret;
 
-       if (unlikely(ret < 0))
-               goto fail;
        ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
-               goto fail;
-       return ret;
-fail:
-       memset(dst, 0, size);
+               memset(dst, 0, size);
        return ret;
 }
 
@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 static __always_inline int
 bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
 {
-       int ret = security_locked_down(LOCKDOWN_BPF_READ);
-
-       if (unlikely(ret < 0))
-               goto fail;
+       int ret;
 
        /*
         * The strncpy_from_kernel_nofault() call will likely not fill the
@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
         */
        ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
-               goto fail;
-
-       return ret;
-fail:
-       memset(dst, 0, size);
+               memset(dst, 0, size);
        return ret;
 }
 
@@ -1011,16 +999,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
-               return &bpf_probe_read_kernel_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
-               return &bpf_probe_read_kernel_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_kernel_str_proto;
 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        case BPF_FUNC_probe_read:
-               return &bpf_probe_read_compat_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_compat_proto;
        case BPF_FUNC_probe_read_str:
-               return &bpf_probe_read_compat_str_proto;
+               return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
+                      NULL : &bpf_probe_read_compat_str_proto;
 #endif
 #ifdef CONFIG_CGROUPS
        case BPF_FUNC_get_current_cgroup_id:
index 47cfa05..9f852a8 100644 (file)
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL v2");
 /**
  * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
  * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
-       or the previous crc64 value if computing incrementally.
+ *     or the previous crc64 value if computing incrementally.
  * @p: pointer to buffer over which CRC64 is run
  * @len: length of buffer @p
  */
index 05efe98..297d1b3 100644 (file)
@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PMD advanced\n");
        /* Align the address wrt HPAGE_PMD_SIZE */
-       vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;
+       vaddr &= HPAGE_PMD_MASK;
 
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 
@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm,
 
        pr_debug("Validating PUD advanced\n");
        /* Align the address wrt HPAGE_PUD_SIZE */
-       vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE;
+       vaddr &= HPAGE_PUD_MASK;
 
        set_pud_at(mm, vaddr, pudp, pud);
        pudp_set_wrprotect(mm, vaddr, pudp);
index 95918f4..5560b50 100644 (file)
@@ -1793,7 +1793,7 @@ retry:
                        SetPageHWPoison(page);
                        ClearPageHWPoison(head);
                }
-               remove_hugetlb_page(h, page, false);
+               remove_hugetlb_page(h, head, false);
                h->max_huge_pages--;
                spin_unlock_irq(&hugetlb_lock);
                update_and_free_page(h, head);
@@ -4889,10 +4889,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                if (!page)
                        goto out;
        } else if (!*pagep) {
-               ret = -ENOMEM;
+               /* If a page already exists, then it's UFFDIO_COPY for
+                * a non-missing case. Return -EEXIST.
+                */
+               if (vm_shared &&
+                   hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+                       ret = -EEXIST;
+                       goto out;
+               }
+
                page = alloc_huge_page(dst_vma, dst_addr, 0);
-               if (IS_ERR(page))
+               if (IS_ERR(page)) {
+                       ret = -ENOMEM;
                        goto out;
+               }
 
                ret = copy_huge_page_from_user(page,
                                                (const void __user *) src_addr,
index c4605ac..348f31d 100644 (file)
@@ -220,8 +220,8 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
 /**
  * kasan_populate_early_shadow - populate shadow memory region with
  *                               kasan_early_shadow_page
- * @shadow_start - start of the memory range to populate
- * @shadow_end   - end of the memory range to populate
+ * @shadow_start: start of the memory range to populate
+ * @shadow_end: end of the memory range to populate
  */
 int __ref kasan_populate_early_shadow(const void *shadow_start,
                                        const void *shadow_end)
index e18fbbd..4d21ac4 100644 (file)
@@ -627,10 +627,10 @@ static void toggle_allocation_gate(struct work_struct *work)
                 * During low activity with no allocations we might wait a
                 * while; let's avoid the hung task warning.
                 */
-               wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
-                                  sysctl_hung_task_timeout_secs * HZ / 2);
+               wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
+                                       sysctl_hung_task_timeout_secs * HZ / 2);
        } else {
-               wait_event(allocation_wait, atomic_read(&kfence_allocation_gate));
+               wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
        }
 
        /* Disable static key and reset timer. */
index 730daa0..f3ffab9 100644 (file)
@@ -2939,6 +2939,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                }
                flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
                entry = mk_pte(new_page, vma->vm_page_prot);
+               entry = pte_sw_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
                /*
@@ -3602,6 +3603,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
        __SetPageUptodate(page);
 
        entry = mk_pte(page, vma->vm_page_prot);
+       entry = pte_sw_mkyoung(entry);
        if (vma->vm_flags & VM_WRITE)
                entry = pte_mkwrite(pte_mkdirty(entry));
 
@@ -3786,6 +3788,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 
        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);
+       else
+               entry = pte_sw_mkyoung(entry);
 
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index aaa1655..d1f5de1 100644 (file)
@@ -9158,6 +9158,8 @@ bool take_page_off_buddy(struct page *page)
                        del_page_from_free_list(page_head, zone, page_order);
                        break_down_buddy_pages(zone, page_head, page, 0,
                                                page_order, migratetype);
+                       if (!is_migrate_isolate(migratetype))
+                               __mod_zone_freepage_state(zone, -1, migratetype);
                        ret = true;
                        break;
                }
index fd12f16..7d71d10 100644 (file)
@@ -1610,8 +1610,13 @@ setup_failed:
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
-               flush_work(&hdev->cmd_work);
+
+               /* Since hci_rx_work() is possible to awake new cmd_work
+                * it should be flushed first to avoid unexpected call of
+                * hci_cmd_work()
+                */
                flush_work(&hdev->rx_work);
+               flush_work(&hdev->cmd_work);
 
                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);
index 251b912..eed0dd0 100644 (file)
@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
                /* Detach sockets from device */
                read_lock(&hci_sk_list.lock);
                sk_for_each(sk, &hci_sk_list.head) {
-                       bh_lock_sock_nested(sk);
+                       lock_sock(sk);
                        if (hci_pi(sk)->hdev == hdev) {
                                hci_pi(sk)->hdev = NULL;
                                sk->sk_err = EPIPE;
@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
 
                                hci_dev_put(hdev);
                        }
-                       bh_unlock_sock(sk);
+                       release_sock(sk);
                }
                read_unlock(&hci_sk_list.lock);
        }
index c10e5a5..4401397 100644 (file)
@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
        caifd_put(caifd);
 }
 
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
                     struct cflayer *link_support, int head_room,
                     struct cflayer **layer,
                     int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        enum cfcnfg_phy_preference pref;
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs;
+       int res;
 
        caifdevs = caif_device_list(dev_net(dev));
        caifd = caif_device_alloc(dev);
        if (!caifd)
-               return;
+               return -ENOMEM;
        *layer = &caifd->layer;
        spin_lock_init(&caifd->flow_lock);
 
@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        strlcpy(caifd->layer.name, dev->name,
                sizeof(caifd->layer.name));
        caifd->layer.transmit = transmit;
-       cfcnfg_add_phy_layer(cfg,
+       res = cfcnfg_add_phy_layer(cfg,
                                dev,
                                &caifd->layer,
                                pref,
@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        mutex_unlock(&caifdevs->lock);
        if (rcv_func)
                *rcv_func = receive;
+       return res;
 }
 EXPORT_SYMBOL(caif_enroll_dev);
 
@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
        struct cflayer *layer, *link_support;
        int head_room = 0;
        struct caif_device_entry_list *caifdevs;
+       int res;
 
        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));
@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
                                break;
                        }
                }
-               caif_enroll_dev(dev, caifdev, link_support, head_room,
+               res = caif_enroll_dev(dev, caifdev, link_support, head_room,
                                &layer, NULL);
+               if (res)
+                       cfserl_release(link_support);
                caifdev->flowctrl = dev_flowctrl;
                break;
 
index a0116b9..b02e129 100644 (file)
@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
        return (struct cflayer *) this;
 }
 
+static void cfusbl_release(struct cflayer *layer)
+{
+       kfree(layer);
+}
+
 static struct packet_type caif_usb_type __read_mostly = {
        .type = cpu_to_be16(ETH_P_802_EX1),
 };
@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        struct cflayer *layer, *link_support;
        struct usbnet *usbnet;
        struct usb_device *usbdev;
+       int res;
 
        /* Check whether we have a NCM device, and find its VID/PID. */
        if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        if (dev->num_tx_queues > 1)
                pr_warn("USB device uses more than one tx queue\n");
 
-       caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+       res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
                        &layer, &caif_usb_type.func);
+       if (res)
+               goto err;
+
        if (!pack_added)
                dev_add_pack(&caif_usb_type);
        pack_added = true;
@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
        strlcpy(layer->name, dev->name, sizeof(layer->name));
 
        return 0;
+err:
+       cfusbl_release(link_support);
+       return res;
 }
 
 static struct notifier_block caif_device_notifier = {
index 399239a..cac30e6 100644 (file)
@@ -450,7 +450,7 @@ unlock:
        rcu_read_unlock();
 }
 
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                     struct net_device *dev, struct cflayer *phy_layer,
                     enum cfcnfg_phy_preference pref,
@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
 {
        struct cflayer *frml;
        struct cfcnfg_phyinfo *phyinfo = NULL;
-       int i;
+       int i, res = 0;
        u8 phyid;
 
        mutex_lock(&cnfg->lock);
@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
                        goto got_phyid;
        }
        pr_warn("Too many CAIF Link Layers (max 6)\n");
+       res = -EEXIST;
        goto out;
 
 got_phyid:
        phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
-       if (!phyinfo)
+       if (!phyinfo) {
+               res = -ENOMEM;
                goto out_err;
+       }
 
        phy_layer->id = phyid;
        phyinfo->pref = pref;
@@ -492,8 +495,10 @@ got_phyid:
 
        frml = cffrml_create(phyid, fcs);
 
-       if (!frml)
+       if (!frml) {
+               res = -ENOMEM;
                goto out_err;
+       }
        phyinfo->frm_layer = frml;
        layer_set_up(frml, cnfg->mux);
 
@@ -511,11 +516,12 @@ got_phyid:
        list_add_rcu(&phyinfo->node, &cnfg->phys);
 out:
        mutex_unlock(&cnfg->lock);
-       return;
+       return res;
 
 out_err:
        kfree(phyinfo);
        mutex_unlock(&cnfg->lock);
+       return res;
 }
 EXPORT_SYMBOL(cfcnfg_add_phy_layer);
 
index e11725a..40cd57a 100644 (file)
@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
                           int phyid);
 
+void cfserl_release(struct cflayer *layer)
+{
+       kfree(layer);
+}
+
 struct cflayer *cfserl_create(int instance, bool use_stx)
 {
        struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
index ddd15af..210fc3b 100644 (file)
@@ -177,7 +177,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
        if (kcmlen > stackbuf_size)
                kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
        if (kcmsg == NULL)
-               return -ENOBUFS;
+               return -ENOMEM;
 
        /* Now copy them over neatly. */
        memset(kcmsg, 0, kcmlen);
index 4eb9695..051432e 100644 (file)
@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
        case DEVLINK_PORT_FLAVOUR_CPU:
        case DEVLINK_PORT_FLAVOUR_DSA:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
                                attrs->phys.port_number))
                        return -EMSGSIZE;
@@ -8631,7 +8630,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 
        switch (attrs->flavour) {
        case DEVLINK_PORT_FLAVOUR_PHYSICAL:
-       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
                if (!attrs->split)
                        n = snprintf(name, len, "p%u", attrs->phys.port_number);
                else
@@ -8679,6 +8677,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
                n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
                             attrs->pci_sf.sf);
                break;
+       case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+               return -EOPNOTSUPP;
        }
 
        if (n >= len)
index cd80ffe..a9f9379 100644 (file)
@@ -1168,7 +1168,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
 {
        struct net *net;
        struct sk_buff *skb;
-       int err = -ENOBUFS;
+       int err = -ENOMEM;
 
        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
index 714d5fa..3e84279 100644 (file)
@@ -4842,8 +4842,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
        if (err < 0)
                goto errout;
 
-       if (!skb->len)
+       if (!skb->len) {
+               err = -EINVAL;
                goto errout;
+       }
 
        rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
        return 0;
index 958614e..946888a 100644 (file)
@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
 }
 EXPORT_SYMBOL(sock_set_rcvbuf);
 
+static void __sock_set_mark(struct sock *sk, u32 val)
+{
+       if (val != sk->sk_mark) {
+               sk->sk_mark = val;
+               sk_dst_reset(sk);
+       }
+}
+
 void sock_set_mark(struct sock *sk, u32 val)
 {
        lock_sock(sk);
-       sk->sk_mark = val;
+       __sock_set_mark(sk, val);
        release_sock(sk);
 }
 EXPORT_SYMBOL(sock_set_mark);
@@ -1126,10 +1134,10 @@ set_sndbuf:
        case SO_MARK:
                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
                        ret = -EPERM;
-               } else if (val != sk->sk_mark) {
-                       sk->sk_mark = val;
-                       sk_dst_reset(sk);
+                       break;
                }
+
+               __sock_set_mark(sk, val);
                break;
 
        case SO_RXQ_OVFL:
index 008c1ec..122ad58 100644 (file)
@@ -64,7 +64,7 @@
 #define DSA_8021Q_SUBVLAN_HI_SHIFT     9
 #define DSA_8021Q_SUBVLAN_HI_MASK      GENMASK(9, 9)
 #define DSA_8021Q_SUBVLAN_LO_SHIFT     4
-#define DSA_8021Q_SUBVLAN_LO_MASK      GENMASK(4, 3)
+#define DSA_8021Q_SUBVLAN_LO_MASK      GENMASK(5, 4)
 #define DSA_8021Q_SUBVLAN_HI(x)                (((x) & GENMASK(2, 2)) >> 2)
 #define DSA_8021Q_SUBVLAN_LO(x)                ((x) & GENMASK(1, 0))
 #define DSA_8021Q_SUBVLAN(x)           \
index 0c1b077..29bf976 100644 (file)
@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
            nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
            nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
                        be32_to_cpu(params.frame_counter)) ||
-           ieee802154_llsec_fill_key_id(msg, &params.out_key))
+           ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+               rc = -ENOBUFS;
                goto out_free;
+       }
 
        dev_put(dev);
 
@@ -1184,7 +1186,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
 {
        struct ieee802154_llsec_device *dpos;
        struct ieee802154_llsec_device_key *kpos;
-       int rc = 0, idx = 0, idx2;
+       int idx = 0, idx2;
 
        list_for_each_entry(dpos, &data->table->devices, list) {
                if (idx++ < data->s_idx)
@@ -1200,7 +1202,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
                                                      data->nlmsg_seq,
                                                      dpos->hwaddr, kpos,
                                                      data->dev)) {
-                               return rc = -EMSGSIZE;
+                               return -EMSGSIZE;
                        }
 
                        data->s_idx2++;
@@ -1209,7 +1211,7 @@ static int llsec_iter_devkeys(struct llsec_dump_data *data)
                data->s_idx++;
        }
 
-       return rc;
+       return 0;
 }
 
 int ieee802154_llsec_dump_devkeys(struct sk_buff *skb,
index 2cdc7e6..88215b5 100644 (file)
@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
-           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+           nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+               rc = -EMSGSIZE;
                goto nla_put_failure;
+       }
        dev_put(dev);
 
        wpan_phy_put(phy);
index 05f6bd8..0cf2374 100644 (file)
@@ -1298,19 +1298,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
        if (!nla || nla_parse_nested_deprecated(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, nl802154_dev_addr_policy, NULL))
                return -EINVAL;
 
-       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
-           !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
-           !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
-             attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
+       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
                return -EINVAL;
 
        addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
        addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
        switch (addr->mode) {
        case NL802154_DEV_ADDR_SHORT:
+               if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
+                       return -EINVAL;
                addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
                break;
        case NL802154_DEV_ADDR_EXTENDED:
+               if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
+                       return -EINVAL;
                addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
                break;
        default:
index bc2f6ca..816d8aa 100644 (file)
@@ -886,7 +886,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
 
 
 /*
- *  Copy BOOTP-supplied string if not already set.
+ *  Copy BOOTP-supplied string
  */
 static int __init ic_bootp_string(char *dest, char *src, int len, int max)
 {
@@ -935,12 +935,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
                }
                break;
        case 12:        /* Host name */
-               ic_bootp_string(utsname()->nodename, ext+1, *ext,
-                               __NEW_UTS_LEN);
-               ic_host_name_set = 1;
+               if (!ic_host_name_set) {
+                       ic_bootp_string(utsname()->nodename, ext+1, *ext,
+                                       __NEW_UTS_LEN);
+                       ic_host_name_set = 1;
+               }
                break;
        case 15:        /* Domain name (DNS) */
-               ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
+               if (!ic_domain[0])
+                       ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
                break;
        case 17:        /* Root path */
                if (!root_server_path[0])
index a22822b..d417e51 100644 (file)
@@ -3673,11 +3673,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
        if (nh) {
                if (rt->fib6_src.plen) {
                        NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
-                       goto out;
+                       goto out_free;
                }
                if (!nexthop_get(nh)) {
                        NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
-                       goto out;
+                       goto out_free;
                }
                rt->nh = nh;
                fib6_nh = nexthop_fib6_nh(rt->nh);
@@ -3714,6 +3714,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
 out:
        fib6_info_release(rt);
        return ERR_PTR(err);
+out_free:
+       ip_fib_metrics_put(rt->fib6_metrics);
+       kfree(rt);
+       return ERR_PTR(err);
 }
 
 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
index aa98294..f7c8110 100644 (file)
@@ -271,6 +271,9 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (ipip6_tunnel_create(dev) < 0)
                goto failed_free;
 
+       if (!parms->name[0])
+               strcpy(parms->name, dev->name);
+
        return nt;
 
 failed_free:
index 6201965..1c572c8 100644 (file)
@@ -1066,6 +1066,11 @@ out_error:
                goto partial_message;
        }
 
+       if (skb_has_frag_list(head)) {
+               kfree_skb_list(skb_shinfo(head)->frag_list);
+               skb_shinfo(head)->frag_list = NULL;
+       }
+
        if (head != kcm->seq_skb)
                kfree_skb(head);
 
index 2bc1995..5edc686 100644 (file)
@@ -947,6 +947,10 @@ static void __mptcp_update_wmem(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
+
        if (!msk->wmem_reserved)
                return;
 
@@ -1085,10 +1089,20 @@ out:
 
 static void __mptcp_clean_una_wakeup(struct sock *sk)
 {
+#ifdef CONFIG_LOCKDEP
+       WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
        __mptcp_clean_una(sk);
        mptcp_write_space(sk);
 }
 
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+       mptcp_data_lock(sk);
+       __mptcp_clean_una_wakeup(sk);
+       mptcp_data_unlock(sk);
+}
+
 static void mptcp_enter_memory_pressure(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow;
@@ -2299,7 +2313,7 @@ static void __mptcp_retrans(struct sock *sk)
        struct sock *ssk;
        int ret;
 
-       __mptcp_clean_una_wakeup(sk);
+       mptcp_clean_una_wakeup(sk);
        dfrag = mptcp_rtx_head(sk);
        if (!dfrag) {
                if (mptcp_data_fin_enabled(msk)) {
index bde6be7..ef3d037 100644 (file)
@@ -630,21 +630,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 
        /* if the sk is MP_CAPABLE, we try to fetch the client key */
        if (subflow_req->mp_capable) {
-               if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
-                       /* here we can receive and accept an in-window,
-                        * out-of-order pkt, which will not carry the MP_CAPABLE
-                        * opt even on mptcp enabled paths
-                        */
-                       goto create_msk;
-               }
-
+               /* we can receive and accept an in-window, out-of-order pkt,
+                * which may not carry the MP_CAPABLE opt even on mptcp enabled
+                * paths: always try to extract the peer key, and fallback
+                * for packets missing it.
+                * Even OoO DSS packets coming legitly after dropped or
+                * reordered MPC will cause fallback, but we don't have other
+                * options.
+                */
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }
 
-create_msk:
                new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
@@ -1012,21 +1011,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
 
                status = get_mapping_status(ssk, msk);
                trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
-               if (status == MAPPING_INVALID) {
-                       ssk->sk_err = EBADMSG;
-                       goto fatal;
-               }
-               if (status == MAPPING_DUMMY) {
-                       __mptcp_do_fallback(msk);
-                       skb = skb_peek(&ssk->sk_receive_queue);
-                       subflow->map_valid = 1;
-                       subflow->map_seq = READ_ONCE(msk->ack_seq);
-                       subflow->map_data_len = skb->len;
-                       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
-                                                  subflow->ssn_offset;
-                       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
-                       return true;
-               }
+               if (unlikely(status == MAPPING_INVALID))
+                       goto fallback;
+
+               if (unlikely(status == MAPPING_DUMMY))
+                       goto fallback;
 
                if (status != MAPPING_OK)
                        goto no_data;
@@ -1039,10 +1028,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
                 * MP_CAPABLE-based mapping
                 */
                if (unlikely(!READ_ONCE(msk->can_ack))) {
-                       if (!subflow->mpc_map) {
-                               ssk->sk_err = EBADMSG;
-                               goto fatal;
-                       }
+                       if (!subflow->mpc_map)
+                               goto fallback;
                        WRITE_ONCE(msk->remote_key, subflow->remote_key);
                        WRITE_ONCE(msk->ack_seq, subflow->map_seq);
                        WRITE_ONCE(msk->can_ack, true);
@@ -1070,17 +1057,31 @@ static bool subflow_check_data_avail(struct sock *ssk)
 no_data:
        subflow_sched_work_if_closed(msk, ssk);
        return false;
-fatal:
-       /* fatal protocol error, close the socket */
-       /* This barrier is coupled with smp_rmb() in tcp_poll() */
-       smp_wmb();
-       ssk->sk_error_report(ssk);
-       tcp_set_state(ssk, TCP_CLOSE);
-       subflow->reset_transient = 0;
-       subflow->reset_reason = MPTCP_RST_EMPTCP;
-       tcp_send_active_reset(ssk, GFP_ATOMIC);
-       subflow->data_avail = 0;
-       return false;
+
+fallback:
+       /* RFC 8684 section 3.7. */
+       if (subflow->mp_join || subflow->fully_established) {
+               /* fatal protocol error, close the socket.
+                * subflow_error_report() will introduce the appropriate barriers
+                */
+               ssk->sk_err = EBADMSG;
+               ssk->sk_error_report(ssk);
+               tcp_set_state(ssk, TCP_CLOSE);
+               subflow->reset_transient = 0;
+               subflow->reset_reason = MPTCP_RST_EMPTCP;
+               tcp_send_active_reset(ssk, GFP_ATOMIC);
+               subflow->data_avail = 0;
+               return false;
+       }
+
+       __mptcp_do_fallback(msk);
+       skb = skb_peek(&ssk->sk_receive_queue);
+       subflow->map_valid = 1;
+       subflow->map_seq = READ_ONCE(msk->ack_seq);
+       subflow->map_data_len = skb->len;
+       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+       subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+       return true;
 }
 
 bool mptcp_subflow_data_available(struct sock *sk)
index d45dbcb..c250970 100644 (file)
@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
        svc->port = u->port;
        svc->fwmark = u->fwmark;
-       svc->flags = u->flags;
+       svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
        svc->timeout = u->timeout * HZ;
        svc->netmask = u->netmask;
        svc->ipvs = ipvs;
index 89e5bac..dc9ca12 100644 (file)
@@ -664,7 +664,7 @@ int nf_conntrack_proto_init(void)
 
 #if IS_ENABLED(CONFIG_IPV6)
 cleanup_sockopt:
-       nf_unregister_sockopt(&so_getorigdst6);
+       nf_unregister_sockopt(&so_getorigdst);
 #endif
        return ret;
 }
index d63d2d8..72bc759 100644 (file)
@@ -736,7 +736,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
-           nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
+           nla_put_be32(skb, NFTA_TABLE_FLAGS,
+                        htonl(table->flags & NFT_TABLE_F_MASK)) ||
            nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)) ||
            nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle),
                         NFTA_TABLE_PAD))
@@ -947,20 +948,22 @@ err_register_hooks:
 
 static void nf_tables_table_disable(struct net *net, struct nft_table *table)
 {
+       table->flags &= ~NFT_TABLE_F_DORMANT;
        nft_table_disable(net, table, 0);
+       table->flags |= NFT_TABLE_F_DORMANT;
 }
 
-enum {
-       NFT_TABLE_STATE_UNCHANGED       = 0,
-       NFT_TABLE_STATE_DORMANT,
-       NFT_TABLE_STATE_WAKEUP
-};
+#define __NFT_TABLE_F_INTERNAL         (NFT_TABLE_F_MASK + 1)
+#define __NFT_TABLE_F_WAS_DORMANT      (__NFT_TABLE_F_INTERNAL << 0)
+#define __NFT_TABLE_F_WAS_AWAKEN       (__NFT_TABLE_F_INTERNAL << 1)
+#define __NFT_TABLE_F_UPDATE           (__NFT_TABLE_F_WAS_DORMANT | \
+                                        __NFT_TABLE_F_WAS_AWAKEN)
 
 static int nf_tables_updtable(struct nft_ctx *ctx)
 {
        struct nft_trans *trans;
        u32 flags;
-       int ret = 0;
+       int ret;
 
        if (!ctx->nla[NFTA_TABLE_FLAGS])
                return 0;
@@ -985,21 +988,27 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 
        if ((flags & NFT_TABLE_F_DORMANT) &&
            !(ctx->table->flags & NFT_TABLE_F_DORMANT)) {
-               nft_trans_table_state(trans) = NFT_TABLE_STATE_DORMANT;
+               ctx->table->flags |= NFT_TABLE_F_DORMANT;
+               if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE))
+                       ctx->table->flags |= __NFT_TABLE_F_WAS_AWAKEN;
        } else if (!(flags & NFT_TABLE_F_DORMANT) &&
                   ctx->table->flags & NFT_TABLE_F_DORMANT) {
-               ret = nf_tables_table_enable(ctx->net, ctx->table);
-               if (ret >= 0)
-                       nft_trans_table_state(trans) = NFT_TABLE_STATE_WAKEUP;
+               ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+               if (!(ctx->table->flags & __NFT_TABLE_F_UPDATE)) {
+                       ret = nf_tables_table_enable(ctx->net, ctx->table);
+                       if (ret < 0)
+                               goto err_register_hooks;
+
+                       ctx->table->flags |= __NFT_TABLE_F_WAS_DORMANT;
+               }
        }
-       if (ret < 0)
-               goto err;
 
-       nft_trans_table_flags(trans) = flags;
        nft_trans_table_update(trans) = true;
        nft_trans_commit_list_add_tail(ctx->net, trans);
+
        return 0;
-err:
+
+err_register_hooks:
        nft_trans_destroy(trans);
        return ret;
 }
@@ -1905,7 +1914,7 @@ static int nft_chain_parse_netdev(struct net *net,
 static int nft_chain_parse_hook(struct net *net,
                                const struct nlattr * const nla[],
                                struct nft_chain_hook *hook, u8 family,
-                               bool autoload)
+                               struct netlink_ext_ack *extack, bool autoload)
 {
        struct nftables_pernet *nft_net = nft_pernet(net);
        struct nlattr *ha[NFTA_HOOK_MAX + 1];
@@ -1935,8 +1944,10 @@ static int nft_chain_parse_hook(struct net *net,
        if (nla[NFTA_CHAIN_TYPE]) {
                type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
                                                   family, autoload);
-               if (IS_ERR(type))
+               if (IS_ERR(type)) {
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
                        return PTR_ERR(type);
+               }
        }
        if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
                return -EOPNOTSUPP;
@@ -1945,8 +1956,11 @@ static int nft_chain_parse_hook(struct net *net,
            hook->priority <= NF_IP_PRI_CONNTRACK)
                return -EOPNOTSUPP;
 
-       if (!try_module_get(type->owner))
+       if (!try_module_get(type->owner)) {
+               if (nla[NFTA_CHAIN_TYPE])
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]);
                return -ENOENT;
+       }
 
        hook->type = type;
 
@@ -2057,7 +2071,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
 static u64 chain_id;
 
 static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
-                             u8 policy, u32 flags)
+                             u8 policy, u32 flags,
+                             struct netlink_ext_ack *extack)
 {
        const struct nlattr * const *nla = ctx->nla;
        struct nft_table *table = ctx->table;
@@ -2079,7 +2094,8 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                if (flags & NFT_CHAIN_BINDING)
                        return -EOPNOTSUPP;
 
-               err = nft_chain_parse_hook(net, nla, &hook, family, true);
+               err = nft_chain_parse_hook(net, nla, &hook, family, extack,
+                                          true);
                if (err < 0)
                        return err;
 
@@ -2234,7 +2250,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
                        return -EEXIST;
                }
                err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
-                                          false);
+                                          extack, false);
                if (err < 0)
                        return err;
 
@@ -2447,7 +2463,7 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info,
                                          extack);
        }
 
-       return nf_tables_addchain(&ctx, family, genmask, policy, flags);
+       return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack);
 }
 
 static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info,
@@ -3328,8 +3344,10 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
                        if (n == NFT_RULE_MAXEXPRS)
                                goto err1;
                        err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
-                       if (err < 0)
+                       if (err < 0) {
+                               NL_SET_BAD_ATTR(extack, tmp);
                                goto err1;
+                       }
                        size += expr_info[n].ops->size;
                        n++;
                }
@@ -8547,10 +8565,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                switch (trans->msg_type) {
                case NFT_MSG_NEWTABLE:
                        if (nft_trans_table_update(trans)) {
-                               if (nft_trans_table_state(trans) == NFT_TABLE_STATE_DORMANT)
+                               if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
+                               if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT)
                                        nf_tables_table_disable(net, trans->ctx.table);
 
-                               trans->ctx.table->flags = nft_trans_table_flags(trans);
+                               trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
                        } else {
                                nft_clear(net, trans->ctx.table);
                        }
@@ -8768,9 +8790,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                switch (trans->msg_type) {
                case NFT_MSG_NEWTABLE:
                        if (nft_trans_table_update(trans)) {
-                               if (nft_trans_table_state(trans) == NFT_TABLE_STATE_WAKEUP)
+                               if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
+                               if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) {
                                        nf_tables_table_disable(net, trans->ctx.table);
-
+                                       trans->ctx.table->flags |= NFT_TABLE_F_DORMANT;
+                               } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) {
+                                       trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT;
+                               }
+                               trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE;
                                nft_trans_destroy(trans);
                        } else {
                                list_del_rcu(&trans->ctx.table->list);
index 322ac5d..752b10c 100644 (file)
@@ -380,10 +380,14 @@ static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
                     struct nf_conntrack_helper *helper)
 {
+       u32 size;
        int ret;
 
-       if (tb[NFCTH_PRIV_DATA_LEN])
-               return -EBUSY;
+       if (tb[NFCTH_PRIV_DATA_LEN]) {
+               size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+               if (size != helper->data_len)
+                       return -EBUSY;
+       }
 
        if (tb[NFCTH_POLICY]) {
                ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
index 0592a94..337e22d 100644 (file)
@@ -1217,7 +1217,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj,
        struct nf_conn *ct;
 
        ct = nf_ct_get(pkt->skb, &ctinfo);
-       if (!ct || ctinfo == IP_CT_UNTRACKED) {
+       if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) {
                regs->verdict.code = NFT_BREAK;
                return;
        }
index 53dbe73..6cfd30f 100644 (file)
@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        if (!llcp_sock->service_name) {
                nfc_llcp_local_put(llcp_sock->local);
                llcp_sock->local = NULL;
+               llcp_sock->dev = NULL;
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                llcp_sock->local = NULL;
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
+               llcp_sock->dev = NULL;
                ret = -EADDRINUSE;
                goto put_dev;
        }
index ec7a1c4..18edd9a 100644 (file)
@@ -984,7 +984,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
         */
        cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
        if (!cached) {
-               if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+               if (tcf_ct_flow_table_lookup(p, skb, family)) {
                        skip_add = true;
                        goto do_nat;
                }
@@ -1022,10 +1022,11 @@ do_nat:
                 * even if the connection is already confirmed.
                 */
                nf_conntrack_confirm(skb);
-       } else if (!skip_add) {
-               tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
        }
 
+       if (!skip_add)
+               tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
 out_push:
        skb_push_rcsum(skb, nh_ofs);
 
@@ -1202,9 +1203,6 @@ static int tcf_ct_fill_params(struct net *net,
                                   sizeof(p->zone));
        }
 
-       if (p->zone == NF_CT_DEFAULT_ZONE_ID)
-               return 0;
-
        nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
        tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
        if (!tmpl) {
index 081c11d..8827987 100644 (file)
@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch,
        struct Qdisc *old_q;
 
        /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
-       qdisc_refcount_inc(new_q);
+       if (new_q)
+               qdisc_refcount_inc(new_q);
        old_q = htb_graft_helper(dev_queue, new_q);
        WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
 }
@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q) {
+                       if (new_q)
                                htb_set_lockdep_class_child(new_q);
-                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
-                       }
+                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
                }
        }
 
index 76a6f8c..bd9f156 100644 (file)
@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work);
 static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
 static LIST_HEAD(tls_device_gc_list);
 static LIST_HEAD(tls_device_list);
+static LIST_HEAD(tls_device_down_list);
 static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
        struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
        struct net_device *netdev;
 
-       if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
-               return;
-
        trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type);
+       rcu_read_lock();
        netdev = READ_ONCE(tls_ctx->netdev);
        if (netdev)
                netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
                                                   TLS_OFFLOAD_CTX_DIR_RX);
-       clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+       rcu_read_unlock();
        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 
        if (tls_ctx->rx_conf != TLS_HW)
                return;
+       if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags)))
+               return;
 
        prot = &tls_ctx->prot_info;
        rx_ctx = tls_offload_ctx_rx(tls_ctx);
@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
 
        ctx->sw.decrypted |= is_decrypted;
 
+       if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) {
+               if (likely(is_encrypted || is_decrypted))
+                       return 0;
+
+               /* After tls_device_down disables the offload, the next SKB will
+                * likely have initial fragments decrypted, and final ones not
+                * decrypted. We need to reencrypt that single SKB.
+                */
+               return tls_device_reencrypt(sk, skb);
+       }
+
        /* Return immediately if the record is either entirely plaintext or
         * entirely ciphertext. Otherwise handle reencrypt partially decrypted
         * record.
@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev)
        spin_unlock_irqrestore(&tls_device_lock, flags);
 
        list_for_each_entry_safe(ctx, tmp, &list, list) {
+               /* Stop offloaded TX and switch to the fallback.
+                * tls_is_sk_tx_device_offloaded will return false.
+                */
+               WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw);
+
+               /* Stop the RX and TX resync.
+                * tls_dev_resync must not be called after tls_dev_del.
+                */
+               WRITE_ONCE(ctx->netdev, NULL);
+
+               /* Start skipping the RX resync logic completely. */
+               set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags);
+
+               /* Sync with inflight packets. After this point:
+                * TX: no non-encrypted packets will be passed to the driver.
+                * RX: resync requests from the driver will be ignored.
+                */
+               synchronize_net();
+
+               /* Release the offload context on the driver side. */
                if (ctx->tx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev)
                    !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_RX);
-               WRITE_ONCE(ctx->netdev, NULL);
-               smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
-               while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
-                       usleep_range(10, 200);
+
                dev_put(netdev);
-               list_del_init(&ctx->list);
 
-               if (refcount_dec_and_test(&ctx->refcount))
-                       tls_device_free_ctx(ctx);
+               /* Move the context to a separate list for two reasons:
+                * 1. When the context is deallocated, list_del is called.
+                * 2. It's no longer an offloaded context, so we don't want to
+                *    run offload-specific code on this context.
+                */
+               spin_lock_irqsave(&tls_device_lock, flags);
+               list_move_tail(&ctx->list, &tls_device_down_list);
+               spin_unlock_irqrestore(&tls_device_lock, flags);
+
+               /* Device contexts for RX and TX will be freed in on sk_destruct
+                * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW.
+                */
        }
 
        up_write(&device_offload_lock);
index cacf040..e40bedd 100644 (file)
@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
 
+struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk,
+                                        struct net_device *dev,
+                                        struct sk_buff *skb)
+{
+       return tls_sw_fallback(sk, skb);
+}
+
 struct sk_buff *tls_encrypt_skb(struct sk_buff *skb)
 {
        return tls_sw_fallback(skb->sk, skb);
index 47b7c53..fde56ff 100644 (file)
@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk)
        mutex_init(&ctx->tx_lock);
        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        ctx->sk_proto = READ_ONCE(sk->sk_prot);
+       ctx->sk = sk;
        return ctx;
 }
 
index 44d6566..1816899 100644 (file)
@@ -536,7 +536,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
        if (protocol)
                goto out;
 
-       rc = -ENOBUFS;
+       rc = -ENOMEM;
        if ((sk = x25_alloc_socket(net, kern)) == NULL)
                goto out;
 
index 21dbf63..9ec93d9 100644 (file)
@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev,
        if (format != DRM_FORMAT_XRGB8888) {
                pci_err(pdev, "format mismatch (0x%x != 0x%x)\n",
                        format, DRM_FORMAT_XRGB8888);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (width < 100  || width > 10000) {
                pci_err(pdev, "width (%d) out of range\n", width);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        if (height < 100 || height > 10000) {
                pci_err(pdev, "height (%d) out of range\n", height);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_release_regions;
        }
        pci_info(pdev, "mdpy found: %dx%d framebuffer\n",
                 width, height);
 
        info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev);
-       if (!info)
+       if (!info) {
+               ret = -ENOMEM;
                goto err_release_regions;
+       }
        pci_set_drvdata(pdev, info);
        par = info->par;
 
index dd87cea..a7883e4 100644 (file)
@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M]  $@
 quiet_cmd_btf_ko = BTF [M] $@
       cmd_btf_ko =                                                     \
        if [ -f vmlinux ]; then                                         \
-               LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; \
+               LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
        else                                                            \
                printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
        fi;
index f4de4c9..0e0f646 100755 (executable)
@@ -240,7 +240,7 @@ gen_btf()
        fi
 
        info "BTF" ${2}
-       LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${extra_paholeopt} ${1}
+       LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${extra_paholeopt} ${1}
 
        # Create ${2} which contains just .BTF section but no symbols. Add
        # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all
index 25f57c1..a90e31d 100644 (file)
@@ -17,6 +17,9 @@ MODULE_LICENSE("GPL");
 #define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \
                        >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1)
 
+#define to_led_card_dev(_dev) \
+       container_of(_dev, struct snd_ctl_led_card, dev)
+
 enum snd_ctl_led_mode {
         MODE_FOLLOW_MUTE = 0,
         MODE_FOLLOW_ROUTE,
@@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card)
        snd_ctl_led_refresh();
 }
 
+static void snd_ctl_led_card_release(struct device *dev)
+{
+       struct snd_ctl_led_card *led_card = to_led_card_dev(dev);
+
+       kfree(led_card);
+}
+
+static void snd_ctl_led_release(struct device *dev)
+{
+}
+
+static void snd_ctl_led_dev_release(struct device *dev)
+{
+}
+
 /*
  * sysfs
  */
@@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card)
                led_card->number = card->number;
                led_card->led = led;
                device_initialize(&led_card->dev);
+               led_card->dev.release = snd_ctl_led_card_release;
                if (dev_set_name(&led_card->dev, "card%d", card->number) < 0)
                        goto cerr;
                led_card->dev.parent = &led->dev;
@@ -681,7 +700,6 @@ cerr:
                put_device(&led_card->dev);
 cerr2:
                printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number);
-               kfree(led_card);
        }
 }
 
@@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card)
                snprintf(link_name, sizeof(link_name), "led-%s", led->name);
                sysfs_remove_link(&card->ctl_dev.kobj, link_name);
                sysfs_remove_link(&led_card->dev.kobj, "card");
-               device_del(&led_card->dev);
-               kfree(led_card);
+               device_unregister(&led_card->dev);
                led->cards[card->number] = NULL;
        }
 }
@@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void)
 
        device_initialize(&snd_ctl_led_dev);
        snd_ctl_led_dev.class = sound_class;
+       snd_ctl_led_dev.release = snd_ctl_led_dev_release;
        dev_set_name(&snd_ctl_led_dev, "ctl-led");
        if (device_add(&snd_ctl_led_dev)) {
                put_device(&snd_ctl_led_dev);
@@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void)
                INIT_LIST_HEAD(&led->controls);
                device_initialize(&led->dev);
                led->dev.parent = &snd_ctl_led_dev;
+               led->dev.release = snd_ctl_led_release;
                led->dev.groups = snd_ctl_led_dev_attr_groups;
                dev_set_name(&led->dev, led->name);
                if (device_add(&led->dev)) {
                        put_device(&led->dev);
                        for (; group > 0; group--) {
                                led = &snd_ctl_leds[group - 1];
-                               device_del(&led->dev);
+                               device_unregister(&led->dev);
                        }
-                       device_del(&snd_ctl_led_dev);
+                       device_unregister(&snd_ctl_led_dev);
                        return -ENOMEM;
                }
        }
@@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void)
        }
        for (group = 0; group < MAX_LED; group++) {
                led = &snd_ctl_leds[group];
-               device_del(&led->dev);
+               device_unregister(&led->dev);
        }
-       device_del(&snd_ctl_led_dev);
+       device_unregister(&snd_ctl_led_dev);
        snd_ctl_led_clean(NULL);
 }
 
index 6898b1a..92b7008 100644 (file)
@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
                return;
        if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
                return;
+       event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
        list_for_each_entry(ts, &ti->slave_active_head, active_list)
                if (ts->ccallback)
-                       ts->ccallback(ts, event + 100, &tstamp, resolution);
+                       ts->ccallback(ts, event, &tstamp, resolution);
 }
 
 /* start/continue a master timer */
index ab5ff78..d8be146 100644 (file)
@@ -331,6 +331,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51c8,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
 #endif
 
 };
index a31009a..5462f77 100644 (file)
@@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM_SLEEP
 static int hda_codec_pm_prepare(struct device *dev)
 {
+       dev->power.power_state = PMSG_SUSPEND;
        return pm_runtime_suspended(dev);
 }
 
@@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
+       /* If no other pm-functions are called between prepare() and complete() */
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND)
+               dev->power.power_state = PMSG_RESUME;
+
        if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
            hda_codec_need_resume(codec) || codec->forced_resume))
                pm_request_resume(dev);
index b638fc2..1f8018f 100644 (file)
@@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol,
 static const struct snd_kcontrol_new cap_sw_temp = {
        .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name = "Capture Switch",
+       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
        .info = cap_sw_info,
        .get = cap_sw_get,
        .put = cap_sw_put,
index 79ade33..470753b 100644 (file)
@@ -2485,6 +2485,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-P */
        { PCI_DEVICE(0x8086, 0x51c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-M */
+       { PCI_DEVICE(0x8086, 0x51cc),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 726507d..8629e84 100644 (file)
@@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec,
                break;
        case HDA_FIXUP_ACT_PROBE:
 
-               /* Set initial volume on Bullseye to -26 dB */
-               if (codec->fixup_id == CS8409_BULLSEYE)
-                       snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
-                                       HDA_INPUT, 0, 0xff, 0x19);
+               /* Set initial DMIC volume to -26 dB */
+               snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID,
+                               HDA_INPUT, 0, 0xff, 0x19);
                snd_hda_gen_add_kctl(&spec->gen,
                        NULL, &cs8409_cs42l42_hp_volume_mixer);
                snd_hda_gen_add_kctl(&spec->gen,
index 61a60c4..43e3714 100644 (file)
@@ -8303,6 +8303,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..d0f4ecd
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+       PERF_REG_MIPS_PC,
+       PERF_REG_MIPS_R1,
+       PERF_REG_MIPS_R2,
+       PERF_REG_MIPS_R3,
+       PERF_REG_MIPS_R4,
+       PERF_REG_MIPS_R5,
+       PERF_REG_MIPS_R6,
+       PERF_REG_MIPS_R7,
+       PERF_REG_MIPS_R8,
+       PERF_REG_MIPS_R9,
+       PERF_REG_MIPS_R10,
+       PERF_REG_MIPS_R11,
+       PERF_REG_MIPS_R12,
+       PERF_REG_MIPS_R13,
+       PERF_REG_MIPS_R14,
+       PERF_REG_MIPS_R15,
+       PERF_REG_MIPS_R16,
+       PERF_REG_MIPS_R17,
+       PERF_REG_MIPS_R18,
+       PERF_REG_MIPS_R19,
+       PERF_REG_MIPS_R20,
+       PERF_REG_MIPS_R21,
+       PERF_REG_MIPS_R22,
+       PERF_REG_MIPS_R23,
+       PERF_REG_MIPS_R24,
+       PERF_REG_MIPS_R25,
+       PERF_REG_MIPS_R26,
+       PERF_REG_MIPS_R27,
+       PERF_REG_MIPS_R28,
+       PERF_REG_MIPS_R29,
+       PERF_REG_MIPS_R30,
+       PERF_REG_MIPS_R31,
+       PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
index 406a951..73df23d 100644 (file)
@@ -90,7 +90,6 @@ endif
 ifeq ($(ARCH),mips)
   NO_PERF_REGS := 0
   CFLAGS += -I$(OUTPUT)arch/mips/include/generated
-  CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi
   LIBUNWIND_LIBS = -lunwind -lunwind-mips
 endif
 
index 3337b5f..84803ab 100644 (file)
@@ -2714,6 +2714,12 @@ int cmd_record(int argc, const char **argv)
                rec->no_buildid = true;
        }
 
+       if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+               pr_err("Kernel has no cgroup sampling support.\n");
+               err = -EINVAL;
+               goto out_opts;
+       }
+
        if (rec->opts.kcore)
                rec->data.is_dir = true;
 
index dd8ff28..c783558 100755 (executable)
@@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt
 arch/x86/tools/gen-insn-attr-x86.awk
 arch/arm/include/uapi/asm/perf_regs.h
 arch/arm64/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
 arch/powerpc/include/uapi/asm/perf_regs.h
 arch/s390/include/uapi/asm/perf_regs.h
 arch/x86/include/uapi/asm/perf_regs.h
index 4a7b8de..8c10955 100644 (file)
@@ -16,7 +16,7 @@ pinned=0
 exclusive=0
 exclude_user=0
 exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
 exclude_idle=0
 mmap=1
 comm=1
index 974f10e..5ed674a 100644 (file)
@@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target)
 
        evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
        if (evsel->bperf_leader_link_fd < 0 &&
-           bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+           bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
+               err = -1;
                goto out;
-
+       }
        /*
         * The bpf_link holds reference to the leader program, and the
         * leader program holds reference to the maps. Therefore, if
@@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
        /* Step 2: load the follower skeleton */
        evsel->follower_skel = bperf_follower_bpf__open();
        if (!evsel->follower_skel) {
+               err = -1;
                pr_err("Failed to open follower skeleton\n");
                goto out;
        }
index b2f4920..7d2ba84 100644 (file)
@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data)
        if ((tag == DW_TAG_formal_parameter ||
             tag == DW_TAG_variable) &&
            die_compare_name(die_mem, fvp->name) &&
-       /* Does the DIE have location information or external instance? */
+       /*
+        * Does the DIE have location information or const value
+        * or external instance?
+        */
            (dwarf_attr(die_mem, DW_AT_external, &attr) ||
-            dwarf_attr(die_mem, DW_AT_location, &attr)))
+            dwarf_attr(die_mem, DW_AT_location, &attr) ||
+            dwarf_attr(die_mem, DW_AT_const_value, &attr)))
                return DIE_FIND_CB_END;
        if (dwarf_haspc(die_mem, fvp->addr))
                return DIE_FIND_CB_CONTINUE;
index 9130f6f..bc5e4f2 100644 (file)
@@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env)
                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
+               free(node->info_linear);
                free(node);
        }
 
index 4a3cd1b..a8d8463 100644 (file)
@@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig)
        evsel->auto_merge_stats = orig->auto_merge_stats;
        evsel->collect_stat = orig->collect_stat;
        evsel->weak_group = orig->weak_group;
+       evsel->use_config_name = orig->use_config_name;
 
        if (evsel__copy_config_terms(evsel, orig) < 0)
                goto out_err;
index 75cf5db..bdad52a 100644 (file)
@@ -83,8 +83,10 @@ struct evsel {
                bool                    collect_stat;
                bool                    weak_group;
                bool                    bpf_counter;
+               bool                    use_config_name;
                int                     bpf_fd;
                struct bpf_object       *bpf_obj;
+               struct list_head        config_terms;
        };
 
        /*
@@ -116,10 +118,8 @@ struct evsel {
        bool                    merged_stat;
        bool                    reset_group;
        bool                    errored;
-       bool                    use_config_name;
        struct hashmap          *per_pkg_mask;
        struct evsel            *leader;
-       struct list_head        config_terms;
        int                     err;
        int                     cpu_iter;
        struct {
index 829af17..0204116 100644 (file)
@@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel)
        evsel->core.attr.build_id = 1;
 }
 
+static void perf_probe_cgroup(struct evsel *evsel)
+{
+       evsel->core.attr.cgroup = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
        return perf_probe_api(perf_probe_sample_identifier);
@@ -182,3 +187,8 @@ bool perf_can_record_build_id(void)
 {
        return perf_probe_api(perf_probe_build_id);
 }
+
+bool perf_can_record_cgroup(void)
+{
+       return perf_probe_api(perf_probe_cgroup);
+}
index f12ca55..b104168 100644 (file)
@@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void);
 bool perf_can_record_text_poke_events(void);
 bool perf_can_sample_identifier(void);
 bool perf_can_record_build_id(void);
+bool perf_can_record_cgroup(void);
 
 #endif // __PERF_API_PROBE_H
index 866f2d5..b029c29 100644 (file)
@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
            immediate_value_is_supported()) {
                Dwarf_Sword snum;
 
+               if (!tvar)
+                       return 0;
+
                dwarf_formsdata(&attr, &snum);
                ret = asprintf(&tvar->value, "\\%ld", (long)snum);
 
index a76fff5..ca326f9 100644 (file)
@@ -541,7 +541,7 @@ static void uniquify_event_name(struct evsel *counter)
        char *config;
        int ret = 0;
 
-       if (counter->uniquified_name ||
+       if (counter->uniquified_name || counter->use_config_name ||
            !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
                                           strlen(counter->pmu_name)))
                return;
@@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter)
                }
        } else {
                if (perf_pmu__has_hybrid()) {
-                       if (!counter->use_config_name) {
-                               ret = asprintf(&new_name, "%s/%s/",
-                                              counter->pmu_name, counter->name);
-                       }
+                       ret = asprintf(&new_name, "%s/%s/",
+                                      counter->pmu_name, counter->name);
                } else {
                        ret = asprintf(&new_name, "%s [%s]",
                                       counter->name, counter->pmu_name);
index 4c56aa8..a733457 100644 (file)
@@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes)
 
        list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
                list_del_init(&pos->note_list);
+               zfree(&pos->args);
                zfree(&pos->name);
                zfree(&pos->provider);
                free(pos);
index 3c4cb72..9ca5f1b 100755 (executable)
@@ -501,6 +501,7 @@ do_transfer()
        local stat_ackrx_now_l=$(get_mib_counter "${listener_ns}" "MPTcpExtMPCapableACKRX")
        local stat_cookietx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesSent")
        local stat_cookierx_now=$(get_mib_counter "${listener_ns}" "TcpExtSyncookiesRecv")
+       local stat_ooo_now=$(get_mib_counter "${listener_ns}" "TcpExtTCPOFOQueue")
 
        expect_synrx=$((stat_synrx_last_l))
        expect_ackrx=$((stat_ackrx_last_l))
@@ -518,10 +519,14 @@ do_transfer()
                        "${stat_synrx_now_l}" "${expect_synrx}" 1>&2
                retc=1
        fi
-       if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ]; then
-               printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
-                       "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
-               rets=1
+       if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then
+               if [ ${stat_ooo_now} -eq 0 ]; then
+                       printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \
+                               "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2
+                       rets=1
+               else
+                       printf "[ Note ] fallback due to TCP OoO"
+               fi
        fi
 
        if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then
index bed4b53..8f3e72e 100644 (file)
@@ -10,6 +10,7 @@
 /proc-self-map-files-002
 /proc-self-syscall
 /proc-self-wchan
+/proc-subset-pid
 /proc-uptime-001
 /proc-uptime-002
 /read
index 7ed7cd9..ebc4ee0 100755 (executable)
@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
 ip1 -4 route add default dev wg0 table 51820
 ip1 -4 rule add not fwmark 51820 table 51820
 ip1 -4 rule add table main suppress_prefixlength 0
+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
 # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
 n1 ping -W 1 -c 100 -f 192.168.99.7
 n1 ping -W 1 -c 100 -f abab::1111
index 4eecb43..74db83a 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
 CONFIG_NETFILTER_XT_NAT=y
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MARK=y
-CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_NF_NAT_IPV4=y
 CONFIG_IP_NF_IPTABLES=y
 CONFIG_IP_NF_FILTER=y