Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author David S. Miller <davem@davemloft.net>
Sun, 26 Jan 2020 09:40:21 +0000 (10:40 +0100)
committer David S. Miller <davem@davemloft.net>
Sun, 26 Jan 2020 09:40:21 +0000 (10:40 +0100)
Minor conflict in mlx5 because changes happened to code that has
since moved.

Signed-off-by: David S. Miller <davem@davemloft.net>
191 files changed:
Documentation/core-api/xarray.rst
Documentation/devicetree/bindings/net/fsl-fman.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am335x-boneblack-common.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/kernel/hyp-stub.S
arch/powerpc/Kconfig
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0-best-effort.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1-best-effort.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-10g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-2.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-3.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-4.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-0-1g-5.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-10g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-0.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-1.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-2.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-3.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-4.dtsi
arch/powerpc/boot/dts/fsl/qoriq-fman3-1-1g-5.dtsi
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/xive-regs.h
arch/powerpc/sysdev/xive/common.c
drivers/atm/firestream.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/i915/gem/i915_gem_busy.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_gem.h
drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_job.h
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_mmu.h
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
drivers/hwmon/adt7475.c
drivers/hwmon/hwmon.c
drivers/hwmon/nct7802.c
drivers/input/evdev.c
drivers/input/misc/keyspan_remote.c
drivers/input/misc/max77650-onkey.c
drivers/input/misc/pm8xxx-vibrator.c
drivers/input/rmi4/rmi_f54.c
drivers/input/rmi4/rmi_smbus.c
drivers/input/tablet/aiptek.c
drivers/input/tablet/gtco.c
drivers/input/tablet/pegasus_notetaker.c
drivers/input/touchscreen/sun4i-ts.c
drivers/input/touchscreen/sur40.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/leds/leds-as3645a.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lm3532.c
drivers/leds/leds-max77650.c
drivers/leds/leds-rb532.c
drivers/leds/trigger/ledtrig-pattern.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci_am654.c
drivers/net/can/slcan.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/natsemi/sonic.c
drivers/net/ethernet/natsemi/sonic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/gtp.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/iwlwifi/dvm/tx.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/mediatek/mt76/airtime.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/pci/quirks.c
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
drivers/tee/optee/Kconfig
fs/btrfs/dev-replace.c
fs/btrfs/scrub.c
fs/ceph/mds_client.c
fs/io_uring.c
fs/readdir.c
fs/reiserfs/xattr.c
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/nfnetlink.h
include/linux/xarray.h
include/net/netns/nftables.h
include/trace/events/xen.h
include/uapi/linux/io_uring.h
kernel/power/snapshot.c
kernel/trace/trace.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/trace/trace_uprobe.c
lib/strncpy_from_user.c
lib/strnlen_user.c
lib/test_xarray.c
lib/xarray.c
net/atm/proc.c
net/caif/caif_usb.c
net/core/dev.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/utils.c
net/ipv4/esp4_offload.c
net/ipv4/fou.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/seg6_local.c
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nft_osf.c
net/rose/af_rose.c
net/sched/cls_api.c
net/sched/ematch.c
net/xfrm/xfrm_interface.c
scripts/recordmcount.c

index fcedc53..640934b 100644
@@ -25,10 +25,6 @@ good performance with large indices.  If your index can be larger than
 ``ULONG_MAX`` then the XArray is not the data type for you.  The most
 important user of the XArray is the page cache.
 
-Each non-``NULL`` entry in the array has three bits associated with
-it called marks.  Each mark may be set or cleared independently of
-the others.  You can iterate over entries which are marked.
-
 Normal pointers may be stored in the XArray directly.  They must be 4-byte
 aligned, which is true for any pointer returned from kmalloc() and
 alloc_page().  It isn't true for arbitrary user-space pointers,
@@ -41,12 +37,11 @@ When you retrieve an entry from the XArray, you can check whether it is
 a value entry by calling xa_is_value(), and convert it back to
 an integer by calling xa_to_value().
 
-Some users want to store tagged pointers instead of using the marks
-described above.  They can call xa_tag_pointer() to create an
-entry with a tag, xa_untag_pointer() to turn a tagged entry
-back into an untagged pointer and xa_pointer_tag() to retrieve
-the tag of an entry.  Tagged pointers use the same bits that are used
-to distinguish value entries from normal pointers, so each user must
+Some users want to tag the pointers they store in the XArray.  You can
+call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
+to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
+to retrieve the tag of an entry.  Tagged pointers use the same bits that
+are used to distinguish value entries from normal pointers, so you must
 decide whether you want to store value entries or tagged pointers in
 any particular XArray.
 
@@ -56,10 +51,9 @@ conflict with value entries or internal entries.
 An unusual feature of the XArray is the ability to create entries which
 occupy a range of indices.  Once stored to, looking up any index in
 the range will return the same entry as looking up any other index in
-the range.  Setting a mark on one index will set it on all of them.
-Storing to any index will store to all of them.  Multi-index entries can
-be explicitly split into smaller entries, or storing ``NULL`` into any
-entry will cause the XArray to forget about the range.
+the range.  Storing to any index will store to all of them.  Multi-index
+entries can be explicitly split into smaller entries, or storing ``NULL``
+into any entry will cause the XArray to forget about the range.
 
 Normal API
 ==========
@@ -87,17 +81,11 @@ If you want to only store a new entry to an index if the current entry
 at that index is ``NULL``, you can use xa_insert() which
 returns ``-EBUSY`` if the entry is not empty.
 
-You can enquire whether a mark is set on an entry by using
-xa_get_mark().  If the entry is not ``NULL``, you can set a mark
-on it by using xa_set_mark() and remove the mark from an entry by
-calling xa_clear_mark().  You can ask whether any entry in the
-XArray has a particular mark set by calling xa_marked().
-
 You can copy entries out of the XArray into a plain array by calling
-xa_extract().  Or you can iterate over the present entries in
-the XArray by calling xa_for_each().  You may prefer to use
-xa_find() or xa_find_after() to move to the next present
-entry in the XArray.
+xa_extract().  Or you can iterate over the present entries in the XArray
+by calling xa_for_each(), xa_for_each_start() or xa_for_each_range().
+You may prefer to use xa_find() or xa_find_after() to move to the next
+present entry in the XArray.
 
 Calling xa_store_range() stores the same entry in a range
 of indices.  If you do this, some of the other operations will behave
@@ -124,6 +112,31 @@ xa_destroy().  If the XArray entries are pointers, you may wish
 to free the entries first.  You can do this by iterating over all present
 entries in the XArray using the xa_for_each() iterator.
 
+Search Marks
+------------
+
+Each entry in the array has three bits associated with it called marks.
+Each mark may be set or cleared independently of the others.  You can
+iterate over marked entries by using the xa_for_each_marked() iterator.
+
+You can enquire whether a mark is set on an entry by using
+xa_get_mark().  If the entry is not ``NULL``, you can set a mark on it
+by using xa_set_mark() and remove the mark from an entry by calling
+xa_clear_mark().  You can ask whether any entry in the XArray has a
+particular mark set by calling xa_marked().  Erasing an entry from the
+XArray causes all marks associated with that entry to be cleared.
+
+Setting or clearing a mark on any index of a multi-index entry will
+affect all indices covered by that entry.  Querying the mark on any
+index will return the same result.
+
+There is no way to iterate over entries which are not marked; the data
+structure does not allow this to be implemented efficiently.  There are
+currently no iterators to search for logical combinations of bits (e.g.
+iterate over all entries which have both ``XA_MARK_1`` and ``XA_MARK_2``
+set, or iterate over all entries which have ``XA_MARK_0`` or ``XA_MARK_2``
+set).  It would be possible to add these if a user arises.
+
 Allocating XArrays
 ------------------
 
@@ -180,6 +193,8 @@ No lock needed:
 Takes RCU read lock:
  * xa_load()
  * xa_for_each()
+ * xa_for_each_start()
+ * xa_for_each_range()
  * xa_find()
  * xa_find_after()
  * xa_extract()
@@ -419,10 +434,9 @@ you last processed.  If you have interrupts disabled while iterating,
 then it is good manners to pause the iteration and reenable interrupts
 every ``XA_CHECK_SCHED`` entries.
 
-The xas_get_mark(), xas_set_mark() and
-xas_clear_mark() functions require the xa_state cursor to have
-been moved to the appropriate location in the xarray; they will do
-nothing if you have called xas_pause() or xas_set()
+The xas_get_mark(), xas_set_mark() and xas_clear_mark() functions require
+the xa_state cursor to have been moved to the appropriate location in the
+XArray; they will do nothing if you have called xas_pause() or xas_set()
 immediately before.
 
 You can call xas_set_update() to have a callback function
index 299c0dc..250f8d8 100644
@@ -403,6 +403,19 @@ PROPERTIES
                The settings and programming routines for internal/external
                MDIO are different. Must be included for internal MDIO.
 
+- fsl,erratum-a011043
+               Usage: optional
+               Value type: <boolean>
+               Definition: Indicates the presence of the A011043 erratum,
+               whereby the MDIO_CFG[MDIO_RD_ER] bit may be falsely set
+               when reading internal PCS registers. MDIO reads to internal
+               PCS registers may result in the MDIO_CFG[MDIO_RD_ER] bit
+               being set even when there is no error and the read data
+               (MDIO_DATA[MDIO_DATA]) is correct. Software may therefore
+               see a false read error when reading internal PCS registers
+               through MDIO. As a workaround, all internal MDIO accesses
+               should ignore the MDIO_CFG[MDIO_RD_ER] bit.
+
 For an internal PHY device on the internal mdio bus, a PHY node should be created.
 See the definition of the PHY node in booting-without-of.txt for an
 example of how to define a PHY (Internal PHY has no interrupt line).
index e7463b4..1726b4e 100644
@@ -6198,6 +6198,7 @@ ETHERNET PHY LIBRARY
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Heiner Kallweit <hkallweit1@gmail.com>
+R:     Russell King <linux@armlinux.org.uk>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-net-phydev
@@ -8570,7 +8571,7 @@ S:        Maintained
 F:     drivers/platform/x86/intel-vbtn.c
 
 INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
-M:     Stanislaw Gruszka <sgruszka@redhat.com>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 F:     drivers/net/wireless/intel/iwlegacy/
@@ -11500,6 +11501,7 @@ F:      drivers/net/dsa/
 
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Jakub Kicinski <kuba@kernel.org>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
@@ -13840,7 +13842,7 @@ S:      Maintained
 F:     arch/mips/ralink
 
 RALINK RT2X00 WIRELESS LAN DRIVER
-M:     Stanislaw Gruszka <sgruszka@redhat.com>
+M:     Stanislaw Gruszka <stf_xl@wp.pl>
 M:     Helmut Schaa <helmut.schaa@googlemail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
@@ -16622,7 +16624,7 @@ F:      kernel/time/ntp.c
 F:     tools/testing/selftests/timers/
 
 TIPC NETWORK LAYER
-M:     Jon Maloy <jon.maloy@ericsson.com>
+M:     Jon Maloy <jmaloy@redhat.com>
 M:     Ying Xue <ying.xue@windriver.com>
 L:     netdev@vger.kernel.org (core kernel code)
 L:     tipc-discussion@lists.sourceforge.net (user apps, general discussion)
index 0a7c37d..c50ef91 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 7ad0798..91f93bc 100644
 };
 
 / {
+       memory@80000000 {
+               device_type = "memory";
+               reg = <0x80000000 0x20000000>; /* 512 MB */
+       };
+
        clk_mcasp0_fixed: clk_mcasp0_fixed {
                #clock-cells = <0>;
                compatible = "fixed-clock";
index 078cb47..a6fbc08 100644
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&spi0_pins_default>;
        pinctrl-1 = <&spi0_pins_sleep>;
+       ti,pindir-d0-out-d1-in = <1>;
 };
 
 &spi1 {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&spi1_pins_default>;
        pinctrl-1 = <&spi1_pins_sleep>;
+       ti,pindir-d0-out-d1-in = <1>;
 };
 
 &usb2_phy1 {
index ae50203..6607fa8 100644
@@ -146,10 +146,9 @@ ARM_BE8(orr        r7, r7, #(1 << 25))     @ HSCTLR.EE
 #if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
        @ make CNTP_* and CNTPCT accessible from PL1
        mrc     p15, 0, r7, c0, c1, 1   @ ID_PFR1
-       lsr     r7, #16
-       and     r7, #0xf
-       cmp     r7, #1
-       bne     1f
+       ubfx    r7, r7, #16, #4
+       teq     r7, #0
+       beq     1f
        mrc     p15, 4, r7, c14, c1, 0  @ CNTHCTL
        orr     r7, r7, #3              @ PL1PCEN | PL1PCTEN
        mcr     p15, 4, r7, c14, c1, 0  @ CNTHCTL
index 1ec34e1..e2a4121 100644
@@ -455,11 +455,7 @@ config PPC_TRANSACTIONAL_MEM
 config PPC_UV
        bool "Ultravisor support"
        depends on KVM_BOOK3S_HV_POSSIBLE
-       select ZONE_DEVICE
-       select DEV_PAGEMAP_OPS
-       select DEVICE_PRIVATE
-       select MEMORY_HOTPLUG
-       select MEMORY_HOTREMOVE
+       depends on DEVICE_PRIVATE
        default n
        help
          This option paravirtualizes the kernel to run in POWER platforms that
index e1a961f..baa0c50 100644
@@ -63,6 +63,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy0: ethernet-phy@0 {
                        reg = <0x0>;
index c288f3c..9309560 100644
@@ -60,6 +60,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy6: ethernet-phy@0 {
                        reg = <0x0>;
index 94f3e71..ff4bd38 100644
@@ -63,6 +63,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy1: ethernet-phy@0 {
                        reg = <0x0>;
index 94a7698..1fa38ed 100644
@@ -60,6 +60,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy7: ethernet-phy@0 {
                        reg = <0x0>;
index b5ff5f7..a8cc978 100644
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy0: ethernet-phy@0 {
                        reg = <0x0>;
index ee44182..8b8bd70 100644
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy1: ethernet-phy@0 {
                        reg = <0x0>;
index f05f0d7..619c880 100644
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe5000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy2: ethernet-phy@0 {
                        reg = <0x0>;
index a9114ec..d7ebb73 100644
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe7000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy3: ethernet-phy@0 {
                        reg = <0x0>;
index 44dd00a..b151d69 100644
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe9000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy4: ethernet-phy@0 {
                        reg = <0x0>;
index 5b1b84b..adc0ae0 100644
@@ -59,6 +59,7 @@ fman@400000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xeb000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy5: ethernet-phy@0 {
                        reg = <0x0>;
index 0e1daae..435047e 100644
@@ -60,6 +60,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy14: ethernet-phy@0 {
                        reg = <0x0>;
index 68c5ef7..c098657 100644
@@ -60,6 +60,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xf3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy15: ethernet-phy@0 {
                        reg = <0x0>;
index 605363c..9d06824 100644
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe1000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy8: ethernet-phy@0 {
                        reg = <0x0>;
index 1955dfa..70e9477 100644
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe3000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy9: ethernet-phy@0 {
                        reg = <0x0>;
index 2c14764..ad96e65 100644
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe5000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy10: ethernet-phy@0 {
                        reg = <0x0>;
index b8b541f..034bc4b 100644
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe7000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy11: ethernet-phy@0 {
                        reg = <0x0>;
index 4b2cfdd..93ca23d 100644
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xe9000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy12: ethernet-phy@0 {
                        reg = <0x0>;
index 0a52ddf..23b3117 100644
@@ -59,6 +59,7 @@ fman@500000 {
                #size-cells = <0>;
                compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
                reg = <0xeb000 0x1000>;
+               fsl,erratum-a011043; /* must ignore read errors */
 
                pcsphy13: ethernet-phy@0 {
                        reg = <0x0>;
index 15b7500..3fa1b96 100644
@@ -600,8 +600,11 @@ extern void slb_set_size(u16 size);
  *
  */
 #define MAX_USER_CONTEXT       ((ASM_CONST(1) << CONTEXT_BITS) - 2)
+
+// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
 #define MIN_USER_CONTEXT       (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
-                                MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
+                                MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
+
 /*
  * For platforms that support 65-bit VA we limit the context bits
  */
index f2dfcd5..33aee74 100644
@@ -39,6 +39,7 @@
 
 #define XIVE_ESB_VAL_P         0x2
 #define XIVE_ESB_VAL_Q         0x1
+#define XIVE_ESB_INVALID       0xFF
 
 /*
  * Thread Management (aka "TM") registers
index f5fadbd..9651ca0 100644
@@ -972,12 +972,21 @@ static int xive_get_irqchip_state(struct irq_data *data,
                                  enum irqchip_irq_state which, bool *state)
 {
        struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
+       u8 pq;
 
        switch (which) {
        case IRQCHIP_STATE_ACTIVE:
-               *state = !xd->stale_p &&
-                        (xd->saved_p ||
-                         !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
+               pq = xive_esb_read(xd, XIVE_ESB_GET);
+
+               /*
+                * The esb value being all 1's means we couldn't get
+                * the PQ state of the interrupt through mmio. It may
+                * happen, for example when querying a PHB interrupt
+                * while the PHB is in an error state. We consider the
+                * interrupt to be inactive in that case.
+                */
+               *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
+                       (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
                return 0;
        default:
                return -EINVAL;
index aad00d2..cc87004 100644
@@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                        }
                        if (!to) {
                                printk ("No more free channels for FS50..\n");
+                               kfree(vcc);
                                return -EBUSY;
                        }
                        vcc->channo = dev->channo;
@@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                        if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) ||
                            ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) {
                                printk ("Channel is in use for FS155.\n");
+                               kfree(vcc);
                                return -EBUSY;
                        }
                }
@@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
                            tc, sizeof (struct fs_transmit_config));
                if (!tc) {
                        fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n");
+                       kfree(vcc);
                        return -ENOMEM;
                }
 
index 01a793a..30a1e3a 100644
@@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
 
        /* Renoir */
-       {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
        /* Navi12 */
        {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT},
index 5a61a55..e6afe4f 100644
@@ -1916,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
        return parent_lct + 1;
 }
 
-static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
+static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+{
+       switch (pdt) {
+       case DP_PEER_DEVICE_DP_LEGACY_CONV:
+       case DP_PEER_DEVICE_SST_SINK:
+               return true;
+       case DP_PEER_DEVICE_MST_BRANCHING:
+               /* For sst branch device */
+               if (!mcs)
+                       return true;
+
+               return false;
+       }
+       return true;
+}
+
+static int
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
+                   bool new_mcs)
 {
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        struct drm_dp_mst_branch *mstb;
        u8 rad[8], lct;
        int ret = 0;
 
-       if (port->pdt == new_pdt)
+       if (port->pdt == new_pdt && port->mcs == new_mcs)
                return 0;
 
        /* Teardown the old pdt, if there is one */
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /*
-                * If the new PDT would also have an i2c bus, don't bother
-                * with reregistering it
-                */
-               if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-                   new_pdt == DP_PEER_DEVICE_SST_SINK) {
-                       port->pdt = new_pdt;
-                       return 0;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /*
+                        * If the new PDT would also have an i2c bus,
+                        * don't bother with reregistering it
+                        */
+                       if (new_pdt != DP_PEER_DEVICE_NONE &&
+                           drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+                               port->pdt = new_pdt;
+                               port->mcs = new_mcs;
+                               return 0;
+                       }
 
-               /* remove i2c over sideband */
-               drm_dp_mst_unregister_i2c_bus(&port->aux);
-               break;
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               mutex_lock(&mgr->lock);
-               drm_dp_mst_topology_put_mstb(port->mstb);
-               port->mstb = NULL;
-               mutex_unlock(&mgr->lock);
-               break;
+                       /* remove i2c over sideband */
+                       drm_dp_mst_unregister_i2c_bus(&port->aux);
+               } else {
+                       mutex_lock(&mgr->lock);
+                       drm_dp_mst_topology_put_mstb(port->mstb);
+                       port->mstb = NULL;
+                       mutex_unlock(&mgr->lock);
+               }
        }
 
        port->pdt = new_pdt;
-       switch (port->pdt) {
-       case DP_PEER_DEVICE_DP_LEGACY_CONV:
-       case DP_PEER_DEVICE_SST_SINK:
-               /* add i2c over sideband */
-               ret = drm_dp_mst_register_i2c_bus(&port->aux);
-               break;
+       port->mcs = new_mcs;
 
-       case DP_PEER_DEVICE_MST_BRANCHING:
-               lct = drm_dp_calculate_rad(port, rad);
-               mstb = drm_dp_add_mst_branch_device(lct, rad);
-               if (!mstb) {
-                       ret = -ENOMEM;
-                       DRM_ERROR("Failed to create MSTB for port %p", port);
-                       goto out;
-               }
+       if (port->pdt != DP_PEER_DEVICE_NONE) {
+               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+                       /* add i2c over sideband */
+                       ret = drm_dp_mst_register_i2c_bus(&port->aux);
+               } else {
+                       lct = drm_dp_calculate_rad(port, rad);
+                       mstb = drm_dp_add_mst_branch_device(lct, rad);
+                       if (!mstb) {
+                               ret = -ENOMEM;
+                               DRM_ERROR("Failed to create MSTB for port %p",
+                                         port);
+                               goto out;
+                       }
 
-               mutex_lock(&mgr->lock);
-               port->mstb = mstb;
-               mstb->mgr = port->mgr;
-               mstb->port_parent = port;
+                       mutex_lock(&mgr->lock);
+                       port->mstb = mstb;
+                       mstb->mgr = port->mgr;
+                       mstb->port_parent = port;
 
-               /*
-                * Make sure this port's memory allocation stays
-                * around until its child MSTB releases it
-                */
-               drm_dp_mst_get_port_malloc(port);
-               mutex_unlock(&mgr->lock);
+                       /*
+                        * Make sure this port's memory allocation stays
+                        * around until its child MSTB releases it
+                        */
+                       drm_dp_mst_get_port_malloc(port);
+                       mutex_unlock(&mgr->lock);
 
-               /* And make sure we send a link address for this */
-               ret = 1;
-               break;
+                       /* And make sure we send a link address for this */
+                       ret = 1;
+               }
        }
 
 out:
@@ -2135,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
                goto error;
        }
 
-       if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
-            port->pdt == DP_PEER_DEVICE_SST_SINK) &&
-           port->port_num >= DP_MST_LOGICAL_PORT_0) {
+       if (port->pdt != DP_PEER_DEVICE_NONE &&
+           drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
                port->cached_edid = drm_get_edid(port->connector,
                                                 &port->aux.ddc);
                drm_connector_set_tile_property(port->connector);
@@ -2201,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        struct drm_dp_mst_port *port;
        int old_ddps = 0, ret;
        u8 new_pdt = DP_PEER_DEVICE_NONE;
+       bool new_mcs = 0;
        bool created = false, send_link_addr = false, changed = false;
 
        port = drm_dp_get_port(mstb, port_msg->port_number);
@@ -2245,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
        port->input = port_msg->input_port;
        if (!port->input)
                new_pdt = port_msg->peer_device_type;
-       port->mcs = port_msg->mcs;
+       new_mcs = port_msg->mcs;
        port->ddps = port_msg->ddps;
        port->ldps = port_msg->legacy_device_plug_status;
        port->dpcd_rev = port_msg->dpcd_revision;
@@ -2272,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                }
        }
 
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                send_link_addr = true;
        } else if (ret < 0) {
@@ -2286,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
         * we're coming out of suspend. In this case, always resend the link
         * address if there's an MSTB on this port
         */
-       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
+       if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+           port->mcs)
                send_link_addr = true;
 
        if (port->connector)
@@ -2323,6 +2341,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        struct drm_dp_mst_port *port;
        int old_ddps, old_input, ret, i;
        u8 new_pdt;
+       bool new_mcs;
        bool dowork = false, create_connector = false;
 
        port = drm_dp_get_port(mstb, conn_stat->port_number);
@@ -2354,7 +2373,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        old_ddps = port->ddps;
        old_input = port->input;
        port->input = conn_stat->input_port;
-       port->mcs = conn_stat->message_capability_status;
        port->ldps = conn_stat->legacy_device_plug_status;
        port->ddps = conn_stat->displayport_device_plug_status;
 
@@ -2367,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        }
 
        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
-
-       ret = drm_dp_port_set_pdt(port, new_pdt);
+       new_mcs = conn_stat->message_capability_status;
+       ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
        if (ret == 1) {
                dowork = true;
        } else if (ret < 0) {
@@ -3929,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
        switch (port->pdt) {
        case DP_PEER_DEVICE_NONE:
        case DP_PEER_DEVICE_MST_BRANCHING:
+               if (!port->mcs)
+                       ret = connector_status_connected;
                break;
 
        case DP_PEER_DEVICE_SST_SINK:
@@ -4541,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
        if (port->connector)
                port->mgr->cbs->destroy_connector(port->mgr, port->connector);
 
-       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
+       drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
        drm_dp_mst_put_port_malloc(port);
 }
 
index 3d4f577..25235ef 100644
@@ -9,16 +9,16 @@
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 
-static __always_inline u32 __busy_read_flag(u8 id)
+static __always_inline u32 __busy_read_flag(u16 id)
 {
-       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+       if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffff0000u;
 
        GEM_BUG_ON(id >= 16);
        return 0x10000u << id;
 }
 
-static __always_inline u32 __busy_write_id(u8 id)
+static __always_inline u32 __busy_write_id(u16 id)
 {
        /*
         * The uABI guarantees an active writer is also amongst the read
@@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id)
         * last_read - hence we always set both read and write busy for
         * last_write.
         */
-       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+       if (id == (u16)I915_ENGINE_CLASS_INVALID)
                return 0xffffffffu;
 
        return (id + 1) | __busy_read_flag(id);
 }
 
 static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
 {
        const struct i915_request *rq;
 
@@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
                return 0;
 
        /* Beware type-expansion follies! */
-       BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+       BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
        return flag(rq->engine->uabi_class);
 }
 
index 4c72d74..0dbb44d 100644
@@ -402,7 +402,7 @@ struct get_pages_work {
 
 static struct sg_table *
 __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
-                              struct page **pvec, int num_pages)
+                              struct page **pvec, unsigned long num_pages)
 {
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
@@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
-       const int npages = obj->base.size >> PAGE_SHIFT;
+       const unsigned long npages = obj->base.size >> PAGE_SHIFT;
+       unsigned long pinned;
        struct page **pvec;
-       int pinned, ret;
+       int ret;
 
        ret = -ENOMEM;
        pinned = 0;
@@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-       const int num_pages = obj->base.size >> PAGE_SHIFT;
+       const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        struct mm_struct *mm = obj->userptr.mm->mm;
        struct page **pvec;
        struct sg_table *pages;
index 17f1f14..2b44647 100644
@@ -274,8 +274,8 @@ struct intel_engine_cs {
        u8 class;
        u8 instance;
 
-       u8 uabi_class;
-       u8 uabi_instance;
+       u16 uabi_class;
+       u16 uabi_instance;
 
        u32 uabi_capabilities;
        u32 context_size;
index c083f51..4472780 100644
@@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
        pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
        do {
+               GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
                vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
 
                iter->dma += I915_GTT_PAGE_SIZE;
@@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 
        vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
        do {
+               GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
                vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
 
                iter.dma += I915_GTT_PAGE_SIZE;
index f61364f..88b431a 100644
@@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
                struct drm_file *file)
 {
+       struct panfrost_file_priv *priv = file->driver_priv;
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
+       struct panfrost_gem_mapping *mapping;
 
        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
-       args->offset = bo->node.start << PAGE_SHIFT;
+       mapping = panfrost_gem_mapping_get(bo, priv);
+       if (!mapping) {
+               drm_gem_object_put_unlocked(&bo->base.base);
+               return -EINVAL;
+       }
+
+       args->offset = mapping->mmnode.start << PAGE_SHIFT;
+       panfrost_gem_mapping_put(mapping);
 
        return 0;
 }
@@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev,
                  struct drm_panfrost_submit *args,
                  struct panfrost_job *job)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_object *bo;
+       unsigned int i;
+       int ret;
+
        job->bo_count = args->bo_handle_count;
 
        if (!job->bo_count)
@@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev,
        if (!job->implicit_fences)
                return -ENOMEM;
 
-       return drm_gem_objects_lookup(file_priv,
-                                     (void __user *)(uintptr_t)args->bo_handles,
-                                     job->bo_count, &job->bos);
+       ret = drm_gem_objects_lookup(file_priv,
+                                    (void __user *)(uintptr_t)args->bo_handles,
+                                    job->bo_count, &job->bos);
+       if (ret)
+               return ret;
+
+       job->mappings = kvmalloc_array(job->bo_count,
+                                      sizeof(struct panfrost_gem_mapping *),
+                                      GFP_KERNEL | __GFP_ZERO);
+       if (!job->mappings)
+               return -ENOMEM;
+
+       for (i = 0; i < job->bo_count; i++) {
+               struct panfrost_gem_mapping *mapping;
+
+               bo = to_panfrost_bo(job->bos[i]);
+               mapping = panfrost_gem_mapping_get(bo, priv);
+               if (!mapping) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               job->mappings[i] = mapping;
+       }
+
+       return ret;
 }
 
 /**
@@ -320,7 +357,9 @@ out:
 static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_get_bo_offset *args = data;
+       struct panfrost_gem_mapping *mapping;
        struct drm_gem_object *gem_obj;
        struct panfrost_gem_object *bo;
 
@@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
        }
        bo = to_panfrost_bo(gem_obj);
 
-       args->offset = bo->node.start << PAGE_SHIFT;
-
+       mapping = panfrost_gem_mapping_get(bo, priv);
        drm_gem_object_put_unlocked(gem_obj);
+
+       if (!mapping)
+               return -EINVAL;
+
+       args->offset = mapping->mmnode.start << PAGE_SHIFT;
+       panfrost_gem_mapping_put(mapping);
        return 0;
 }
 
 static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
+       struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct drm_panfrost_madvise *args = data;
        struct panfrost_device *pfdev = dev->dev_private;
        struct drm_gem_object *gem_obj;
+       struct panfrost_gem_object *bo;
+       int ret = 0;
 
        gem_obj = drm_gem_object_lookup(file_priv, args->handle);
        if (!gem_obj) {
@@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
                return -ENOENT;
        }
 
+       bo = to_panfrost_bo(gem_obj);
+
        mutex_lock(&pfdev->shrinker_lock);
+       mutex_lock(&bo->mappings.lock);
+       if (args->madv == PANFROST_MADV_DONTNEED) {
+               struct panfrost_gem_mapping *first;
+
+               first = list_first_entry(&bo->mappings.list,
+                                        struct panfrost_gem_mapping,
+                                        node);
+
+               /*
+                * If we want to mark the BO purgeable, there must be only one
+                * user: the caller FD.
+                * We could do something smarter and mark the BO purgeable only
+                * when all its users have marked it purgeable, but globally
+                * visible/shared BOs are likely to never be marked purgeable
+                * anyway, so let's not bother.
+                */
+               if (!list_is_singular(&bo->mappings.list) ||
+                   WARN_ON_ONCE(first->mmu != &priv->mmu)) {
+                       ret = -EINVAL;
+                       goto out_unlock_mappings;
+               }
+       }
+
        args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
 
        if (args->retained) {
-               struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
-
                if (args->madv == PANFROST_MADV_DONTNEED)
                        list_add_tail(&bo->base.madv_list,
                                      &pfdev->shrinker_list);
                else if (args->madv == PANFROST_MADV_WILLNEED)
                        list_del_init(&bo->base.madv_list);
        }
+
+out_unlock_mappings:
+       mutex_unlock(&bo->mappings.lock);
        mutex_unlock(&pfdev->shrinker_lock);
 
        drm_gem_object_put_unlocked(gem_obj);
-       return 0;
+       return ret;
 }
 
 int panfrost_unstable_ioctl_check(void)
index fd766b1..17b654e 100644
@@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
        list_del_init(&bo->base.madv_list);
        mutex_unlock(&pfdev->shrinker_lock);
 
+       /*
+        * If we still have mappings attached to the BO, there's a problem in
+        * our refcounting.
+        */
+       WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+
        if (bo->sgts) {
                int i;
                int n_sgt = bo->base.base.size / SZ_2M;
@@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
        drm_gem_shmem_free_object(obj);
 }
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+                        struct panfrost_file_priv *priv)
+{
+       struct panfrost_gem_mapping *iter, *mapping = NULL;
+
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(iter, &bo->mappings.list, node) {
+               if (iter->mmu == &priv->mmu) {
+                       kref_get(&iter->refcount);
+                       mapping = iter;
+                       break;
+               }
+       }
+       mutex_unlock(&bo->mappings.lock);
+
+       return mapping;
+}
+
+static void
+panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
+{
+       struct panfrost_file_priv *priv;
+
+       if (mapping->active)
+               panfrost_mmu_unmap(mapping);
+
+       priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu);
+       spin_lock(&priv->mm_lock);
+       if (drm_mm_node_allocated(&mapping->mmnode))
+               drm_mm_remove_node(&mapping->mmnode);
+       spin_unlock(&priv->mm_lock);
+}
+
+static void panfrost_gem_mapping_release(struct kref *kref)
+{
+       struct panfrost_gem_mapping *mapping;
+
+       mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
+
+       panfrost_gem_teardown_mapping(mapping);
+       drm_gem_object_put_unlocked(&mapping->obj->base.base);
+       kfree(mapping);
+}
+
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
+{
+       if (!mapping)
+               return;
+
+       kref_put(&mapping->refcount, panfrost_gem_mapping_release);
+}
+
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
+{
+       struct panfrost_gem_mapping *mapping;
+
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(mapping, &bo->mappings.list, node)
+               panfrost_gem_teardown_mapping(mapping);
+       mutex_unlock(&bo->mappings.lock);
+}
+
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
        int ret;
@@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
        struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_mapping *mapping;
+
+       mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
+       if (!mapping)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&mapping->node);
+       kref_init(&mapping->refcount);
+       drm_gem_object_get(obj);
+       mapping->obj = bo;
 
        /*
         * Executable buffers cannot cross a 16MB boundary as the program
@@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
        else
                align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
 
-       bo->mmu = &priv->mmu;
+       mapping->mmu = &priv->mmu;
        spin_lock(&priv->mm_lock);
-       ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+       ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode,
                                         size >> PAGE_SHIFT, align, color, 0);
        spin_unlock(&priv->mm_lock);
        if (ret)
-               return ret;
+               goto err;
 
        if (!bo->is_heap) {
-               ret = panfrost_mmu_map(bo);
-               if (ret) {
-                       spin_lock(&priv->mm_lock);
-                       drm_mm_remove_node(&bo->node);
-                       spin_unlock(&priv->mm_lock);
-               }
+               ret = panfrost_mmu_map(mapping);
+               if (ret)
+                       goto err;
        }
+
+       mutex_lock(&bo->mappings.lock);
+       WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
+       list_add_tail(&mapping->node, &bo->mappings.list);
+       mutex_unlock(&bo->mappings.lock);
+
+err:
+       if (ret)
+               panfrost_gem_mapping_put(mapping);
        return ret;
 }
 
 void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
 {
-       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_file_priv *priv = file_priv->driver_priv;
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+       struct panfrost_gem_mapping *mapping = NULL, *iter;
 
-       if (bo->is_mapped)
-               panfrost_mmu_unmap(bo);
+       mutex_lock(&bo->mappings.lock);
+       list_for_each_entry(iter, &bo->mappings.list, node) {
+               if (iter->mmu == &priv->mmu) {
+                       mapping = iter;
+                       list_del(&iter->node);
+                       break;
+               }
+       }
+       mutex_unlock(&bo->mappings.lock);
 
-       spin_lock(&priv->mm_lock);
-       if (drm_mm_node_allocated(&bo->node))
-               drm_mm_remove_node(&bo->node);
-       spin_unlock(&priv->mm_lock);
+       panfrost_gem_mapping_put(mapping);
 }
 
 static int panfrost_gem_pin(struct drm_gem_object *obj)
@@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
        if (!obj)
                return NULL;
 
+       INIT_LIST_HEAD(&obj->mappings.list);
+       mutex_init(&obj->mappings.lock);
        obj->base.base.funcs = &panfrost_gem_funcs;
 
        return &obj->base.base;
index 4b17e73..ca1bc90 100644
@@ -13,23 +13,46 @@ struct panfrost_gem_object {
        struct drm_gem_shmem_object base;
        struct sg_table *sgts;
 
-       struct panfrost_mmu *mmu;
-       struct drm_mm_node node;
-       bool is_mapped          :1;
+       /*
+        * Use a list for now. If searching a mapping ever becomes the
+        * bottleneck, we should consider using an RB-tree, or even better,
+        * let the core store drm_gem_object_mapping entries (where we
+        * could place driver specific data) instead of drm_gem_object ones
+        * in its drm_file->object_idr table.
+        *
+        * struct drm_gem_object_mapping {
+        *      struct drm_gem_object *obj;
+        *      void *driver_priv;
+        * };
+        */
+       struct {
+               struct list_head list;
+               struct mutex lock;
+       } mappings;
+
        bool noexec             :1;
        bool is_heap            :1;
 };
 
+struct panfrost_gem_mapping {
+       struct list_head node;
+       struct kref refcount;
+       struct panfrost_gem_object *obj;
+       struct drm_mm_node mmnode;
+       struct panfrost_mmu *mmu;
+       bool active             :1;
+};
+
 static inline
 struct  panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 {
        return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
-static inline
-struct  panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+static inline struct panfrost_gem_mapping *
+drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
 {
-       return container_of(node, struct panfrost_gem_object, node);
+       return container_of(node, struct panfrost_gem_mapping, mmnode);
 }
 
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
@@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void panfrost_gem_close(struct drm_gem_object *obj,
                        struct drm_file *file_priv);
 
+struct panfrost_gem_mapping *
+panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
+                        struct panfrost_file_priv *priv);
+void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
+void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
+
 void panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
 
index 458f0fa..f5dd7b2 100644
@@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc
 static bool panfrost_gem_purge(struct drm_gem_object *obj)
 {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 
        if (!mutex_trylock(&shmem->pages_lock))
                return false;
 
-       panfrost_mmu_unmap(to_panfrost_bo(obj));
+       panfrost_gem_teardown_mappings(bo);
        drm_gem_shmem_purge_locked(obj);
 
        mutex_unlock(&shmem->pages_lock);
index d411eb6..e364ee0 100644
@@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref)
        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);
 
-       if (job->bos) {
+       if (job->mappings) {
                for (i = 0; i < job->bo_count; i++)
+                       panfrost_gem_mapping_put(job->mappings[i]);
+               kvfree(job->mappings);
+       }
+
+       if (job->bos) {
+               struct panfrost_gem_object *bo;
+
+               for (i = 0; i < job->bo_count; i++) {
+                       bo = to_panfrost_bo(job->bos[i]);
                        drm_gem_object_put_unlocked(job->bos[i]);
+               }
+
                kvfree(job->bos);
        }
 
index 6245412..bbd3ba9 100644
@@ -32,6 +32,7 @@ struct panfrost_job {
 
        /* Exclusive fences we have taken from the BOs to wait for */
        struct dma_fence **implicit_fences;
+       struct panfrost_gem_mapping **mappings;
        struct drm_gem_object **bos;
        u32 bo_count;
 
index a3ed64a..763cfca 100644
@@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
        return 0;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
 {
+       struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct sg_table *sgt;
        int prot = IOMMU_READ | IOMMU_WRITE;
 
-       if (WARN_ON(bo->is_mapped))
+       if (WARN_ON(mapping->active))
                return 0;
 
        if (bo->noexec)
@@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);
 
-       mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
-       bo->is_mapped = true;
+       mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+                  prot, sgt);
+       mapping->active = true;
 
        return 0;
 }
 
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 {
+       struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-       struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
-       u64 iova = bo->node.start << PAGE_SHIFT;
-       size_t len = bo->node.size << PAGE_SHIFT;
+       struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+       u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+       size_t len = mapping->mmnode.size << PAGE_SHIFT;
        size_t unmapped_len = 0;
 
-       if (WARN_ON(!bo->is_mapped))
+       if (WARN_ON(!mapping->active))
                return;
 
-       dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
+       dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+               mapping->mmu->as, iova, len);
 
        while (unmapped_len < len) {
                size_t unmapped_page;
@@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
                unmapped_len += pgsize;
        }
 
-       panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
-       bo->is_mapped = false;
+       panfrost_mmu_flush_range(pfdev, mapping->mmu,
+                                mapping->mmnode.start << PAGE_SHIFT, len);
+       mapping->active = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
@@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
        free_io_pgtable_ops(mmu->pgtbl_ops);
 }
 
-static struct panfrost_gem_object *
-addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
 {
-       struct panfrost_gem_object *bo = NULL;
+       struct panfrost_gem_mapping *mapping = NULL;
        struct panfrost_file_priv *priv;
        struct drm_mm_node *node;
        u64 offset = addr >> PAGE_SHIFT;
@@ -418,8 +423,9 @@ found_mmu:
        drm_mm_for_each_node(node, &priv->mm) {
                if (offset >= node->start &&
                    offset < (node->start + node->size)) {
-                       bo = drm_mm_node_to_panfrost_bo(node);
-                       drm_gem_object_get(&bo->base.base);
+                       mapping = drm_mm_node_to_panfrost_mapping(node);
+
+                       kref_get(&mapping->refcount);
                        break;
                }
        }
@@ -427,7 +433,7 @@ found_mmu:
        spin_unlock(&priv->mm_lock);
 out:
        spin_unlock(&pfdev->as_lock);
-       return bo;
+       return mapping;
 }
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                                       u64 addr)
 {
        int ret, i;
+       struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
        pgoff_t page_offset;
        struct sg_table *sgt;
        struct page **pages;
 
-       bo = addr_to_drm_mm_node(pfdev, as, addr);
-       if (!bo)
+       bomapping = addr_to_mapping(pfdev, as, addr);
+       if (!bomapping)
                return -ENOENT;
 
+       bo = bomapping->obj;
        if (!bo->is_heap) {
                dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-                        bo->node.start << PAGE_SHIFT);
+                        bomapping->mmnode.start << PAGE_SHIFT);
                ret = -EINVAL;
                goto err_bo;
        }
-       WARN_ON(bo->mmu->as != as);
+       WARN_ON(bomapping->mmu->as != as);
 
        /* Assume 2MB alignment and size multiple */
        addr &= ~((u64)SZ_2M - 1);
        page_offset = addr >> PAGE_SHIFT;
-       page_offset -= bo->node.start;
+       page_offset -= bomapping->mmnode.start;
 
        mutex_lock(&bo->base.pages_lock);
 
@@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                goto err_map;
        }
 
-       mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+       mmu_map_sg(pfdev, bomapping->mmu, addr,
+                  IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
 
-       bo->is_mapped = true;
+       bomapping->active = true;
 
        dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
-       drm_gem_object_put_unlocked(&bo->base.base);
+       panfrost_gem_mapping_put(bomapping);
 
        return 0;
 
index 7c5b677..44fc2ed 100644
@@ -4,12 +4,12 @@
 #ifndef __PANFROST_MMU_H__
 #define __PANFROST_MMU_H__
 
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
 struct panfrost_file_priv;
 struct panfrost_mmu;
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
index 2c04e85..6848204 100644
@@ -25,7 +25,7 @@
 #define V4_SHADERS_PER_COREGROUP       4
 
 struct panfrost_perfcnt {
-       struct panfrost_gem_object *bo;
+       struct panfrost_gem_mapping *mapping;
        size_t bosize;
        void *buf;
        struct panfrost_file_priv *user;
@@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
        int ret;
 
        reinit_completion(&pfdev->perfcnt->dump_comp);
-       gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+       gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
        gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
        gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
        gpu_write(pfdev, GPU_INT_CLEAR,
@@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
-       perfcnt->bo = to_panfrost_bo(&bo->base);
-
        /* Map the perfcnt buf in the address space attached to file_priv. */
-       ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
+       ret = panfrost_gem_open(&bo->base, file_priv);
        if (ret)
                goto err_put_bo;
 
+       perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+                                                   user);
+       if (!perfcnt->mapping) {
+               ret = -EINVAL;
+               goto err_close_bo;
+       }
+
        perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
        if (IS_ERR(perfcnt->buf)) {
                ret = PTR_ERR(perfcnt->buf);
-               goto err_close_bo;
+               goto err_put_mapping;
        }
 
        /*
@@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
                gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
 
+       /* The BO ref is retained by the mapping. */
+       drm_gem_object_put_unlocked(&bo->base);
+
        return 0;
 
 err_vunmap:
-       drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+       drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+       panfrost_gem_mapping_put(perfcnt->mapping);
 err_close_bo:
-       panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
+       panfrost_gem_close(&bo->base, file_priv);
 err_put_bo:
        drm_gem_object_put_unlocked(&bo->base);
        return ret;
@@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
                  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
        perfcnt->user = NULL;
-       drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+       drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
        perfcnt->buf = NULL;
-       panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
-       drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
-       perfcnt->bo = NULL;
+       panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+       panfrost_gem_mapping_put(perfcnt->mapping);
+       perfcnt->mapping = NULL;
        pm_runtime_mark_last_busy(pfdev->dev);
        pm_runtime_put_autosuspend(pfdev->dev);
 
index 6c64d50..01c2eeb 100644
@@ -294,9 +294,10 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
        long reg;
 
        if (bypass_attn & (1 << channel))
-               reg = (volt * 1024) / 2250;
+               reg = DIV_ROUND_CLOSEST(volt * 1024, 2250);
        else
-               reg = (volt * r[1] * 1024) / ((r[0] + r[1]) * 2250);
+               reg = DIV_ROUND_CLOSEST(volt * r[1] * 1024,
+                                       (r[0] + r[1]) * 2250);
        return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
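
The gain from DIV_ROUND_CLOSEST() shows up near half-LSB boundaries. A standalone model of the difference, simplified to non-negative operands (the kernel macro also handles signed values):

#include <stdio.h>

/* simplified for non-negative x and d, like the values in volt2reg() */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	long volt = 1500;	/* arbitrary example input */

	printf("truncating: %ld\n", (volt * 1024) / 2250);		/* 682 */
	printf("rounding:   %ld\n", DIV_ROUND_CLOSEST(volt * 1024, 2250)); /* 683 */
	return 0;
}
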
 
index 1f3b30b..d018b20 100644
@@ -51,6 +51,7 @@ struct hwmon_device_attribute {
 
 #define to_hwmon_attr(d) \
        container_of(d, struct hwmon_device_attribute, dev_attr)
+#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
 
 /*
  * Thermal zone information
@@ -58,7 +59,7 @@ struct hwmon_device_attribute {
  * also provides the sensor index.
  */
 struct hwmon_thermal_data {
-       struct hwmon_device *hwdev;     /* Reference to hwmon device */
+       struct device *dev;             /* Reference to hwmon device */
        int index;                      /* sensor index */
 };
 
@@ -95,9 +96,27 @@ static const struct attribute_group *hwmon_dev_attr_groups[] = {
        NULL
 };
 
+static void hwmon_free_attrs(struct attribute **attrs)
+{
+       int i;
+
+       for (i = 0; attrs[i]; i++) {
+               struct device_attribute *dattr = to_dev_attr(attrs[i]);
+               struct hwmon_device_attribute *hattr = to_hwmon_attr(dattr);
+
+               kfree(hattr);
+       }
+       kfree(attrs);
+}
+
 static void hwmon_dev_release(struct device *dev)
 {
-       kfree(to_hwmon_device(dev));
+       struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+       if (hwdev->group.attrs)
+               hwmon_free_attrs(hwdev->group.attrs);
+       kfree(hwdev->groups);
+       kfree(hwdev);
 }
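
With attributes now allocated by plain kzalloc()/kcalloc() (see the hwmon_genattr() and __hwmon_create_attrs() hunks below), hwmon_dev_release() has to free them itself, and hwmon_free_attrs() recovers each containing structure through two container_of() steps. A standalone model of that recovery (struct layout condensed; the name field is a stand-in):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute { const char *name; };
struct device_attribute { struct attribute attr; };
struct hwmon_device_attribute {
	struct device_attribute dev_attr;
	char name[32];			/* condensed stand-in layout */
};

int main(void)
{
	struct hwmon_device_attribute hattr = { .name = "temp1_input" };
	struct attribute *a = &hattr.dev_attr.attr;

	/* the same two steps hwmon_free_attrs() takes before kfree() */
	struct device_attribute *dattr =
		container_of(a, struct device_attribute, attr);
	struct hwmon_device_attribute *h =
		container_of(dattr, struct hwmon_device_attribute, dev_attr);

	printf("%s\n", h->name);	/* prints temp1_input */
	return 0;
}
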
 
 static struct class hwmon_class = {
@@ -119,11 +138,11 @@ static DEFINE_IDA(hwmon_ida);
 static int hwmon_thermal_get_temp(void *data, int *temp)
 {
        struct hwmon_thermal_data *tdata = data;
-       struct hwmon_device *hwdev = tdata->hwdev;
+       struct hwmon_device *hwdev = to_hwmon_device(tdata->dev);
        int ret;
        long t;
 
-       ret = hwdev->chip->ops->read(&hwdev->dev, hwmon_temp, hwmon_temp_input,
+       ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
                                     tdata->index, &t);
        if (ret < 0)
                return ret;
@@ -137,8 +156,7 @@ static const struct thermal_zone_of_device_ops hwmon_thermal_ops = {
        .get_temp = hwmon_thermal_get_temp,
 };
 
-static int hwmon_thermal_add_sensor(struct device *dev,
-                                   struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
 {
        struct hwmon_thermal_data *tdata;
        struct thermal_zone_device *tzd;
@@ -147,10 +165,10 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        if (!tdata)
                return -ENOMEM;
 
-       tdata->hwdev = hwdev;
+       tdata->dev = dev;
        tdata->index = index;
 
-       tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata,
+       tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
                                                   &hwmon_thermal_ops);
        /*
         * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
@@ -162,8 +180,7 @@ static int hwmon_thermal_add_sensor(struct device *dev,
        return 0;
 }
 #else
-static int hwmon_thermal_add_sensor(struct device *dev,
-                                   struct hwmon_device *hwdev, int index)
+static int hwmon_thermal_add_sensor(struct device *dev, int index)
 {
        return 0;
 }
@@ -250,8 +267,7 @@ static bool is_string_attr(enum hwmon_sensor_types type, u32 attr)
               (type == hwmon_fan && attr == hwmon_fan_label);
 }
 
-static struct attribute *hwmon_genattr(struct device *dev,
-                                      const void *drvdata,
+static struct attribute *hwmon_genattr(const void *drvdata,
                                       enum hwmon_sensor_types type,
                                       u32 attr,
                                       int index,
@@ -279,7 +295,7 @@ static struct attribute *hwmon_genattr(struct device *dev,
        if ((mode & 0222) && !ops->write)
                return ERR_PTR(-EINVAL);
 
-       hattr = devm_kzalloc(dev, sizeof(*hattr), GFP_KERNEL);
+       hattr = kzalloc(sizeof(*hattr), GFP_KERNEL);
        if (!hattr)
                return ERR_PTR(-ENOMEM);
 
@@ -492,8 +508,7 @@ static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
        return n;
 }
 
-static int hwmon_genattrs(struct device *dev,
-                         const void *drvdata,
+static int hwmon_genattrs(const void *drvdata,
                          struct attribute **attrs,
                          const struct hwmon_ops *ops,
                          const struct hwmon_channel_info *info)
@@ -519,7 +534,7 @@ static int hwmon_genattrs(struct device *dev,
                        attr_mask &= ~BIT(attr);
                        if (attr >= template_size)
                                return -EINVAL;
-                       a = hwmon_genattr(dev, drvdata, info->type, attr, i,
+                       a = hwmon_genattr(drvdata, info->type, attr, i,
                                          templates[attr], ops);
                        if (IS_ERR(a)) {
                                if (PTR_ERR(a) != -ENOENT)
@@ -533,8 +548,7 @@ static int hwmon_genattrs(struct device *dev,
 }
 
 static struct attribute **
-__hwmon_create_attrs(struct device *dev, const void *drvdata,
-                    const struct hwmon_chip_info *chip)
+__hwmon_create_attrs(const void *drvdata, const struct hwmon_chip_info *chip)
 {
        int ret, i, aindex = 0, nattrs = 0;
        struct attribute **attrs;
@@ -545,15 +559,17 @@ __hwmon_create_attrs(struct device *dev, const void *drvdata,
        if (nattrs == 0)
                return ERR_PTR(-EINVAL);
 
-       attrs = devm_kcalloc(dev, nattrs + 1, sizeof(*attrs), GFP_KERNEL);
+       attrs = kcalloc(nattrs + 1, sizeof(*attrs), GFP_KERNEL);
        if (!attrs)
                return ERR_PTR(-ENOMEM);
 
        for (i = 0; chip->info[i]; i++) {
-               ret = hwmon_genattrs(dev, drvdata, &attrs[aindex], chip->ops,
+               ret = hwmon_genattrs(drvdata, &attrs[aindex], chip->ops,
                                     chip->info[i]);
-               if (ret < 0)
+               if (ret < 0) {
+                       hwmon_free_attrs(attrs);
                        return ERR_PTR(ret);
+               }
                aindex += ret;
        }
 
@@ -595,14 +611,13 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                        for (i = 0; groups[i]; i++)
                                ngroups++;
 
-               hwdev->groups = devm_kcalloc(dev, ngroups, sizeof(*groups),
-                                            GFP_KERNEL);
+               hwdev->groups = kcalloc(ngroups, sizeof(*groups), GFP_KERNEL);
                if (!hwdev->groups) {
                        err = -ENOMEM;
                        goto free_hwmon;
                }
 
-               attrs = __hwmon_create_attrs(dev, drvdata, chip);
+               attrs = __hwmon_create_attrs(drvdata, chip);
                if (IS_ERR(attrs)) {
                        err = PTR_ERR(attrs);
                        goto free_hwmon;
@@ -647,8 +662,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                                           hwmon_temp_input, j))
                                        continue;
                                if (info[i]->config[j] & HWMON_T_INPUT) {
-                                       err = hwmon_thermal_add_sensor(dev,
-                                                               hwdev, j);
+                                       err = hwmon_thermal_add_sensor(hdev, j);
                                        if (err) {
                                                device_unregister(hdev);
                                                /*
@@ -667,7 +681,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
        return hdev;
 
 free_hwmon:
-       kfree(hwdev);
+       hwmon_dev_release(hdev);
 ida_remove:
        ida_simple_remove(&hwmon_ida, id);
        return ERR_PTR(err);
index f3dd2a1..2e97e56 100644
@@ -23,8 +23,8 @@
 static const u8 REG_VOLTAGE[5] = { 0x09, 0x0a, 0x0c, 0x0d, 0x0e };
 
 static const u8 REG_VOLTAGE_LIMIT_LSB[2][5] = {
-       { 0x40, 0x00, 0x42, 0x44, 0x46 },
-       { 0x3f, 0x00, 0x41, 0x43, 0x45 },
+       { 0x46, 0x00, 0x40, 0x42, 0x44 },
+       { 0x45, 0x00, 0x3f, 0x41, 0x43 },
 };
 
 static const u8 REG_VOLTAGE_LIMIT_MSB[5] = { 0x48, 0x00, 0x47, 0x47, 0x48 };
@@ -58,6 +58,8 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
 struct nct7802_data {
        struct regmap *regmap;
        struct mutex access_lock; /* for multi-byte read and write operations */
+       u8 in_status;
+       struct mutex in_alarm_lock;
 };
 
 static ssize_t temp_type_show(struct device *dev,
@@ -368,6 +370,66 @@ static ssize_t in_store(struct device *dev, struct device_attribute *attr,
        return err ? : count;
 }
 
+static ssize_t in_alarm_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+       struct nct7802_data *data = dev_get_drvdata(dev);
+       int volt, min, max, ret;
+       unsigned int val;
+
+       mutex_lock(&data->in_alarm_lock);
+
+       /*
+        * The SMI Voltage status register is the only register giving a status
+        * for voltages. A bit is set for each input crossing a threshold, in
+        * both directions, but the "inside" or "outside" limits info is not
+        * available. Also, this register is cleared on read.
+        * Note: this is not explicitly spelled out in the datasheet, but
+        * was determined by experiment.
+        * To deal with this we use a status cache with one validity bit and
+        * one status bit for each input. Validity is cleared at startup and
+        * each time the register reports a change, and the status is processed
+        * by software based on current input value and limits.
+        */
+       ret = regmap_read(data->regmap, 0x1e, &val); /* SMI Voltage status */
+       if (ret < 0)
+               goto abort;
+
+       /* invalidate cached status for all inputs crossing a threshold */
+       data->in_status &= ~((val & 0x0f) << 4);
+
+       /* if cached status for requested input is invalid, update it */
+       if (!(data->in_status & (0x10 << sattr->index))) {
+               ret = nct7802_read_voltage(data, sattr->nr, 0);
+               if (ret < 0)
+                       goto abort;
+               volt = ret;
+
+               ret = nct7802_read_voltage(data, sattr->nr, 1);
+               if (ret < 0)
+                       goto abort;
+               min = ret;
+
+               ret = nct7802_read_voltage(data, sattr->nr, 2);
+               if (ret < 0)
+                       goto abort;
+               max = ret;
+
+               if (volt < min || volt > max)
+                       data->in_status |= (1 << sattr->index);
+               else
+                       data->in_status &= ~(1 << sattr->index);
+
+               data->in_status |= 0x10 << sattr->index;
+       }
+
+       ret = sprintf(buf, "%u\n", !!(data->in_status & (1 << sattr->index)));
+abort:
+       mutex_unlock(&data->in_alarm_lock);
+       return ret;
+}
+
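
The cache packs two nibbles into in_status: bits 0-3 hold the last computed alarm state for inputs 0-3 and bits 4-7 mark which of those states are still valid. A standalone model of the update rules above (the SMI register read is replaced by a function argument):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* bits 0-3: alarm state per input, bits 4-7: validity of that state */
static uint8_t in_status;

static void update(int index, uint8_t smi_val, bool out_of_limits)
{
	/* inputs that crossed a threshold lose their cached validity */
	in_status &= ~((smi_val & 0x0f) << 4);

	if (!(in_status & (0x10 << index))) {	/* cache miss: recompute */
		if (out_of_limits)
			in_status |= 1 << index;
		else
			in_status &= ~(1 << index);
		in_status |= 0x10 << index;	/* mark valid again */
	}
}

int main(void)
{
	update(2, 0x04, true);	/* input 2 crossed a limit, now out of range */
	printf("alarm=%d\n", !!(in_status & (1 << 2)));	/* prints alarm=1 */
	return 0;
}
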
 static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
@@ -660,7 +722,7 @@ static const struct attribute_group nct7802_temp_group = {
 static SENSOR_DEVICE_ATTR_2_RO(in0_input, in, 0, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in0_min, in, 0, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in0_max, in, 0, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, alarm, 0x1e, 3);
+static SENSOR_DEVICE_ATTR_2_RO(in0_alarm, in_alarm, 0, 3);
 static SENSOR_DEVICE_ATTR_2_RW(in0_beep, beep, 0x5a, 3);
 
 static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
@@ -668,19 +730,19 @@ static SENSOR_DEVICE_ATTR_2_RO(in1_input, in, 1, 0);
 static SENSOR_DEVICE_ATTR_2_RO(in2_input, in, 2, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in2_min, in, 2, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in2_max, in, 2, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, alarm, 0x1e, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in2_alarm, in_alarm, 2, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in2_beep, beep, 0x5a, 0);
 
 static SENSOR_DEVICE_ATTR_2_RO(in3_input, in, 3, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in3_min, in, 3, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in3_max, in, 3, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, alarm, 0x1e, 1);
+static SENSOR_DEVICE_ATTR_2_RO(in3_alarm, in_alarm, 3, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in3_beep, beep, 0x5a, 1);
 
 static SENSOR_DEVICE_ATTR_2_RO(in4_input, in, 4, 0);
 static SENSOR_DEVICE_ATTR_2_RW(in4_min, in, 4, 1);
 static SENSOR_DEVICE_ATTR_2_RW(in4_max, in, 4, 2);
-static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, alarm, 0x1e, 2);
+static SENSOR_DEVICE_ATTR_2_RO(in4_alarm, in_alarm, 4, 2);
 static SENSOR_DEVICE_ATTR_2_RW(in4_beep, beep, 0x5a, 2);
 
 static struct attribute *nct7802_in_attrs[] = {
@@ -1011,6 +1073,7 @@ static int nct7802_probe(struct i2c_client *client,
                return PTR_ERR(data->regmap);
 
        mutex_init(&data->access_lock);
+       mutex_init(&data->in_alarm_lock);
 
        ret = nct7802_init_chip(data);
        if (ret < 0)
index f918fca..cb6e3a5 100644
@@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file)
        struct evdev_client *client;
        int error;
 
-       client = kzalloc(struct_size(client, buffer, bufsize),
-                        GFP_KERNEL | __GFP_NOWARN);
-       if (!client)
-               client = vzalloc(struct_size(client, buffer, bufsize));
+       client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
 
index 83368f1..4650f4a 100644
@@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev)
        int retval = 0;
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x11, 0x40, 0x5601, 0x0, NULL, 0, 0);
+                                0x11, 0x40, 0x5601, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n",
                        __func__, retval);
@@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev)
        }
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x44, 0x40, 0x0, 0x0, NULL, 0, 0);
+                                0x44, 0x40, 0x0, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n",
                        __func__, retval);
@@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev)
        }
 
        retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-                                0x22, 0x40, 0x0, 0x0, NULL, 0, 0);
+                                0x22, 0x40, 0x0, 0x0, NULL, 0,
+                                USB_CTRL_SET_TIMEOUT);
        if (retval) {
                dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n",
                        __func__, retval);
index 4d875f2..ee55f22 100644
@@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev)
        return input_register_device(onkey->input);
 }
 
+static const struct of_device_id max77650_onkey_of_match[] = {
+       { .compatible = "maxim,max77650-onkey" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_onkey_of_match);
+
 static struct platform_driver max77650_onkey_driver = {
        .driver = {
                .name = "max77650-onkey",
+               .of_match_table = max77650_onkey_of_match,
        },
        .probe = max77650_onkey_probe,
 };
index ecd762f..53ad25e 100644
@@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on)
 
        if (regs->enable_mask)
                rc = regmap_update_bits(vib->regmap, regs->enable_addr,
-                                       on ? regs->enable_mask : 0, val);
+                                       regs->enable_mask, on ? ~0 : 0);
 
        return rc;
 }
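
The fix matters because regmap_update_bits() takes (mask, value) in that order: only bits inside the mask are modified, and their new state comes from the value argument, so the old call put the on/off decision in the mask slot. The read-modify-write it performs is equivalent to this standalone model:

#include <stdio.h>

static unsigned int reg = 0xf0;	/* stand-in for the hardware register */

static void update_bits(unsigned int mask, unsigned int val)
{
	reg = (reg & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int enable_mask = 0x01;

	update_bits(enable_mask, ~0u);	/* on:  set only the masked bit */
	printf("on:  0x%02x\n", reg);	/* 0xf1 */
	update_bits(enable_mask, 0);	/* off: clear only the masked bit */
	printf("off: 0x%02x\n", reg);	/* 0xf0 */
	return 0;
}
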
index 0bc01cf..6b23e67 100644
 #define F54_NUM_TX_OFFSET       1
 #define F54_NUM_RX_OFFSET       0
 
+/*
+ * The SMBus protocol can read at most 32 bytes at a time.
+ * Reading in 32-byte chunks works fine over i2c/spi as well.
+ */
+#define F54_REPORT_DATA_SIZE   32
+
 /* F54 commands */
 #define F54_GET_REPORT          1
 #define F54_FORCE_CAL           2
@@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work)
        int report_size;
        u8 command;
        int error;
+       int i;
 
        report_size = rmi_f54_get_report_size(f54);
        if (report_size == 0) {
@@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work)
 
        rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n");
 
-       fifo[0] = 0;
-       fifo[1] = 0;
-       error = rmi_write_block(fn->rmi_dev,
-                               fn->fd.data_base_addr + F54_FIFO_OFFSET,
-                               fifo, sizeof(fifo));
-       if (error) {
-               dev_err(&fn->dev, "Failed to set fifo start offset\n");
-               goto abort;
-       }
+       for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
+               int size = min(F54_REPORT_DATA_SIZE, report_size - i);
+
+               fifo[0] = i & 0xff;
+               fifo[1] = i >> 8;
+               error = rmi_write_block(fn->rmi_dev,
+                                       fn->fd.data_base_addr + F54_FIFO_OFFSET,
+                                       fifo, sizeof(fifo));
+               if (error) {
+                       dev_err(&fn->dev, "Failed to set fifo start offset\n");
+                       goto abort;
+               }
 
-       error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
-                              F54_REPORT_DATA_OFFSET, f54->report_data,
-                              report_size);
-       if (error) {
-               dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
-                       __func__, report_size, error);
-               goto abort;
+               error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr +
+                                      F54_REPORT_DATA_OFFSET,
+                                      f54->report_data + i, size);
+               if (error) {
+                       dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n",
+                               __func__, size, error);
+                       goto abort;
+               }
        }
 
 abort:
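
The rewritten loop reads the report in 32-byte windows, reprogramming the FIFO start offset (low byte first) before each block. A standalone model of the chunking arithmetic, with the transport calls stubbed out:

#include <stdio.h>
#include <string.h>

#define F54_REPORT_DATA_SIZE 32
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned char device_fifo[100];	/* stand-in for the F54 data window */

int main(void)
{
	unsigned char report[100];
	unsigned char fifo[2];
	int report_size = (int)sizeof(report), i;

	for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) {
		int size = MIN(F54_REPORT_DATA_SIZE, report_size - i);

		/* reprogram the FIFO start offset before each block,
		 * low byte first, as rmi_f54_work() now does */
		fifo[0] = i & 0xff;
		fifo[1] = i >> 8;
		printf("offset %d: %d bytes\n", fifo[0] | (fifo[1] << 8), size);
		memcpy(report + i, device_fifo + i, size); /* rmi_read_block() stand-in */
	}
	return 0;
}
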
index b313c57..2407ea4 100644
@@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
                /* prepare to write next block of bytes */
                cur_len -= SMB_MAX_COUNT;
                databuff += SMB_MAX_COUNT;
+               rmiaddr += SMB_MAX_COUNT;
        }
 exit:
        mutex_unlock(&rmi_smb->page_mutex);
@@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
                /* prepare to read next block of bytes */
                cur_len -= SMB_MAX_COUNT;
                databuff += SMB_MAX_COUNT;
+               rmiaddr += SMB_MAX_COUNT;
        }
 
        retval = 0;
index 2ca586f..e08b0ef 100644
@@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
 
        aiptek->inputdev = inputdev;
        aiptek->intf = intf;
-       aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
+       aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
        aiptek->inDelay = 0;
        aiptek->endDelay = 0;
        aiptek->previousJitterable = 0;
@@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
        input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
 
        /* Verify that a device really has an endpoint */
-       if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&intf->dev,
                        "interface has %d endpoints, but must have minimum 1\n",
-                       intf->altsetting[0].desc.bNumEndpoints);
+                       intf->cur_altsetting->desc.bNumEndpoints);
                err = -EINVAL;
                goto fail3;
        }
-       endpoint = &intf->altsetting[0].endpoint[0].desc;
+       endpoint = &intf->cur_altsetting->endpoint[0].desc;
 
        /* Go set up our URB, which is called when the tablet receives
         * input.
index 3503122..96d6557 100644
@@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
        }
 
        /* Sanity check that a device has an endpoint */
-       if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+       if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&usbinterface->dev,
                        "Invalid number of endpoints\n");
                error = -EINVAL;
                goto err_free_urb;
        }
 
-       /*
-        * The endpoint is always altsetting 0, we know this since we know
-        * this device only has one interrupt endpoint
-        */
-       endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
+       endpoint = &usbinterface->cur_altsetting->endpoint[0].desc;
 
        /* Some debug */
        dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting);
@@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
        if (usb_endpoint_xfer_int(endpoint))
                dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n");
 
-       dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen);
+       dev_dbg(&usbinterface->dev, "interface extra len:%d\n",
+               usbinterface->cur_altsetting->extralen);
 
        /*
         * Find the HID descriptor so we can find out the size of the
@@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface,
        input_dev->dev.parent = &usbinterface->dev;
 
        /* Setup the URB, it will be posted later on open of input device */
-       endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
-
        usb_fill_int_urb(gtco->urbinfo,
                         udev,
                         usb_rcvintpipe(udev,
index a1f3a0c..38f0874 100644
@@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf,
                return -ENODEV;
 
        /* Sanity check that the device has an endpoint */
-       if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+       if (intf->cur_altsetting->desc.bNumEndpoints < 1) {
                dev_err(&intf->dev, "Invalid number of endpoints\n");
                return -EINVAL;
        }
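
This probe fix follows the same pattern as the aiptek and gtco changes above: altsetting[] is indexed by descriptor order and need not match the setting currently selected, while cur_altsetting always points at the active one. The idiom, condensed into a sketch (kernel context assumed; not a standalone program):

static int example_probe_check(struct usb_interface *intf,
			       struct usb_endpoint_descriptor **ep)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 1)
		return -EINVAL;
	*ep = &alt->endpoint[0].desc;
	return 0;
}
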
index 0af0fe8..742a7e9 100644
@@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct device *hwmon;
+       struct thermal_zone_device *thermal;
        int error;
        u32 reg;
        bool ts_attached;
@@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        if (IS_ERR(hwmon))
                return PTR_ERR(hwmon);
 
-       devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
+       thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts,
+                                                      &sun4i_ts_tz_ops);
+       if (IS_ERR(thermal))
+               return PTR_ERR(thermal);
 
        writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
 
index 1dd47dd..34d31c7 100644
@@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface,
        int error;
 
        /* Check if we really have the right interface. */
-       iface_desc = &interface->altsetting[0];
+       iface_desc = interface->cur_altsetting;
        if (iface_desc->desc.bInterfaceClass != 0xFF)
                return -ENODEV;
 
index 568c523..483f7bc 100644
@@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 {
        struct pci_dev *pdev = iommu->dev;
-       u64 val = 0xabcd, val2 = 0;
+       u64 val = 0xabcd, val2 = 0, save_reg = 0;
 
        if (!iommu_feature(iommu, FEATURE_PC))
                return;
 
        amd_iommu_pc_present = true;
 
+       /* save the value to restore, if writable */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
+               goto pc_false;
+
        /* Check if the performance counters can be written to */
        if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
            (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
-           (val != val2)) {
-               pci_err(pdev, "Unable to write to IOMMU perf counter.\n");
-               amd_iommu_pc_present = false;
-               return;
-       }
+           (val != val2))
+               goto pc_false;
+
+       /* restore */
+       if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
+               goto pc_false;
 
        pci_info(pdev, "IOMMU performance counters supported\n");
 
        val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
        iommu->max_banks = (u8) ((val >> 12) & 0x3f);
        iommu->max_counters = (u8) ((val >> 7) & 0xf);
+
+       return;
+
+pc_false:
+       pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
+       amd_iommu_pc_present = false;
+       return;
 }
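
The probe now preserves the counter's contents: read the current value, write and read back a test pattern, then restore the saved value, bailing out to pc_false if any step fails. A standalone model of that sequence, with the get/set accessor stubbed over a plain variable:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

static uint64_t counter = 42;	/* stand-in for the perf counter register */

static bool pc_get_set(uint64_t *val, bool is_write)
{
	if (is_write)
		counter = *val;
	else
		*val = counter;
	return false;	/* 0 == success, mirroring iommu_pc_get_set_reg() */
}

int main(void)
{
	uint64_t val = 0xabcd, val2 = 0, save = 0;

	if (pc_get_set(&save, false))		/* save */
		goto pc_false;
	if (pc_get_set(&val, true) ||		/* test write */
	    pc_get_set(&val2, false) || val != val2)
		goto pc_false;
	if (pc_get_set(&save, true))		/* restore */
		goto pc_false;
	printf("counters writable, restored to %llu\n",
	       (unsigned long long)counter);	/* prints 42 */
	return 0;
pc_false:
	printf("perf counters unusable\n");
	return 1;
}
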
 
 static ssize_t amd_iommu_show_cap(struct device *dev,
index 1801f0a..932267f 100644
@@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
 
        spin_lock_irqsave(&device_domain_lock, flags);
        info = dev->archdata.iommu;
-       if (info)
+       if (info && info != DEFER_DEVICE_DOMAIN_INFO
+           && info != DUMMY_DEVICE_DOMAIN_INFO)
                __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
index b7e0ae1..e8922fa 100644
@@ -493,16 +493,17 @@ static int as3645a_parse_node(struct as3645a *flash,
                switch (id) {
                case AS_LED_FLASH:
                        flash->flash_node = child;
+                       fwnode_handle_get(child);
                        break;
                case AS_LED_INDICATOR:
                        flash->indicator_node = child;
+                       fwnode_handle_get(child);
                        break;
                default:
                        dev_warn(&flash->client->dev,
                                 "unknown LED %u encountered, ignoring\n", id);
                        break;
                }
-               fwnode_handle_get(child);
        }
 
        if (!flash->flash_node) {
index a5c73f3..2bf7459 100644
@@ -151,9 +151,14 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                struct gpio_led led = {};
                const char *state = NULL;
 
+               /*
+                * Acquire gpiod from DT with an uninitialized label, which
+                * will be updated after the LED class device is registered;
+                * only then is the final LED name known.
+                */
                led.gpiod = devm_fwnode_get_gpiod_from_child(dev, NULL, child,
                                                             GPIOD_ASIS,
-                                                            led.name);
+                                                            NULL);
                if (IS_ERR(led.gpiod)) {
                        fwnode_handle_put(child);
                        return ERR_CAST(led.gpiod);
@@ -186,6 +191,9 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
                        fwnode_handle_put(child);
                        return ERR_PTR(ret);
                }
+               /* Set gpiod label to match the corresponding LED name. */
+               gpiod_set_consumer_name(led_dat->gpiod,
+                                       led_dat->cdev.dev->kobj.name);
                priv->num_leds++;
        }
 
index 0507c65..491268b 100644
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // TI LM3532 LED driver
 // Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// http://www.ti.com/lit/ds/symlink/lm3532.pdf
 
 #include <linux/i2c.h>
 #include <linux/leds.h>
@@ -623,7 +624,7 @@ static int lm3532_parse_node(struct lm3532_data *priv)
 
                led->num_leds = fwnode_property_count_u32(child, "led-sources");
                if (led->num_leds > LM3532_MAX_LED_STRINGS) {
-                       dev_err(&priv->client->dev, "To many LED string defined\n");
+                       dev_err(&priv->client->dev, "Too many LED strings defined\n");
                        continue;
                }
 
index 4c2d0b3..a0d4b72 100644
@@ -135,9 +135,16 @@ err_node_put:
        return rv;
 }
 
+static const struct of_device_id max77650_led_of_match[] = {
+       { .compatible = "maxim,max77650-led" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max77650_led_of_match);
+
 static struct platform_driver max77650_led_driver = {
        .driver = {
                .name = "max77650-led",
+               .of_match_table = max77650_led_of_match,
        },
        .probe = max77650_led_probe,
 };
index db5af83..b6447c1 100644
@@ -21,7 +21,6 @@ static void rb532_led_set(struct led_classdev *cdev,
 {
        if (brightness)
                set_latch_u5(LO_ULED, 0);
-
        else
                set_latch_u5(0, LO_ULED);
 }
index 718729c..3abcafe 100644
@@ -455,7 +455,7 @@ static void __exit pattern_trig_exit(void)
 module_init(pattern_trig_init);
 module_exit(pattern_trig_exit);
 
-MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com");
-MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org");
+MODULE_AUTHOR("Raphael Teysseyre <rteysseyre@gmail.com>");
+MODULE_AUTHOR("Baolin Wang <baolin.wang@linaro.org>");
 MODULE_DESCRIPTION("LED Pattern trigger");
 MODULE_LICENSE("GPL v2");
index 7bc9505..403ac44 100644
@@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
                        misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
                if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
                        misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
-               if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50)
+               if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
                        clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
        }
 
index 1b1c26d..659a945 100644
@@ -3913,11 +3913,13 @@ int sdhci_setup_host(struct sdhci_host *host)
        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
-               if (host->clk_mul) {
-                       mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
+               if (host->clk_mul)
                        max_clk = host->max_clk * host->clk_mul;
-               } else
-                       mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
+               /*
+                * Divided Clock Mode minimum clock rate is always less than
+                * Programmable Clock Mode minimum clock rate.
+                */
+               mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
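
The simplification holds because for any clk_mul >= 1 the Programmable Clock Mode floor, (max_clk * clk_mul) / 1024, is always above the Divided Clock Mode floor, max_clk / 2046 (SDHCI_MAX_DIV_SPEC_300), so the divided-mode value is the true minimum. A quick numeric check, with a 200 MHz base clock and a clk_mul of 8 chosen arbitrarily:

#include <stdio.h>

#define SDHCI_MAX_DIV_SPEC_300 2046

int main(void)
{
	unsigned int max_clk = 200000000, clk_mul = 8;	/* example values */

	/* ~97.8 kHz: the real floor in Divided Clock Mode */
	printf("divided mode f_min:      %u Hz\n",
	       max_clk / SDHCI_MAX_DIV_SPEC_300);
	/* ~1.56 MHz: always higher, so never the minimum */
	printf("programmable mode f_min: %u Hz\n",
	       (unsigned int)(((unsigned long long)max_clk * clk_mul) / 1024));
	return 0;
}
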
 
index b8e897e..b8fe94f 100644
@@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
        writeb(val, host->ioaddr + reg);
 }
 
+static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+       int err = sdhci_execute_tuning(mmc, opcode);
+
+       if (err)
+               return err;
+       /*
+        * Tuning data remains in the buffer after tuning.
+        * Do a command and data reset to get rid of it.
+        */
+       sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+
+       return 0;
+}
+
+static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+       int cmd_error = 0;
+       int data_error = 0;
+
+       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+               return intmask;
+
+       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+       return 0;
+}
+
 static struct sdhci_ops sdhci_am654_ops = {
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = {
        .set_power = sdhci_am654_set_power,
        .set_clock = sdhci_am654_set_clock,
        .write_b = sdhci_am654_write_b,
+       .irq = sdhci_am654_cqhci_irq,
        .reset = sdhci_reset,
 };
 
 static const struct sdhci_pltfm_data sdhci_am654_pdata = {
        .ops = &sdhci_am654_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = {
        .flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT,
 };
 
-static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
-{
-       int cmd_error = 0;
-       int data_error = 0;
-
-       if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
-               return intmask;
-
-       cqhci_irq(host->mmc, intmask, cmd_error, data_error);
-
-       return 0;
-}
-
 static struct sdhci_ops sdhci_j721e_8bit_ops = {
        .get_max_clock = sdhci_pltfm_clk_get_max_clock,
        .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
@@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
        .ops = &sdhci_j721e_8bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = {
 
 static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
        .ops = &sdhci_j721e_4bit_ops,
-       .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
-                 SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
 };
 
@@ -549,6 +563,8 @@ static int sdhci_am654_probe(struct platform_device *pdev)
                goto pm_runtime_put;
        }
 
+       host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
+
        ret = sdhci_am654_init(host);
        if (ret)
                goto pm_runtime_put;
index 2e57122..2f5c287 100644
@@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work)
  */
 static void slcan_write_wakeup(struct tty_struct *tty)
 {
-       struct slcan *sl = tty->disc_data;
+       struct slcan *sl;
+
+       rcu_read_lock();
+       sl = rcu_dereference(tty->disc_data);
+       if (!sl)
+               goto out;
 
        schedule_work(&sl->tx_work);
+out:
+       rcu_read_unlock();
 }
 
 /* Send a can_frame to a TTY queue. */
@@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty)
                return;
 
        spin_lock_bh(&sl->lock);
-       tty->disc_data = NULL;
+       rcu_assign_pointer(tty->disc_data, NULL);
        sl->tty = NULL;
        spin_unlock_bh(&sl->lock);
 
+       synchronize_rcu();
        flush_work(&sl->tx_work);
 
        /* Flush network side */
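
The pattern here is the standard RCU-protected teardown: the writer NULLs the pointer, waits for all readers with synchronize_rcu(), and only then flushes the work those readers might have scheduled. The two sides of the hunks above, condensed (kernel-context sketch, not a standalone program):

/* reader side (write wakeup callback) */
static void example_write_wakeup(struct tty_struct *tty)
{
	struct slcan *sl;

	rcu_read_lock();
	sl = rcu_dereference(tty->disc_data);	/* may observe NULL */
	if (sl)
		schedule_work(&sl->tx_work);
	rcu_read_unlock();
}

/* writer side (close path) */
static void example_close(struct tty_struct *tty, struct slcan *sl)
{
	rcu_assign_pointer(tty->disc_data, NULL);
	synchronize_rcu();		/* all readers of sl are now done */
	flush_work(&sl->tx_work);	/* safe: no new work can be queued */
}
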
index 0139592..e50a153 100644
@@ -2157,8 +2157,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
                                  DMA_END_ADDR);
 
        /* Initialize Tx NAPI */
-       netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
-                      NAPI_POLL_WEIGHT);
+       netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
+                         NAPI_POLL_WEIGHT);
 }
 
 /* Initialize a RDMA ring */
index 58f89f6..97ff860 100644
@@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
                if (!(adapter->flags & FULL_INIT_DONE))
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
index ee3aab5..9d1f2f8 100644
@@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos)
 static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = seq_tab_get_idx(seq->private, *pos + 1);
-       if (v)
-               ++*pos;
+       ++(*pos);
        return v;
 }
 
index e9e4500..1a16449 100644
@@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
 static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = l2t_get_idx(seq, *pos);
-       if (v)
-               ++*pos;
+       ++(*pos);
        return v;
 }
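
Both this and the cxgb4 hunk above fix the same seq_file contract: the ->next() callback must advance *pos unconditionally, even when it returns NULL at the end of the table, otherwise seq_read() can keep calling ->next() at the same position forever. The required shape, condensed (kernel-context sketch; example_get_idx() is a hypothetical lookup):

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	v = example_get_idx(seq->private, *pos + 1);	/* hypothetical lookup */
	++(*pos);	/* always advance, even when v is NULL at end of table */
	return v;
}
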
 
index 41c6fa2..e190187 100644
@@ -110,7 +110,7 @@ do {                                                                        \
 /* Interface Mode Register (IF_MODE) */
 
 #define IF_MODE_MASK           0x00000003 /* 30-31 Mask on i/f mode bits */
-#define IF_MODE_XGMII          0x00000000 /* 30-31 XGMII (10G) interface */
+#define IF_MODE_10G            0x00000000 /* 30-31 10G interface */
 #define IF_MODE_GMII           0x00000002 /* 30-31 GMII (1G) interface */
 #define IF_MODE_RGMII          0x00000004
 #define IF_MODE_RGMII_AUTO     0x00008000
@@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
        tmp = 0;
        switch (phy_if) {
        case PHY_INTERFACE_MODE_XGMII:
-               tmp |= IF_MODE_XGMII;
+               tmp |= IF_MODE_10G;
                break;
        default:
                tmp |= IF_MODE_GMII;
index e03b30c..c82c85e 100644
@@ -49,6 +49,7 @@ struct tgec_mdio_controller {
 struct mdio_fsl_priv {
        struct  tgec_mdio_controller __iomem *mdio_base;
        bool    is_little_endian;
+       bool    has_a011043;
 };
 
 static u32 xgmac_read32(void __iomem *regs,
@@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
                return ret;
 
        /* Return all Fs if nothing was there */
-       if (xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) {
+       if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+           !priv->has_a011043) {
                dev_err(&bus->dev,
                        "Error while reading PHY%d reg at %d.%hhu\n",
                        phy_id, dev_addr, regnum);
@@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
                                                       "little-endian");
 
+       priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
+                                                 "fsl,erratum-a011043");
+
        ret = of_mdiobus_register(bus, np);
        if (ret) {
                dev_err(&pdev->dev, "cannot register MDIO bus\n");
index d405503..45b90eb 100644
@@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
         */
        pba_size--;
        if (pba_num_size < (((u32)pba_size * 2) + 1)) {
-               hw_dbg(hw, "Buffer to small for PBA data.\n");
+               hw_dbg(hw, "Buffer too small for PBA data.\n");
                return I40E_ERR_PARAM;
        }
 
index 778dab1..f260dd9 100644
@@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
 
 struct tx_sync_info {
        u64 rcd_sn;
-       s32 sync_len;
+       u32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
 };
@@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval {
 
 static enum mlx5e_ktls_sync_retval
 tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                u32 tcp_seq, struct tx_sync_info *info)
+                u32 tcp_seq, int datalen, struct tx_sync_info *info)
 {
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
        enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
+       bool ends_before;
 
        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
@@ -209,9 +210,21 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
                goto out;
        }
 
-       if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-               ret = tls_record_is_start_marker(record) ?
-                       MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+       /* There are the following cases:
+        * 1. packet ends before start marker: bypass offload.
+        * 2. packet starts before start marker and ends after it: drop,
+        *    not supported, breaks the contract with the kernel.
+        * 3. packet ends before tls record info starts: drop,
+        *    this packet was already acknowledged and its record info
+        *    was released.
+        */
+       ends_before = before(tcp_seq + datalen, tls_record_start_seq(record));
+
+       if (unlikely(tls_record_is_start_marker(record))) {
+               ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
+               goto out;
+       } else if (ends_before) {
+               ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
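
The ends_before test relies on before() from include/net/tcp.h, which compares 32-bit sequence numbers modulo 2^32 via a signed subtraction, so it stays correct across wraparound. A standalone check:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* same definition as before() in include/net/tcp.h */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t rec_start = 10, seq = 0xfffffff0, datalen = 8;

	/* 0xfffffff0 + 8 wraps to 0xfffffff8, still before seq 10 */
	printf("ends_before=%d\n", before(seq + datalen, rec_start)); /* 1 */
	return 0;
}
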
 
@@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        u8 num_wqebbs;
        int i = 0;
 
-       ret = tx_sync_info_get(priv_tx, seq, &info);
+       ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
        if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
                if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
                        stats->tls_skip_no_sync_data++;
@@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                goto err_out;
        }
 
-       if (unlikely(info.sync_len < 0)) {
-               if (likely(datalen <= -info.sync_len))
-                       return MLX5E_KTLS_SYNC_DONE;
-
-               stats->tls_drop_bypass_req++;
-               goto err_out;
-       }
-
        stats->tls_ooo++;
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
@@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
-       tx_post_resync_params(sq, priv_tx, info.rcd_sn);
-
        for (; i < info.nr_frags; i++) {
                unsigned int orig_fsz, frag_offset = 0, n = 0;
                skb_frag_t *f = &info.frags[i];
@@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                enum mlx5e_ktls_sync_retval ret =
                        mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
 
-               if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+               switch (ret) {
+               case MLX5E_KTLS_SYNC_DONE:
                        *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
-               else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                       break;
+               case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
+                       if (likely(!skb->decrypted))
+                               goto out;
+                       WARN_ON_ONCE(1);
+                       /* fall-through */
+               default: /* MLX5E_KTLS_SYNC_FAIL */
                        goto err_out;
-               else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
-                       goto out;
+               }
        }
 
        priv_tx->expected_seq = seq + datalen;
index 4f184c7..915afb2 100644
@@ -4084,6 +4084,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
        u32 rate_mbps;
        int err;
 
+       vport_num = rpriv->rep->vport;
+       if (vport_num >= MLX5_VPORT_ECPF) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Ingress rate limit is supported only for Eswitch ports connected to VFs");
+               return -EOPNOTSUPP;
+       }
+
        esw = priv->mdev->priv.eswitch;
        /* rate is given in bytes/sec.
         * First convert to bits/sec and then round to the nearest mbit/sec.
@@ -4092,8 +4099,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
         * 1 mbit/sec.
         */
        rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
-       vport_num = rpriv->rep->vport;
-
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
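
The conversion described in the comment rounds bytes/sec to the nearest whole mbit/sec, clamping any non-zero rate to at least 1. Worked through with sample numbers (standalone model of the expression above):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* bytes/sec -> nearest mbit/sec, minimum 1 for any non-zero rate */
static unsigned int rate_to_mbps(unsigned long long rate)
{
	return rate ? MAX((unsigned int)((rate * 8 + 500000) / 1000000), 1u) : 0;
}

int main(void)
{
	printf("%u\n", rate_to_mbps(125000));	/* 1.0 mbit/s  -> 1 */
	printf("%u\n", rate_to_mbps(300000));	/* 2.4 mbit/s  -> 2 */
	printf("%u\n", rate_to_mbps(10000));	/* 0.08 mbit/s -> clamps to 1 */
	printf("%u\n", rate_to_mbps(0));	/* 0 disables  -> 0 */
	return 0;
}
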
index 05b13a1..5acf60b 100644
@@ -1931,8 +1931,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
        struct mlx5_vport *vport;
        int i;
 
-       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
                memset(&vport->info, 0, sizeof(vport->info));
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+       }
 }
 
 /* Public E-Switch API */
index a6d0b62..979f13b 100644
@@ -1172,7 +1172,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
                return -EINVAL;
        }
 
-       mlx5_eswitch_disable(esw, false);
+       mlx5_eswitch_disable(esw, true);
        mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
        if (err) {
@@ -2014,7 +2014,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
 
 int esw_offloads_enable(struct mlx5_eswitch *esw)
 {
-       int err;
+       struct mlx5_vport *vport;
+       int err, i;
 
        if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
            MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
@@ -2031,6 +2032,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_vport_metadata;
 
+       /* Representor will control the vport link state */
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+               vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
        err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
        if (err)
                goto err_vports;
@@ -2060,7 +2065,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 {
        int err, err1;
 
-       mlx5_eswitch_disable(esw, false);
+       mlx5_eswitch_disable(esw, true);
        err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
index cf7b8da..f554cfd 100644
@@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x101d) },                      /* ConnectX-6 Dx */
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},   /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0x101f) },                      /* ConnectX-6 LX */
+       { PCI_VDEVICE(MELLANOX, 0x1021) },                      /* ConnectX-7 */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
index 51803ee..c7f10d4 100644
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2019 Mellanox Technologies. */
 
+#include <linux/smp.h>
 #include "dr_types.h"
 
 #define QUEUE_SIZE 128
@@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        if (!in)
                goto err_cqwq;
 
-       vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+       vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev);
        err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
        if (err) {
                kvfree(in);
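
dr_create_cq() can run in a preemptible context, where smp_processor_id() would trigger a DEBUG_PREEMPT warning because the task may migrate right after the read. The CPU number here only spreads CQs across completion vectors, so a momentarily stale value is harmless and raw_smp_processor_id() fits. A minimal sketch of the distinction (pick_comp_vector() is a hypothetical name):

    #include <linux/smp.h>

    /* raw_smp_processor_id() skips the "called from preemptible context"
     * debug check. That is acceptable when the result is only a
     * load-spreading hint: if the task migrates after the read, the
     * chosen vector is still valid, just not the local one.
     */
    static int pick_comp_vector(int num_vectors)
    {
            return raw_smp_processor_id() % num_vectors;
    }
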
index b43275c..3abfc81 100644
@@ -379,7 +379,6 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                list_for_each_entry(dst, &fte->node.children, node.list) {
                        enum mlx5_flow_destination_type type = dst->dest_attr.type;
-                       u32 id;
 
                        if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
                            num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
@@ -387,19 +386,10 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                                goto free_actions;
                        }
 
-                       switch (type) {
-                       case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
-                               id = dst->dest_attr.counter_id;
+                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
 
-                               tmp_action =
-                                       mlx5dr_action_create_flow_counter(id);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               actions[num_actions++] = tmp_action;
-                               break;
+                       switch (type) {
                        case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
                                tmp_action = create_ft_action(domain, dst);
                                if (!tmp_action) {
@@ -432,6 +422,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                }
        }
 
+       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       u32 id;
+
+                       if (dst->dest_attr.type !=
+                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -ENOSPC;
+                               goto free_actions;
+                       }
+
+                       id = dst->dest_attr.counter_id;
+                       tmp_action =
+                               mlx5dr_action_create_flow_counter(id);
+                       if (!tmp_action) {
+                               err = -ENOMEM;
+                               goto free_actions;
+                       }
+
+                       fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                       actions[num_actions++] = tmp_action;
+               }
+       }
+
        params.match_sz = match_sz;
        params.match_buf = (u64 *)fte->val;
        if (num_term_actions == 1) {
index 150b3a1..3d3cca5 100644
@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <linux/mutex.h>
 #include <net/net_namespace.h>
 #include <net/tc_act/tc_vlan.h>
 
@@ -25,6 +26,7 @@ struct mlxsw_sp_acl {
        struct mlxsw_sp_fid *dummy_fid;
        struct rhashtable ruleset_ht;
        struct list_head rules;
+       struct mutex rules_lock; /* Protects rules list */
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
@@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
                        goto err_ruleset_block_bind;
        }
 
+       mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
+       mutex_unlock(&mlxsw_sp->acl->rules_lock);
        block->rule_count++;
        block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
        return 0;
@@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
 
        block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
        ruleset->ht_key.block->rule_count--;
+       mutex_lock(&mlxsw_sp->acl->rules_lock);
        list_del(&rule->list);
+       mutex_unlock(&mlxsw_sp->acl->rules_lock);
        if (!ruleset->ht_key.chain_index &&
            mlxsw_sp_acl_ruleset_is_singular(ruleset))
                mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset,
@@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
        struct mlxsw_sp_acl_rule *rule;
        int err;
 
-       /* Protect internal structures from changes */
-       rtnl_lock();
+       mutex_lock(&acl->rules_lock);
        list_for_each_entry(rule, &acl->rules, list) {
                err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
                                                        rule);
                if (err)
                        goto err_rule_update;
        }
-       rtnl_unlock();
+       mutex_unlock(&acl->rules_lock);
        return 0;
 
 err_rule_update:
-       rtnl_unlock();
+       mutex_unlock(&acl->rules_lock);
        return err;
 }
 
@@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        acl->dummy_fid = fid;
 
        INIT_LIST_HEAD(&acl->rules);
+       mutex_init(&acl->rules_lock);
        err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam);
        if (err)
                goto err_acl_ops_init;
@@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 
 err_acl_ops_init:
+       mutex_destroy(&acl->rules_lock);
        mlxsw_sp_fid_put(fid);
 err_fid_get:
        rhashtable_destroy(&acl->ruleset_ht);
@@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
 
        cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
        mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam);
+       mutex_destroy(&acl->rules_lock);
        WARN_ON(!list_empty(&acl->rules));
        mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
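
The activity-update work item used to hide behind rtnl_lock(), which serializes it against all of networking configuration; the new rules_lock covers exactly one list, so rule add/delete and the periodic walk no longer contend with unrelated RTNL users. A minimal sketch of the lifecycle, with a hypothetical acl_ctx structure:

    #include <linux/bug.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct acl_ctx {
            struct list_head rules;
            struct mutex rules_lock;        /* protects rules */
    };

    static void acl_ctx_init(struct acl_ctx *ctx)
    {
            INIT_LIST_HEAD(&ctx->rules);
            mutex_init(&ctx->rules_lock);
    }

    /* Every add, delete and walk takes rules_lock; destroy it only once
     * the work item that walks the list has been cancelled.
     */
    static void acl_ctx_fini(struct acl_ctx *ctx)
    {
            mutex_destroy(&ctx->rules_lock);
            WARN_ON(!list_empty(&ctx->rules));
    }
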
index fdebc85..31be3ba 100644
@@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev)
 
        netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
 
+       spin_lock_init(&lp->lock);
+
        for (i = 0; i < SONIC_NUM_RRS; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
                if (skb == NULL) {
@@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev)
        return 0;
 }
 
+/* Wait for the SONIC to become idle. */
+static void sonic_quiesce(struct net_device *dev, u16 mask)
+{
+       struct sonic_local * __maybe_unused lp = netdev_priv(dev);
+       int i;
+       u16 bits;
+
+       for (i = 0; i < 1000; ++i) {
+               bits = SONIC_READ(SONIC_CMD) & mask;
+               if (!bits)
+                       return;
+               if (irqs_disabled() || in_interrupt())
+                       udelay(20);
+               else
+                       usleep_range(100, 200);
+       }
+       WARN_ONCE(1, "command deadline expired! 0x%04x\n", bits);
+}
 
 /*
  * Close the SONIC device
@@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev)
        /*
         * stop the SONIC, disable interrupts
         */
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       sonic_quiesce(dev, SONIC_CR_ALL);
+
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
         * put the Sonic into software-reset mode and
         * disable all interrupts before releasing DMA buffers
         */
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       sonic_quiesce(dev, SONIC_CR_ALL);
+
        SONIC_WRITE(SONIC_IMR, 0);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
  *   wake the tx queue
  * Concurrently with all of this, the SONIC is potentially writing to
  * the status flags of the TDs.
- * Until some mutual exclusion is added, this code will not work with SMP. However,
- * MIPS Jazz machines and m68k Macs were all uni-processor machines.
  */
 
 static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
@@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
        struct sonic_local *lp = netdev_priv(dev);
        dma_addr_t laddr;
        int length;
-       int entry = lp->next_tx;
+       int entry;
+       unsigned long flags;
 
        netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
 
@@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       spin_lock_irqsave(&lp->lock, flags);
+
+       entry = lp->next_tx;
+
        sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
        sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
        sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
@@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
        sonic_tda_put(dev, entry, SONIC_TD_LINK,
                sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);
 
-       /*
-        * Must set tx_skb[entry] only after clearing status, and
-        * before clearing EOL and before stopping queue
-        */
        wmb();
        lp->tx_len[entry] = length;
        lp->tx_laddr[entry] = laddr;
@@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 
        SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
 
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return NETDEV_TX_OK;
 }
 
@@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct sonic_local *lp = netdev_priv(dev);
        int status;
+       unsigned long flags;
+
+       /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt()
+        * with sonic_send_packet() so that the two functions can share state.
+        * Secondly, it makes sonic_interrupt() re-entrant, as that is required
+        * by macsonic which must use two IRQs with different priority levels.
+        */
+       spin_lock_irqsave(&lp->lock, flags);
+
+       status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+       if (!status) {
+               spin_unlock_irqrestore(&lp->lock, flags);
 
-       if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
                return IRQ_NONE;
+       }
 
        do {
+               SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */
+
                if (status & SONIC_INT_PKTRX) {
                        netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
                        sonic_rx(dev);  /* got packet(s) */
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
                }
 
                if (status & SONIC_INT_TXDN) {
@@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        int td_status;
                        int freed_some = 0;
 
-                       /* At this point, cur_tx is the index of a TD that is one of:
-                        *   unallocated/freed                          (status set   & tx_skb[entry] clear)
-                        *   allocated and sent                         (status set   & tx_skb[entry] set  )
-                        *   allocated and not yet sent                 (status clear & tx_skb[entry] set  )
-                        *   still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
+                       /* The state of a Transmit Descriptor may be inferred
+                        * from { tx_skb[entry], td_status } as follows.
+                        * { clear, clear } => the TD has never been used
+                        * { set,   clear } => the TD was handed to SONIC
+                        * { set,   set   } => the TD was handed back
+                        * { clear, set   } => the TD is available for re-use
                         */
 
                        netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
@@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                                if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
                                        break;
 
-                               if (td_status & 0x0001) {
+                               if (td_status & SONIC_TCR_PTX) {
                                        lp->stats.tx_packets++;
                                        lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
                                } else {
-                                       lp->stats.tx_errors++;
-                                       if (td_status & 0x0642)
+                                       if (td_status & (SONIC_TCR_EXD |
+                                           SONIC_TCR_EXC | SONIC_TCR_BCM))
                                                lp->stats.tx_aborted_errors++;
-                                       if (td_status & 0x0180)
+                                       if (td_status &
+                                           (SONIC_TCR_NCRS | SONIC_TCR_CRLS))
                                                lp->stats.tx_carrier_errors++;
-                                       if (td_status & 0x0020)
+                                       if (td_status & SONIC_TCR_OWC)
                                                lp->stats.tx_window_errors++;
-                                       if (td_status & 0x0004)
+                                       if (td_status & SONIC_TCR_FU)
                                                lp->stats.tx_fifo_errors++;
                                }
 
@@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        if (freed_some || lp->tx_skb[entry] == NULL)
                                netif_wake_queue(dev);  /* The ring is no longer full */
                        lp->cur_tx = entry;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
                }
 
                /*
@@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                if (status & SONIC_INT_RFO) {
                        netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
                                  __func__);
-                       lp->stats.rx_fifo_errors++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
                }
                if (status & SONIC_INT_RDE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
                                  __func__);
-                       lp->stats.rx_dropped++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
                }
                if (status & SONIC_INT_RBAE) {
                        netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
                                  __func__);
-                       lp->stats.rx_dropped++;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
                }
 
                /* counter overruns; all counters are 16bit wide */
-               if (status & SONIC_INT_FAE) {
+               if (status & SONIC_INT_FAE)
                        lp->stats.rx_frame_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
-               }
-               if (status & SONIC_INT_CRC) {
+               if (status & SONIC_INT_CRC)
                        lp->stats.rx_crc_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
-               }
-               if (status & SONIC_INT_MP) {
+               if (status & SONIC_INT_MP)
                        lp->stats.rx_missed_errors += 65536;
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
-               }
 
                /* transmit error */
                if (status & SONIC_INT_TXER) {
-                       if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
-                               netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
-                                         __func__);
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
+                       u16 tcr = SONIC_READ(SONIC_TCR);
+
+                       netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n",
+                                 __func__, tcr);
+
+                       if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC |
+                                  SONIC_TCR_FU | SONIC_TCR_BCM)) {
+                               /* Aborted transmission. Try again. */
+                               netif_stop_queue(dev);
+                               SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
+                       }
                }
 
                /* bus retry */
@@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
                        /* ... to help debug DMA problems causing endless interrupts. */
                        /* Bounce the eth interface to turn on the interrupt again. */
                        SONIC_WRITE(SONIC_IMR, 0);
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
                }
 
-               /* load CAM done */
-               if (status & SONIC_INT_LCD)
-                       SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
-       } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));
+               status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT;
+       } while (status);
+
+       spin_unlock_irqrestore(&lp->lock, flags);
+
        return IRQ_HANDLED;
 }
 
+/* Return the array index corresponding to a given Receive Buffer pointer. */
+static int index_from_addr(struct sonic_local *lp, dma_addr_t addr,
+                          unsigned int last)
+{
+       unsigned int i = last;
+
+       do {
+               i = (i + 1) & SONIC_RRS_MASK;
+               if (addr == lp->rx_laddr[i])
+                       return i;
+       } while (i != last);
+
+       return -ENOENT;
+}
+
+/* Allocate and map a new skb to be used as a receive buffer. */
+static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
+                          struct sk_buff **new_skb, dma_addr_t *new_addr)
+{
+       *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
+       if (!*new_skb)
+               return false;
+
+       if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
+               skb_reserve(*new_skb, 2);
+
+       *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
+                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
+       if (!*new_addr) {
+               dev_kfree_skb(*new_skb);
+               *new_skb = NULL;
+               return false;
+       }
+
+       return true;
+}
+
+/* Place a new receive resource in the Receive Resource Area and update RWP. */
+static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp,
+                            dma_addr_t old_addr, dma_addr_t new_addr)
+{
+       unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP));
+       unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP));
+       u32 buf;
+
+       /* The resources in the range [RRP, RWP) belong to the SONIC. This loop
+        * scans the other resources in the RRA, those in the range [RWP, RRP).
+        */
+       do {
+               buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) |
+                     sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L);
+
+               if (buf == old_addr)
+                       break;
+
+               entry = (entry + 1) & SONIC_RRS_MASK;
+       } while (entry != end);
+
+       WARN_ONCE(buf != old_addr, "failed to find resource!\n");
+
+       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16);
+       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff);
+
+       entry = (entry + 1) & SONIC_RRS_MASK;
+
+       SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry));
+}
+
 /*
  * We have a good packet(s), pass it/them up the network stack.
  */
 static void sonic_rx(struct net_device *dev)
 {
        struct sonic_local *lp = netdev_priv(dev);
-       int status;
        int entry = lp->cur_rx;
+       int prev_entry = lp->eol_rx;
+       bool rbe = false;
 
        while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
-               struct sk_buff *used_skb;
-               struct sk_buff *new_skb;
-               dma_addr_t new_laddr;
-               u16 bufadr_l;
-               u16 bufadr_h;
-               int pkt_len;
-
-               status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
-               if (status & SONIC_RCR_PRX) {
-                       /* Malloc up new buffer. */
-                       new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
-                       if (new_skb == NULL) {
-                               lp->stats.rx_dropped++;
+               u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
+
+               /* If the RD has LPKT set, the chip has finished with the RB */
+               if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) {
+                       struct sk_buff *new_skb;
+                       dma_addr_t new_laddr;
+                       u32 addr = (sonic_rda_get(dev, entry,
+                                                 SONIC_RD_PKTPTR_H) << 16) |
+                                  sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L);
+                       int i = index_from_addr(lp, addr, entry);
+
+                       if (i < 0) {
+                               WARN_ONCE(1, "failed to find buffer!\n");
                                break;
                        }
-                       /* provide 16 byte IP header alignment unless DMA requires otherwise */
-                       if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
-                               skb_reserve(new_skb, 2);
-
-                       new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
-                                              SONIC_RBSIZE, DMA_FROM_DEVICE);
-                       if (!new_laddr) {
-                               dev_kfree_skb(new_skb);
-                               printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
+
+                       if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) {
+                               struct sk_buff *used_skb = lp->rx_skb[i];
+                               int pkt_len;
+
+                               /* Pass the used buffer up the stack */
+                               dma_unmap_single(lp->device, addr, SONIC_RBSIZE,
+                                                DMA_FROM_DEVICE);
+
+                               pkt_len = sonic_rda_get(dev, entry,
+                                                       SONIC_RD_PKTLEN);
+                               skb_trim(used_skb, pkt_len);
+                               used_skb->protocol = eth_type_trans(used_skb,
+                                                                   dev);
+                               netif_rx(used_skb);
+                               lp->stats.rx_packets++;
+                               lp->stats.rx_bytes += pkt_len;
+
+                               lp->rx_skb[i] = new_skb;
+                               lp->rx_laddr[i] = new_laddr;
+                       } else {
+                               /* Failed to obtain a new buffer so re-use it */
+                               new_laddr = addr;
                                lp->stats.rx_dropped++;
-                               break;
                        }
-
-                       /* now we have a new skb to replace it, pass the used one up the stack */
-                       dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
-                       used_skb = lp->rx_skb[entry];
-                       pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
-                       skb_trim(used_skb, pkt_len);
-                       used_skb->protocol = eth_type_trans(used_skb, dev);
-                       netif_rx(used_skb);
-                       lp->stats.rx_packets++;
-                       lp->stats.rx_bytes += pkt_len;
-
-                       /* and insert the new skb */
-                       lp->rx_laddr[entry] = new_laddr;
-                       lp->rx_skb[entry] = new_skb;
-
-                       bufadr_l = (unsigned long)new_laddr & 0xffff;
-                       bufadr_h = (unsigned long)new_laddr >> 16;
-                       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
-                       sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
-               } else {
-                       /* This should only happen, if we enable accepting broken packets. */
-                       lp->stats.rx_errors++;
-                       if (status & SONIC_RCR_FAER)
-                               lp->stats.rx_frame_errors++;
-                       if (status & SONIC_RCR_CRCR)
-                               lp->stats.rx_crc_errors++;
-               }
-               if (status & SONIC_RCR_LPKT) {
-                       /*
-                        * this was the last packet out of the current receive buffer
-                        * give the buffer back to the SONIC
+                       /* If RBE is already asserted when RWP advances then
+                        * it's safe to clear RBE after processing this packet.
                         */
-                       lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
-                       if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
-                       SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
-                       if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
-                               netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
-                                         __func__);
-                               SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
-                       }
-               } else
-                       printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
-                            dev->name);
+                       rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE;
+                       sonic_update_rra(dev, lp, addr, new_laddr);
+               }
                /*
                 * give back the descriptor
                 */
-               sonic_rda_put(dev, entry, SONIC_RD_LINK,
-                       sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
+               sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0);
                sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
-               sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
-                       sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
-               lp->eol_rx = entry;
-               lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
+
+               prev_entry = entry;
+               entry = (entry + 1) & SONIC_RDS_MASK;
+       }
+
+       lp->cur_rx = entry;
+
+       if (prev_entry != lp->eol_rx) {
+               /* Advance the EOL flag to put descriptors back into service */
+               sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL |
+                             sonic_rda_get(dev, prev_entry, SONIC_RD_LINK));
+               sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL &
+                             sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK));
+               lp->eol_rx = prev_entry;
        }
+
+       if (rbe)
+               SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE);
        /*
         * If any worth-while packets have been received, netif_rx()
         * has done a mark_bh(NET_BH) for us and will work on them
@@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev)
                    (netdev_mc_count(dev) > 15)) {
                        rcr |= SONIC_RCR_AMC;
                } else {
+                       unsigned long flags;
+
                        netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
                                  netdev_mc_count(dev));
                        sonic_set_cam_enable(dev, 1);  /* always enable our own address */
@@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev)
                                i++;
                        }
                        SONIC_WRITE(SONIC_CDC, 16);
-                       /* issue Load CAM command */
                        SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
+
+                       /* LCAM and TXP commands can't be used simultaneously */
+                       spin_lock_irqsave(&lp->lock, flags);
+                       sonic_quiesce(dev, SONIC_CR_TXP);
                        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
+                       sonic_quiesce(dev, SONIC_CR_LCAM);
+                       spin_unlock_irqrestore(&lp->lock, flags);
                }
        }
 
@@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev)
  */
 static int sonic_init(struct net_device *dev)
 {
-       unsigned int cmd;
        struct sonic_local *lp = netdev_priv(dev);
        int i;
 
@@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev)
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
 
+       /* While in reset mode, clear CAM Enable register */
+       SONIC_WRITE(SONIC_CE, 0);
+
        /*
         * clear software reset flag, disable receiver, clear and
         * enable interrupts, then completely initialize the SONIC
         */
        SONIC_WRITE(SONIC_CMD, 0);
-       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
+       sonic_quiesce(dev, SONIC_CR_ALL);
 
        /*
         * initialize the receive resource area
@@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev)
        }
 
        /* initialize all RRA registers */
-       lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
-                                       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-       lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
-                                       SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
-
-       SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
-       SONIC_WRITE(SONIC_REA, lp->rra_end);
-       SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
-       SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
+       SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0));
+       SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS));
+       SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0));
+       SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1));
        SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
        SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
 
@@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev)
        netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
 
        SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
-       i = 0;
-       while (i++ < 100) {
-               if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
-                       break;
-       }
-
-       netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
-                 SONIC_READ(SONIC_CMD), i);
+       sonic_quiesce(dev, SONIC_CR_RRRA);
 
        /*
         * Initialize the receive descriptors so that they
@@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev)
         * load the CAM
         */
        SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
-
-       i = 0;
-       while (i++ < 100) {
-               if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
-                       break;
-       }
-       netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
-                 SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
+       sonic_quiesce(dev, SONIC_CR_LCAM);
 
        /*
         * enable receiver, disable loopback
         * and enable all interrupts
         */
-       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
        SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
        SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
        SONIC_WRITE(SONIC_ISR, 0x7fff);
        SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
-
-       cmd = SONIC_READ(SONIC_CMD);
-       if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
-               printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
+       SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN);
 
        netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
                  SONIC_READ(SONIC_CMD));
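
Several of the sonic.c hunks lean on the same ring arithmetic: an index wraps with a power-of-two mask, and index_from_addr() begins probing just after the previous hit because buffers normally complete in ring order. A standalone sketch of that scan with a 16-entry ring (ring_find() is a hypothetical stand-in):

    #include <stdio.h>

    #define RING_SIZE 16
    #define RING_MASK (RING_SIZE - 1)

    /* Probe order for last == 5 is 6, 7, ... 15, 0, ... 5; a full lap
     * without a match means the address is unknown.
     */
    static int ring_find(const unsigned long *slots, unsigned long addr,
                         unsigned int last)
    {
            unsigned int i = last;

            do {
                    i = (i + 1) & RING_MASK;
                    if (slots[i] == addr)
                            return i;
            } while (i != last);

            return -1;      /* stands in for -ENOENT */
    }

    int main(void)
    {
            unsigned long slots[RING_SIZE] = { [6] = 0xbeef };

            printf("%d\n", ring_find(slots, 0xbeef, 5));    /* 6  */
            printf("%d\n", ring_find(slots, 0xdead, 5));    /* -1 */
            return 0;
    }
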
index f154448..e0e4cba 100644
 #define SONIC_CR_TXP            0x0002
 #define SONIC_CR_HTX            0x0001
 
+#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \
+                     SONIC_CR_RXEN | SONIC_CR_TXP)
+
 /*
  * SONIC data configuration bits
  */
 #define SONIC_TCR_NCRS          0x0100
 #define SONIC_TCR_CRLS          0x0080
 #define SONIC_TCR_EXC           0x0040
+#define SONIC_TCR_OWC           0x0020
 #define SONIC_TCR_PMB           0x0008
 #define SONIC_TCR_FU            0x0004
 #define SONIC_TCR_BCM           0x0002
 #define SONIC_NUM_RDS   SONIC_NUM_RRS /* number of receive descriptors */
 #define SONIC_NUM_TDS   16            /* number of transmit descriptors */
 
-#define SONIC_RDS_MASK  (SONIC_NUM_RDS-1)
-#define SONIC_TDS_MASK  (SONIC_NUM_TDS-1)
+#define SONIC_RRS_MASK  (SONIC_NUM_RRS - 1)
+#define SONIC_RDS_MASK  (SONIC_NUM_RDS - 1)
+#define SONIC_TDS_MASK  (SONIC_NUM_TDS - 1)
 
 #define SONIC_RBSIZE   1520          /* size of one resource buffer */
 
@@ -312,8 +317,6 @@ struct sonic_local {
        u32 rda_laddr;              /* logical DMA address of RDA */
        dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */
        dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */
-       unsigned int rra_end;
-       unsigned int cur_rwp;
        unsigned int cur_rx;
        unsigned int cur_tx;           /* first unacked transmit packet */
        unsigned int eol_rx;
@@ -322,6 +325,7 @@ struct sonic_local {
        int msg_enable;
        struct device *device;         /* generic device */
        struct net_device_stats stats;
+       spinlock_t lock;
 };
 
 #define TX_TIMEOUT (3 * HZ)
@@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev);
    as far as we can tell. */
 /* OpenBSD calls this "SWO".  I'd like to think that sonic_buf_put()
    is a much better name. */
-static inline void sonic_buf_put(void* base, int bitmode,
+static inline void sonic_buf_put(u16 *base, int bitmode,
                                 int offset, __u16 val)
 {
        if (bitmode)
 #ifdef __BIG_ENDIAN
-               ((__u16 *) base + (offset*2))[1] = val;
+               __raw_writew(val, base + (offset * 2) + 1);
 #else
-               ((__u16 *) base + (offset*2))[0] = val;
+               __raw_writew(val, base + (offset * 2) + 0);
 #endif
        else
-               ((__u16 *) base)[offset] = val;
+               __raw_writew(val, base + (offset * 1) + 0);
 }
 
-static inline __u16 sonic_buf_get(void* base, int bitmode,
+static inline __u16 sonic_buf_get(u16 *base, int bitmode,
                                  int offset)
 {
        if (bitmode)
 #ifdef __BIG_ENDIAN
-               return ((volatile __u16 *) base + (offset*2))[1];
+               return __raw_readw(base + (offset * 2) + 1);
 #else
-               return ((volatile __u16 *) base + (offset*2))[0];
+               return __raw_readw(base + (offset * 2) + 0);
 #endif
        else
-               return ((volatile __u16 *) base)[offset];
+               return __raw_readw(base + (offset * 1) + 0);
 }
 
 /* Inlines that you should actually use for reading/writing DMA buffers */
@@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
                             (entry * SIZEOF_SONIC_RR) + offset);
 }
 
+static inline u16 sonic_rr_addr(struct net_device *dev, int entry)
+{
+       struct sonic_local *lp = netdev_priv(dev);
+
+       return lp->rra_laddr +
+              entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
+}
+
+static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr)
+{
+       struct sonic_local *lp = netdev_priv(dev);
+
+       return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR *
+                                             SONIC_BUS_SCALE(lp->dma_bitmode));
+}
+
 static const char version[] =
     "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n";
 
index a496390..07f9067 100644
@@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
                        break;
                }
                entry += p_hdr->size;
+               cond_resched();
        }
        p_dev->ahw->reset.seq_index = index;
 }
index afa10a1..f34ae8c 100644
@@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
                addr += 16;
                reg_read -= 16;
                ret += 16;
+               cond_resched();
        }
 out:
        mutex_unlock(&adapter->ahw->mem_lock);
@@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                buf_offset += entry->hdr.cap_size;
                entry_offset += entry->hdr.offset;
                buffer = fw_dump->data + buf_offset;
+               cond_resched();
        }
 
        fw_dump->clr = 1;
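
All three qlcnic hunks add the same thing: a cond_resched() inside a long register-polling or dump loop, so that on a non-preemptible kernel the loop cannot monopolize the CPU for long stretches. The pattern in isolation (long_copy() is a hypothetical example, not qlcnic code):

    #include <linux/sched.h>
    #include <linux/types.h>

    /* In a long process-context loop, cond_resched() yields only when a
     * reschedule is actually pending, so the common case stays cheap.
     */
    static void long_copy(u32 *dst, const u32 *src, size_t words)
    {
            size_t i;

            for (i = 0; i < words; i++) {
                    dst[i] = src[i];
                    cond_resched();
            }
    }
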
index 4775f49..d10ac54 100644
@@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
                *mac = NULL;
        }
 
-       rc = of_get_phy_mode(np, &plat->phy_interface);
-       if (rc)
-               return ERR_PTR(rc);
+       plat->phy_interface = device_get_phy_mode(&pdev->dev);
+       if (plat->phy_interface < 0)
+               return ERR_PTR(plat->phy_interface);
 
        plat->interface = stmmac_of_get_mac_mode(np);
        if (plat->interface < 0)
index bc85db8..7032a24 100644
@@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
                return NULL;
        }
 
-       if (sock->sk->sk_protocol != IPPROTO_UDP) {
+       sk = sock->sk;
+       if (sk->sk_protocol != IPPROTO_UDP ||
+           sk->sk_type != SOCK_DGRAM ||
+           (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
                pr_debug("socket fd=%d not UDP\n", fd);
                sk = ERR_PTR(-EINVAL);
                goto out_sock;
        }
 
-       lock_sock(sock->sk);
-       if (sock->sk->sk_user_data) {
+       lock_sock(sk);
+       if (sk->sk_user_data) {
                sk = ERR_PTR(-EBUSY);
                goto out_rel_sock;
        }
 
-       sk = sock->sk;
        sock_hold(sk);
 
        tuncfg.sk_user_data = gtp;
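
The reworked check pins down exactly what GTP will encapsulate over: a UDP datagram socket, IPv4 or IPv6; any other fd smuggled in through netlink is rejected before lock_sock() is ever taken. Hoisting sock->sk into sk also removes the repeated dereferences. The validation in isolation (hypothetical helper name):

    #include <net/sock.h>

    static bool sk_usable_for_encap(const struct sock *sk)
    {
            return sk->sk_protocol == IPPROTO_UDP &&
                   sk->sk_type == SOCK_DGRAM &&
                   (sk->sk_family == AF_INET || sk->sk_family == AF_INET6);
    }
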
index 317d3a8..6f4d7ba 100644
@@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work)
  */
 static void slip_write_wakeup(struct tty_struct *tty)
 {
-       struct slip *sl = tty->disc_data;
+       struct slip *sl;
+
+       rcu_read_lock();
+       sl = rcu_dereference(tty->disc_data);
+       if (!sl)
+               goto out;
 
        schedule_work(&sl->tx_work);
+out:
+       rcu_read_unlock();
 }
 
 static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty)
                return;
 
        spin_lock_bh(&sl->lock);
-       tty->disc_data = NULL;
+       rcu_assign_pointer(tty->disc_data, NULL);
        sl->tty = NULL;
        spin_unlock_bh(&sl->lock);
 
+       synchronize_rcu();
        flush_work(&sl->tx_work);
 
        /* VSV = very important to remove timers */
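
The ordering in slip_close() is what makes the wakeup path safe: first unpublish disc_data, then synchronize_rcu() so no CPU is still inside the rcu_read_lock() section in slip_write_wakeup(), and only then flush_work(), after which nothing can requeue tx_work. A minimal sketch of that pattern with a hypothetical ctx object:

    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>

    struct ctx {
            struct work_struct tx_work;
    };

    static struct ctx __rcu *live_ctx;

    static void reader_path(void)           /* e.g. a tty wakeup callback */
    {
            struct ctx *c;

            rcu_read_lock();
            c = rcu_dereference(live_ctx);
            if (c)
                    schedule_work(&c->tx_work);
            rcu_read_unlock();
    }

    static void writer_teardown(struct ctx *c)
    {
            rcu_assign_pointer(live_ctx, NULL); /* unpublish            */
            synchronize_rcu();                  /* wait out all readers */
            flush_work(&c->tx_work);            /* nothing requeues now */
    }
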
index 3a5a6c6..650c937 100644
@@ -1936,6 +1936,10 @@ drop:
                        if (ret != XDP_PASS) {
                                rcu_read_unlock();
                                local_bh_enable();
+                               if (frags) {
+                                       tfile->napi.skb = NULL;
+                                       mutex_unlock(&tfile->napi_mutex);
+                               }
                                return total_len;
                        }
                }
index d2d61f0..eccbf4c 100644
@@ -20,6 +20,7 @@
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <net/ip6_checksum.h>
+#include <net/vxlan.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
@@ -3660,6 +3661,19 @@ static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
        tasklet_schedule(&dev->bh);
 }
 
+static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
+                                               struct net_device *netdev,
+                                               netdev_features_t features)
+{
+       if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE)
+               features &= ~NETIF_F_GSO_MASK;
+
+       features = vlan_features_check(skb, features);
+       features = vxlan_features_check(skb, features);
+
+       return features;
+}
+
 static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_open               = lan78xx_open,
        .ndo_stop               = lan78xx_stop,
@@ -3673,6 +3687,7 @@ static const struct net_device_ops lan78xx_netdev_ops = {
        .ndo_set_features       = lan78xx_set_features,
        .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
+       .ndo_features_check     = lan78xx_features_check,
 };
 
 static void lan78xx_stat_monitor(struct timer_list *t)
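
.ndo_features_check runs per packet just before transmit. Clearing NETIF_F_GSO_MASK when the frame plus TX_OVERHEAD would exceed MAX_SINGLE_PACKET_SIZE makes the core segment the skb in software rather than handing the device an aggregate larger than it can take in one transfer. A minimal sketch of the clamp with a made-up limit:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    #define MAX_HW_FRAME 9000       /* hypothetical device limit */

    /* Returning a reduced feature set tells the stack to fall back to
     * software GSO for this one skb.
     */
    static netdev_features_t clamp_gso(struct sk_buff *skb,
                                       struct net_device *dev,
                                       netdev_features_t features)
    {
            if (skb->len > MAX_HW_FRAME)
                    features &= ~NETIF_F_GSO_MASK;
            return features;
    }
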
index 3605128..e8cd8c0 100644
@@ -31,7 +31,7 @@
 #define NETNEXT_VERSION                "11"
 
 /* Information for net */
-#define NET_VERSION            "10"
+#define NET_VERSION            "11"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -68,6 +68,7 @@
 #define PLA_LED_FEATURE                0xdd92
 #define PLA_PHYAR              0xde00
 #define PLA_BOOT_CTRL          0xe004
+#define PLA_LWAKE_CTRL_REG     0xe007
 #define PLA_GPHY_INTR_IMR      0xe022
 #define PLA_EEE_CR             0xe040
 #define PLA_EEEP_CR            0xe080
@@ -95,6 +96,7 @@
 #define PLA_TALLYCNT           0xe890
 #define PLA_SFF_STS_7          0xe8de
 #define PLA_PHYSTATUS          0xe908
+#define PLA_CONFIG6            0xe90a /* CONFIG6 */
 #define PLA_BP_BA              0xfc26
 #define PLA_BP_0               0xfc28
 #define PLA_BP_1               0xfc2a
 #define PLA_BP_EN              0xfc38
 
 #define USB_USB2PHY            0xb41e
+#define USB_SSPHYLINK1         0xb426
 #define USB_SSPHYLINK2         0xb428
 #define USB_U2P3_CTRL          0xb460
 #define USB_CSR_DUMMY1         0xb464
 #define LINK_ON_WAKE_EN                0x0010
 #define LINK_OFF_WAKE_EN       0x0008
 
+/* PLA_CONFIG6 */
+#define LANWAKE_CLR_EN         BIT(0)
+
 /* PLA_CONFIG5 */
 #define BWF_EN                 0x0040
 #define MWF_EN                 0x0020
 /* PLA_PHY_PWR */
 #define TX_10M_IDLE_EN         0x0080
 #define PFM_PWM_SWITCH         0x0040
+#define TEST_IO_OFF            BIT(4)
 
 /* PLA_MAC_PWR_CTRL */
 #define D3_CLK_GATED_EN                0x00004000
 #define MAC_CLK_SPDWN_EN       BIT(15)
 
 /* PLA_MAC_PWR_CTRL3 */
+#define PLA_MCU_SPDWN_EN       BIT(14)
 #define PKT_AVAIL_SPDWN_EN     0x0100
 #define SUSPEND_SPDWN_EN       0x0004
 #define U1U2_SPDWN_EN          0x0002
 /* PLA_BOOT_CTRL */
 #define AUTOLOAD_DONE          0x0002
 
+/* PLA_LWAKE_CTRL_REG */
+#define LANWAKE_PIN            BIT(7)
+
 /* PLA_SUSPEND_FLAG */
 #define LINK_CHG_EVENT         BIT(0)
 
 #define DEBUG_LTSSM            0x0082
 
 /* PLA_EXTRA_STATUS */
+#define CUR_LINK_OK            BIT(15)
 #define U3P3_CHECK_EN          BIT(7)  /* RTL_VER_05 only */
 #define LINK_CHANGE_FLAG       BIT(8)
+#define POLL_LINK_CHG          BIT(0)
 
 /* USB_USB2PHY */
 #define USB2PHY_SUSPEND                0x0001
 #define USB2PHY_L1             0x0002
 
+/* USB_SSPHYLINK1 */
+#define DELAY_PHY_PWR_CHG      BIT(1)
+
 /* USB_SSPHYLINK2 */
 #define pwd_dn_scale_mask      0x3ffe
 #define pwd_dn_scale(x)                ((x) << 1)
@@ -2861,6 +2877,17 @@ static int rtl8153_enable(struct r8152 *tp)
        r8153_set_rx_early_timeout(tp);
        r8153_set_rx_early_size(tp);
 
+       if (tp->version == RTL_VER_09) {
+               u32 ocp_data;
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK);
+               ocp_data &= ~FC_PATCH_TASK;
+               ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+               usleep_range(1000, 2000);
+               ocp_data |= FC_PATCH_TASK;
+               ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data);
+       }
+
        return rtl_enable(tp);
 }
 
@@ -3374,8 +3401,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable)
                r8153b_ups_en(tp, false);
                r8153_queue_wake(tp, false);
                rtl_runtime_suspend_enable(tp, false);
-               r8153_u2p3en(tp, true);
-               r8153b_u1u2en(tp, true);
+               if (tp->udev->speed != USB_SPEED_HIGH)
+                       r8153b_u1u2en(tp, true);
        }
 }
 
@@ -4673,7 +4700,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
 
        r8153_aldps_en(tp, true);
        r8152b_enable_fc(tp);
-       r8153_u2p3en(tp, true);
 
        set_bit(PHY_RESET, &tp->flags);
 }
@@ -4952,6 +4978,8 @@ static void rtl8152_down(struct r8152 *tp)
 
 static void rtl8153_up(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -4959,6 +4987,19 @@ static void rtl8153_up(struct r8152 *tp)
        r8153_u2p3en(tp, false);
        r8153_aldps_en(tp, false);
        r8153_first_init(tp);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data |= LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+       ocp_data &= ~LANWAKE_PIN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1);
+       ocp_data &= ~DELAY_PHY_PWR_CHG;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data);
+
        r8153_aldps_en(tp, true);
 
        switch (tp->version) {
@@ -4977,11 +5018,17 @@ static void rtl8153_up(struct r8152 *tp)
 
 static void rtl8153_down(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
 
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data &= ~LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
        r8153_u1u2en(tp, false);
        r8153_u2p3en(tp, false);
        r8153_power_cut_en(tp, false);
@@ -4992,6 +5039,8 @@ static void rtl8153_down(struct r8152 *tp)
 
 static void rtl8153b_up(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
@@ -5002,18 +5051,29 @@ static void rtl8153b_up(struct r8152 *tp)
        r8153_first_init(tp);
        ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data &= ~PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
        r8153_aldps_en(tp, true);
-       r8153_u2p3en(tp, true);
-       r8153b_u1u2en(tp, true);
+
+       if (tp->udev->speed != USB_SPEED_HIGH)
+               r8153b_u1u2en(tp, true);
 }
 
 static void rtl8153b_down(struct r8152 *tp)
 {
+       u32 ocp_data;
+
        if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
                rtl_drop_queued_tx(tp);
                return;
        }
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data |= PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
        r8153b_u1u2en(tp, false);
        r8153_u2p3en(tp, false);
        r8153b_power_cut_en(tp, false);
@@ -5385,6 +5445,16 @@ static void r8153_init(struct r8152 *tp)
                else
                        ocp_data |= DYNAMIC_BURST;
                ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+
+               r8153_queue_wake(tp, false);
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+               if (rtl8152_get_speed(tp) & LINK_STATUS)
+                       ocp_data |= CUR_LINK_OK;
+               else
+                       ocp_data &= ~CUR_LINK_OK;
+               ocp_data |= POLL_LINK_CHG;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
        }
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -5414,10 +5484,19 @@ static void r8153_init(struct r8152 *tp)
        ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001);
 
        r8153_power_cut_en(tp, false);
+       rtl_runtime_suspend_enable(tp, false);
        r8153_u1u2en(tp, true);
        r8153_mac_clk_spd(tp, false);
        usb_enable_lpm(tp->udev);
 
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6);
+       ocp_data |= LANWAKE_CLR_EN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG);
+       ocp_data &= ~LANWAKE_PIN;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data);
+
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
        ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
@@ -5482,7 +5561,17 @@ static void r8153b_init(struct r8152 *tp)
        r8153b_ups_en(tp, false);
        r8153_queue_wake(tp, false);
        rtl_runtime_suspend_enable(tp, false);
-       r8153b_u1u2en(tp, true);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS);
+       if (rtl8152_get_speed(tp) & LINK_STATUS)
+               ocp_data |= CUR_LINK_OK;
+       else
+               ocp_data &= ~CUR_LINK_OK;
+       ocp_data |= POLL_LINK_CHG;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data);
+
+       if (tp->udev->speed != USB_SPEED_HIGH)
+               r8153b_u1u2en(tp, true);
        usb_enable_lpm(tp->udev);
 
        /* MAC clock speed down */
@@ -5490,6 +5579,19 @@ static void r8153b_init(struct r8152 *tp)
        ocp_data |= MAC_CLK_SPDWN_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data);
 
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3);
+       ocp_data &= ~PLA_MCU_SPDWN_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data);
+
+       if (tp->version == RTL_VER_09) {
+               /* Disable Test IO for 32QFN */
+               if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) {
+                       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
+                       ocp_data |= TEST_IO_OFF;
+                       ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
+               }
+       }
+
        set_bit(GREEN_ETHERNET, &tp->flags);
 
        /* rx aggregation */
@@ -6705,6 +6807,11 @@ static int rtl8152_probe(struct usb_interface *intf,
 
        intf->needs_remote_wakeup = 1;
 
+       if (!rtl_can_wakeup(tp))
+               __rtl_set_wol(tp, 0);
+       else
+               tp->saved_wolopts = __rtl_get_wol(tp);
+
        tp->rtl_ops.init(tp);
 #if IS_BUILTIN(CONFIG_USB_RTL8152)
        /* Retry in case request_firmware() is not ready yet. */
@@ -6722,10 +6829,6 @@ static int rtl8152_probe(struct usb_interface *intf,
                goto out1;
        }
 
-       if (!rtl_can_wakeup(tp))
-               __rtl_set_wol(tp, 0);
-
-       tp->saved_wolopts = __rtl_get_wol(tp);
        if (tp->saved_wolopts)
                device_set_wakeup_enable(&udev->dev, true);
        else
index f43c065..c4c8f1b 100644
@@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
        case AIROGVLIST:    ridcode = RID_APLIST;       break;
        case AIROGDRVNAM:   ridcode = RID_DRVNAME;      break;
        case AIROGEHTENC:   ridcode = RID_ETHERENCAP;   break;
-       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
-       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;
-               /* Only super-user can read WEP keys */
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               break;
+       case AIROGWEPKTMP:  ridcode = RID_WEP_TEMP;     break;
+       case AIROGWEPKNV:   ridcode = RID_WEP_PERM;     break;
        case AIROGSTAT:     ridcode = RID_STATUS;       break;
        case AIROGSTATSD32: ridcode = RID_STATSDELTA;   break;
        case AIROGSTATSC32: ridcode = RID_STATS;        break;
@@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
                return -EINVAL;
        }
 
-       if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
+       if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) {
+               /* Only super-user can read WEP keys */
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+       }
+
+       if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
                return -ENOMEM;
 
        PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
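
The hunk above folds the two duplicated CAP_NET_ADMIN checks into a single test keyed on the resolved RID code, and switches the RID buffer from kmalloc() to kzalloc() so a short device read cannot leak stale heap bytes back to userspace. A minimal sketch of that pattern, assuming hypothetical helper names (is_privileged_rid(), read_rid_buf()) rather than the driver's own:

#include <linux/capability.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical: privilege decided once, from the resolved code. */
static bool is_privileged_rid(u16 ridcode)
{
	/* WEP key material must stay CAP_NET_ADMIN-only. */
	return ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM;
}

static void *read_rid_buf(u16 ridcode, size_t size)
{
	if (is_privileged_rid(ridcode) && !capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* Zeroed up front: a short read can't expose old heap data. */
	return kzalloc(size, GFP_KERNEL);
}
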
index cd73fc5..fd45483 100644 (file)
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_station_priv *sta_priv = NULL;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        __le16 fc;
        u8 hdr_len;
@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        if (unlikely(!dev_cmd))
                goto drop_unlock_priv;
 
-       memset(dev_cmd, 0, sizeof(*dev_cmd));
        dev_cmd->hdr.cmd = REPLY_TX;
        tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 
index 40fe2d6..48d375a 100644 (file)
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 {
        union acpi_object *wifi_pkg, *data;
        bool enabled;
-       int i, n_profiles, tbl_rev;
-       int  ret = 0;
+       int i, n_profiles, tbl_rev, pos;
+       int ret = 0;
 
        data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
        if (IS_ERR(data))
@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
                goto out_free;
        }
 
-       for (i = 0; i < n_profiles; i++) {
-               /* the tables start at element 3 */
-               int pos = 3;
+       /* the tables start at element 3 */
+       pos = 3;
 
+       for (i = 0; i < n_profiles; i++) {
                /* The EWRD profiles officially go from 2 to 4, but we
                 * save them in sar_profiles[1-3] (because we don't
                 * have profile 0).  So in the array we start from 1.
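
The fix above hoists pos out of the profile loop: the ACPI package is one flat array, so the read cursor must keep advancing across profiles instead of snapping back to element 3 on every iteration (which made every profile re-read the first one's data). A standalone sketch of the cursor pattern, with illustrative layout constants:

#define TABLE_START		3	/* illustrative: data begins at element 3 */
#define ENTRIES_PER_PROFILE	4	/* illustrative entry count */

static void parse_profiles(const int *pkg, int n_profiles, int *out)
{
	int pos = TABLE_START;	/* cursor persists across profiles */
	int i, j;

	for (i = 0; i < n_profiles; i++)
		for (j = 0; j < ENTRIES_PER_PROFILE; j++)
			out[i * ENTRIES_PER_PROFILE + j] = pkg[pos++];
}
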
index ed90dd1..4c60f99 100644 (file)
@@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
        int ret = 0;
 
-       /* if the FW crashed or not debug monitor cfg was given, there is
-        * no point in changing the recording state
-        */
-       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
-           (!fwrt->trans->dbg.dest_tlv &&
-            fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+       if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
                return 0;
 
        if (fw_has_capa(&fwrt->fw->ucode_capa,
index 92d9898..c2f7252 100644 (file)
@@ -379,7 +379,7 @@ enum {
 
 
 /* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED    (0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED   (0x00000002)
 
 /*
  * UCODE-DRIVER GP (general purpose) mailbox register 1
index f266647..ce8f248 100644 (file)
@@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
        if (!frag || frag->size || !pages)
                return -EIO;
 
-       while (pages) {
+       /*
+        * We try to allocate as many pages as we can, starting with
+        * the requested amount and going down until we can allocate
+        * something.  Because of DIV_ROUND_UP(), pages will never go
+        * down to 0 and stop the loop, so stop when pages reaches 1,
+        * which is too small anyway.
+        */
+       while (pages > 1) {
                block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
                                           &physical,
                                           GFP_KERNEL | __GFP_NOWARN);
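
The loop above sizes the fragment opportunistically: try the requested page count, shrink on failure, and give up before the DIV_ROUND_UP() halving pins the count at 1 forever. The same shape in a self-contained form (plain malloc() standing in for dma_alloc_coherent()):

#include <stdlib.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Shrink the request until an allocation sticks; 1 page is too small. */
static void *alloc_shrinking(size_t page_sz, unsigned int *pages)
{
	while (*pages > 1) {
		void *block = malloc(*pages * page_sz);

		if (block)
			return block;
		/* DIV_ROUND_UP keeps *pages >= 1, so it never hits 0. */
		*pages = DIV_ROUND_UP(*pages, 2);
	}
	return NULL;
}
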
index 4096ccf..bc8c959 100644 (file)
@@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
 MODULE_PARM_DESC(uapsd_disable,
                 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
index ebea3f3..82e5cac 100644 (file)
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
  * @nvm_file: specifies an external NVM file
  * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
  *     IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
- * @lar_disable: disable LAR (regulatory), default = 0
  * @fw_monitor: allow to use firmware monitor
  * @disable_11ac: disable VHT capabilities, default = false.
  * @remove_when_gone: remove an inaccessible device from the PCIe bus.
@@ -136,7 +135,6 @@ struct iwl_mod_params {
        int antenna_coupling;
        char *nvm_file;
        u32 uapsd_disable;
-       bool lar_disable;
        bool fw_monitor;
        bool disable_11ac;
        /**
index 1e240a2..d4f834b 100644 (file)
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
        NVM_CHANNEL_DC_HIGH             = BIT(12),
 };
 
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *     2.4 GHz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *     5 GHz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160 MHz is allowed
+ *     for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80 MHz is allowed
+ *     for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40 MHz is forbidden
+ *     for this regulatory domain (valid only in 5 GHz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+       REG_CAPA_BF_CCD_LOW_BAND        = BIT(0),
+       REG_CAPA_BF_CCD_HIGH_BAND       = BIT(1),
+       REG_CAPA_160MHZ_ALLOWED         = BIT(2),
+       REG_CAPA_80MHZ_ALLOWED          = BIT(3),
+       REG_CAPA_MCS_8_ALLOWED          = BIT(4),
+       REG_CAPA_MCS_9_ALLOWED          = BIT(5),
+       REG_CAPA_40MHZ_FORBIDDEN        = BIT(7),
+       REG_CAPA_DC_HIGH_ENABLED        = BIT(9),
+};
+
 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
                                               int chan, u32 flags)
 {
@@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const struct iwl_fw *fw,
                   const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+                  u8 tx_chains, u8 rx_chains)
 {
        struct iwl_nvm_data *data;
        bool lar_enabled;
@@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                return NULL;
        }
 
-       if (lar_fw_supported && lar_enabled)
+       if (lar_enabled &&
+           fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 
        if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
@@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
 
 static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
                                       int ch_idx, u16 nvm_flags,
+                                      u16 cap_flags,
                                       const struct iwl_cfg *cfg)
 {
        u32 flags = NL80211_RRF_NO_HT40;
@@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
            (flags & NL80211_RRF_NO_IR))
                flags |= NL80211_RRF_GO_CONCURRENT;
 
+       /*
+        * cap_flags is per regulatory domain, so apply it to every channel
+        */
+       if (ch_idx >= NUM_2GHZ_CHANNELS) {
+               if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+                       flags |= NL80211_RRF_NO_HT40;
+
+               if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+                       flags |= NL80211_RRF_NO_80MHZ;
+
+               if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+                       flags |= NL80211_RRF_NO_160MHZ;
+       }
+
        return flags;
 }
 
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc,
-                      u16 geo_info)
+                      u16 geo_info, u16 cap)
 {
        int ch_idx;
        u16 ch_flags;
@@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                }
 
                reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-                                                            ch_flags, cfg);
+                                                            ch_flags, cap,
+                                                            cfg);
 
                /* we can't continue the same rule */
                if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
@@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
                .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
        };
        int  ret;
-       bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
-                               fw_has_capa(&fw->ucode_capa,
-                                           IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
        bool empty_otp;
        u32 mac_flags;
        u32 sbands_flags = 0;
@@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
        nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
        nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
 
-       if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+       if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+           fw_has_capa(&fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
                nvm->lar_enabled = true;
                sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
        }
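
Taken together, the nvm-parse changes thread a per-domain capability word (cap) from the MCC update response down to the per-channel rule builder, where it can only further restrict what the per-channel NVM flags already allow. A compact illustration of that gating, using the REG_CAPA_* bits from the enum above and made-up output flag names:

#include <stdbool.h>

#define RULE_NO_HT40	(1 << 0)	/* illustrative output bits */
#define RULE_NO_80MHZ	(1 << 1)
#define RULE_NO_160MHZ	(1 << 2)

static unsigned int apply_domain_caps(unsigned int rule_flags,
				      unsigned int cap, bool is_5ghz)
{
	if (!is_5ghz)		/* caps only constrain 5 GHz channels */
		return rule_flags;

	if (cap & REG_CAPA_40MHZ_FORBIDDEN)
		rule_flags |= RULE_NO_HT40;
	if (!(cap & REG_CAPA_80MHZ_ALLOWED))
		rule_flags |= RULE_NO_80MHZ;
	if (!(cap & REG_CAPA_160MHZ_ALLOWED))
		rule_flags |= RULE_NO_160MHZ;
	return rule_flags;
}
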
index b7e1ddf..fb0b385 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018        Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
  */
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+                  const struct iwl_fw *fw,
                   const __be16 *nvm_hw, const __le16 *nvm_sw,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
-                  u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+                  u8 tx_chains, u8 rx_chains);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                       int num_of_ch, __le32 *channels, u16 fw_mcc,
-                      u16 geo_info);
+                      u16 geo_info, u16 cap);
 
 /**
  * struct iwl_nvm_section - describes an NVM section in memory.
index 28bdc9a..f91197e 100644 (file)
@@ -66,7 +66,9 @@
 
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
-                                 const struct iwl_trans_ops *ops)
+                                 const struct iwl_trans_ops *ops,
+                                 unsigned int cmd_pool_size,
+                                 unsigned int cmd_pool_align)
 {
        struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP
@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
        trans->dev_cmd_pool =
                kmem_cache_create(trans->dev_cmd_pool_name,
-                                 sizeof(struct iwl_device_cmd),
-                                 sizeof(void *),
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
+                                 cmd_pool_size, cmd_pool_align,
+                                 SLAB_HWCACHE_ALIGN, NULL);
        if (!trans->dev_cmd_pool)
                return NULL;
 
index 8cadad7..e33df5a 100644 (file)
@@ -193,6 +193,18 @@ struct iwl_device_cmd {
        };
 } __packed;
 
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+       struct iwl_cmd_header hdr;
+       u8 payload[];
+} __packed;
+
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
 /*
@@ -544,7 +556,7 @@ struct iwl_trans_ops {
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
        int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-                 struct iwl_device_cmd *dev_cmd, int queue);
+                 struct iwl_device_tx_cmd *dev_cmd, int queue);
        void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
                        struct sk_buff_head *skbs);
 
@@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
        return trans->ops->dump_data(trans, dump_mask);
 }
 
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
-       return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+       return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
 }
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
-                                        struct iwl_device_cmd *dev_cmd)
+                                        struct iwl_device_tx_cmd *dev_cmd)
 {
        kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                              struct iwl_device_cmd *dev_cmd, int queue)
+                              struct iwl_device_tx_cmd *dev_cmd, int queue)
 {
        if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
                return -EIO;
@@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
  *****************************************************/
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                                  struct device *dev,
-                                 const struct iwl_trans_ops *ops);
+                                 const struct iwl_trans_ops *ops,
+                                 unsigned int cmd_pool_size,
+                                 unsigned int cmd_pool_align);
 void iwl_trans_free(struct iwl_trans *trans);
 
 /*****************************************************
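
Two coordinated changes show up in the command pool: the slab object size and alignment are now passed in by the transport instead of being hard-coded to sizeof(struct iwl_device_cmd), and allocation moves to kmem_cache_zalloc(), which is what lets the per-path memset() calls disappear from the TX code. A hedged sketch of the pool setup (real slab API, illustrative wrapper names):

#include <linux/slab.h>

/* Illustrative: size/align chosen by the caller, not a fixed type. */
static struct kmem_cache *make_cmd_pool(const char *name,
					unsigned int size,
					unsigned int align)
{
	return kmem_cache_create(name, size, align,
				 SLAB_HWCACHE_ALIGN, NULL);
}

static void *alloc_cmd(struct kmem_cache *pool)
{
	/* Zeroed at allocation time: callers no longer memset(). */
	return kmem_cache_zalloc(pool, GFP_ATOMIC);
}
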
index 60aff2e..58df25e 100644 (file)
 #define IWL_MVM_D3_DEBUG                       false
 #define IWL_MVM_USE_TWT                                false
 #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA       10
+#define IWL_MVM_USE_NSSN_SYNC                  0
 
 #endif /* __MVM_CONSTANTS_H */
index dd685f7..c09624d 100644 (file)
@@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
                return 0;
        }
 
+       if (!mvm->fwrt.ppag_table.enabled) {
+               IWL_DEBUG_RADIO(mvm,
+                               "PPAG not enabled, command not sent.\n");
+               return 0;
+       }
+
        IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
-       IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
-                       mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
 
        for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
                for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
index 32dc9d6..6717f25 100644 (file)
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                                      __le32_to_cpu(resp->n_channels),
                                      resp->channels,
                                      __le16_to_cpu(resp->mcc),
-                                     __le16_to_cpu(resp->geo_info));
+                                     __le16_to_cpu(resp->geo_info),
+                                     __le16_to_cpu(resp->cap));
        /* Store the return source id */
        src_id = resp->source_id;
        kfree(resp);
@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        return ret;
 }
 
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+                          struct ieee80211_sta *sta)
+{
+       if (likely(sta)) {
+               if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+                       return;
+       } else {
+               if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+                       return;
+       }
+
+       ieee80211_free_txskb(mvm->hw, skb);
+}
+
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                           struct ieee80211_tx_control *control,
                           struct sk_buff *skb)
@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                }
        }
 
-       if (sta) {
-               if (iwl_mvm_tx_skb(mvm, skb, sta))
-                       goto drop;
-               return;
-       }
-
-       if (iwl_mvm_tx_skb_non_sta(mvm, skb))
-               goto drop;
+       iwl_mvm_tx_skb(mvm, skb, sta);
        return;
  drop:
        ieee80211_free_txskb(hw, skb);
@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
                                break;
                        }
 
-                       if (!txq->sta)
-                               iwl_mvm_tx_skb_non_sta(mvm, skb);
-                       else
-                               iwl_mvm_tx_skb(mvm, skb, txq->sta);
+                       iwl_mvm_tx_skb(mvm, skb, txq->sta);
                }
        } while (atomic_dec_return(&mvmtxq->tx_request));
        rcu_read_unlock();
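
Both call sites above now funnel through the new iwl_mvm_tx_skb() helper, which gives the failure path a single owner: whichever transmit variant runs, a failed frame ends in exactly one ieee80211_free_txskb() inside the helper, and callers simply return. The control flow, reduced to stand-in functions:

/* Stand-ins for the two transmit paths and the drop action. */
static int try_sta_path(void *skb)	{ return -1; }	/* pretend failure */
static int try_non_sta_path(void *skb)	{ return 0; }	/* pretend success */
static void discard(void *skb)		{ }

/* One owner for failures: the helper frees, callers never do. */
static void tx_or_free(void *skb, int have_sta)
{
	if (have_sta) {
		if (try_sta_path(skb) == 0)
			return;
	} else {
		if (try_non_sta_path(skb) == 0)
			return;
	}
	discard(skb);
}
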
@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        return ret;
 }
 
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+       switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               rinfo->bw = RATE_INFO_BW_20;
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               rinfo->bw = RATE_INFO_BW_40;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               rinfo->bw = RATE_INFO_BW_80;
+               break;
+       case RATE_MCS_CHAN_WIDTH_160:
+               rinfo->bw = RATE_INFO_BW_160;
+               break;
+       }
+
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               rinfo->flags |= RATE_INFO_FLAGS_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_HT_MCS_NSS_MSK) + 1;
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_RATE_CODE_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_NSS_MSK) + 1;
+               if (rate_n_flags & RATE_MCS_SGI_MSK)
+                       rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+       } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+               u32 gi_ltf = u32_get_bits(rate_n_flags,
+                                         RATE_MCS_HE_GI_LTF_MSK);
+
+               rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+               rinfo->mcs = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_RATE_CODE_MSK);
+               rinfo->nss = u32_get_bits(rate_n_flags,
+                                         RATE_VHT_MCS_NSS_MSK) + 1;
+
+               if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+                       rinfo->bw = RATE_INFO_BW_HE_RU;
+                       rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+               }
+
+               switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+               case RATE_MCS_HE_TYPE_SU:
+               case RATE_MCS_HE_TYPE_EXT_SU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else if (rate_n_flags & RATE_MCS_SGI_MSK)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_MU:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+                       else if (gi_ltf == 2)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               case RATE_MCS_HE_TYPE_TRIG:
+                       if (gi_ltf == 0 || gi_ltf == 1)
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+                       else
+                               rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+                       break;
+               }
+
+               if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+                       rinfo->he_dcm = 1;
+       } else {
+               switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+               case IWL_RATE_1M_PLCP:
+                       rinfo->legacy = 10;
+                       break;
+               case IWL_RATE_2M_PLCP:
+                       rinfo->legacy = 20;
+                       break;
+               case IWL_RATE_5M_PLCP:
+                       rinfo->legacy = 55;
+                       break;
+               case IWL_RATE_11M_PLCP:
+                       rinfo->legacy = 110;
+                       break;
+               case IWL_RATE_6M_PLCP:
+                       rinfo->legacy = 60;
+                       break;
+               case IWL_RATE_9M_PLCP:
+                       rinfo->legacy = 90;
+                       break;
+               case IWL_RATE_12M_PLCP:
+                       rinfo->legacy = 120;
+                       break;
+               case IWL_RATE_18M_PLCP:
+                       rinfo->legacy = 180;
+                       break;
+               case IWL_RATE_24M_PLCP:
+                       rinfo->legacy = 240;
+                       break;
+               case IWL_RATE_36M_PLCP:
+                       rinfo->legacy = 360;
+                       break;
+               case IWL_RATE_48M_PLCP:
+                       rinfo->legacy = 480;
+                       break;
+               case IWL_RATE_54M_PLCP:
+                       rinfo->legacy = 540;
+                       break;
+               }
+       }
+}
+
 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_sta *sta,
@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
        }
 
+       if (iwl_mvm_has_tlc_offload(mvm)) {
+               struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+               iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+       }
+
        /* if beacon filtering isn't on mac80211 does it anyway */
        if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
                return;
index 3ec8de0..67ab7e7 100644 (file)
@@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
        bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
                                   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
-       if (iwlwifi_mod_params.lar_disable)
-               return false;
-
        /*
         * Enable LAR only if it is supported by the FW (TLV) &&
         * enabled in the NVM
@@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
                                             u16 len, const void *data,
                                             u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-                  struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+                      struct ieee80211_sta *sta);
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
index 945c1ea..46128a2 100644 (file)
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __be16 *hw;
        const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
-       bool lar_enabled;
        int regulatory_type;
 
        /* Checking for required sections */
-       if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+       if (mvm->trans->cfg->nvm_type == IWL_NVM) {
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
                        IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
 
-       lar_enabled = !iwlwifi_mod_params.lar_disable &&
-                     fw_has_capa(&mvm->fw->ucode_capa,
-                                 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
-       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+       return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
-                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-                                 lar_enabled);
+                                 mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
 }
 
 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
index ef99c49..c15f7db 100644 (file)
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
 
 static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
 {
-       struct iwl_mvm_rss_sync_notif notif = {
-               .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
-               .metadata.sync = 0,
-               .nssn_sync.baid = baid,
-               .nssn_sync.nssn = nssn,
-       };
-
-       iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+       if (IWL_MVM_USE_NSSN_SYNC) {
+               struct iwl_mvm_rss_sync_notif notif = {
+                       .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+                       .metadata.sync = 0,
+                       .nssn_sync.baid = baid,
+                       .nssn_sync.nssn = nssn,
+               };
+
+               iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+                                               sizeof(notif));
+       }
 }
 
 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
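
Gating the sync on a constant instead of deleting the code keeps the notification path compiling and type-checked, while letting the compiler drop the whole block as dead code whenever IWL_MVM_USE_NSSN_SYNC is 0. The pattern in miniature:

#include <stdio.h>

#define USE_FEATURE 0	/* flip to 1 to re-enable the path */

static void do_sync(int baid, int nssn)
{
	printf("sync baid=%d nssn=%d\n", baid, nssn);
}

static void maybe_sync(int baid, int nssn)
{
	/* Still parsed and type-checked, but eliminated when 0. */
	if (USE_FEATURE)
		do_sync(baid, nssn);
}
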
index a046ac9..a5af8f4 100644 (file)
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
                cmd_size = sizeof(struct iwl_scan_config_v2);
        else
                cmd_size = sizeof(struct iwl_scan_config_v1);
-       cmd_size += num_channels;
+       cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
        cfg = kzalloc(cmd_size, GFP_KERNEL);
        if (!cfg)
index 6a241d3..a8d0d17 100644 (file)
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 /*
  * Allocates and sets the Tx cmd and the driver data pointers in the skb
  */
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
                      struct ieee80211_tx_info *info, int hdrlen,
                      struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
 
        dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (unlikely(!dev_cmd))
                return NULL;
 
-       /* Make sure we zero enough of dev_cmd */
-       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
-       BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
-       memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
        dev_cmd->hdr.cmd = TX_CMD;
 
        if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -597,7 +592,7 @@ out:
 }
 
 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
-                                      struct iwl_device_cmd *cmd)
+                                      struct iwl_device_tx_cmd *cmd)
 {
        struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 
@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info info;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        __le16 fc = hdr->frame_control;
@@ -1073,7 +1068,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct iwl_mvm_sta *mvmsta;
-       struct iwl_device_cmd *dev_cmd;
+       struct iwl_device_tx_cmd *dev_cmd;
        __le16 fc;
        u16 seq_number = 0;
        u8 tid = IWL_MAX_TID_COUNT;
@@ -1149,7 +1144,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                spin_unlock(&mvmsta->lock);
-               return 0;
+               return -1;
        }
 
        if (!iwl_mvm_has_new_tx_api(mvm)) {
@@ -1201,8 +1196,8 @@ drop:
        return -1;
 }
 
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-                  struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+                      struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_tx_info info;
index d38cefb..e249e3f 100644 (file)
 #include "internal.h"
 #include "iwl-prph.h"
 
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                   size_t size,
+                                                   dma_addr_t *phys,
+                                                   int depth)
+{
+       void *result;
+
+       if (WARN(depth > 2,
+                "failed to allocate DMA memory not crossing 2^32 boundary"))
+               return NULL;
+
+       result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+       if (!result)
+               return NULL;
+
+       if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+               void *old = result;
+               dma_addr_t oldphys = *phys;
+
+               result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+                                                               phys,
+                                                               depth + 1);
+               dma_free_coherent(trans->dev, size, old, oldphys);
+       }
+
+       return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                  size_t size,
+                                                  dma_addr_t *phys)
+{
+       return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
        struct iwl_self_init_dram *dram = &trans->init_dram;
@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
        struct iwl_context_info *ctxt_info;
        struct iwl_context_info_rbd_cfg *rx_cfg;
        u32 control_flags = 0, rb_size;
+       dma_addr_t phys;
        int ret;
 
-       ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
-                                      &trans_pcie->ctxt_info_dma_addr,
-                                      GFP_KERNEL);
+       ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+                                                         sizeof(*ctxt_info),
+                                                         &phys);
        if (!ctxt_info)
                return -ENOMEM;
 
+       trans_pcie->ctxt_info_dma_addr = phys;
+
        ctxt_info->version.version = 0;
        ctxt_info->version.mac_id =
                cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
index a091690..f14bcef 100644 (file)
@@ -305,7 +305,7 @@ struct iwl_cmd_meta {
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
 
 struct iwl_pcie_txq_entry {
-       struct iwl_device_cmd *cmd;
+       void *cmd;
        struct sk_buff *skb;
        /* buffer to free after command completes */
        const void *free_buf;
@@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 /*****************************************************
 * TX / HCMD
 ******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32 bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition to be done in 64 bits.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+       return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
                          int queue_size);
@@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
                                  struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                     struct iwl_device_cmd *dev_cmd, int txq_id);
+                     struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);
@@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb);
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+                                     struct sk_buff *skb);
 #endif
 
 /* common functions that are used by gen3 transport */
@@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                                 unsigned int timeout);
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                          struct iwl_device_cmd *dev_cmd, int txq_id);
+                          struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
                                  struct iwl_host_cmd *cmd);
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
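
iwl_pcie_crosses_4g_boundary() takes u64 on purpose: on a build where dma_addr_t is only 32 bits, phys + len would wrap and the comparison could never observe a carry into the upper word. A userspace illustration of the difference, with upper_32_bits() reimplemented locally:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define upper_32(x)	((uint32_t)((x) >> 32))

static bool crosses_4g(uint64_t phys, uint16_t len)
{
	return upper_32(phys) != upper_32(phys + len);
}

int main(void)
{
	/* A buffer running past the 2^32 line is flagged... */
	assert(crosses_4g(0xfffffff0ULL, 0x20));
	/* ...whereas 32-bit math would wrap 0xfffffff0 + 0x20 to 0x10
	 * and both upper halves would compare equal (both zero). */
	assert(!crosses_4g(0x1000, 0x20));
	return 0;
}
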
index 452da44..f0b8ff6 100644 (file)
@@ -1529,13 +1529,13 @@ out:
 
        napi = &rxq->napi;
        if (napi->poll) {
+               napi_gro_flush(napi, false);
+
                if (napi->rx_count) {
                        netif_receive_skb_list(&napi->rx_list);
                        INIT_LIST_HEAD(&napi->rx_list);
                        napi->rx_count = 0;
                }
-
-               napi_gro_flush(napi, false);
        }
 
        iwl_pcie_rxq_restock(trans, rxq);
index a067713..f60d66f 100644 (file)
@@ -79,6 +79,7 @@
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
 #include "fw/dbg.h"
+#include "fw/api/tx.h"
 #include "internal.h"
 #include "iwl-fh.h"
 
@@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
        u16 cap;
 
        /*
-        * HW bug W/A for instability in PCIe bus L0S->L1 transition.
-        * Check if BIOS (or OS) enabled L1-ASPM on this device.
-        * If so (likely), disable L0S, so device moves directly L0->L1;
-        *    costs negligible amount of power savings.
-        * If not (unlikely), enable L0S, so there is at least some
-        *    power savings, even without L1.
+        * L0S states have been found to be unstable with our devices
+        * and in newer hardware they are not officially supported at
+        * all, so we must always set the L0S_DISABLED bit.
         */
+       iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
-               iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-       else
-               iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
 
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
-       int ret, addr_size;
+       int ret, addr_size, txcmd_size, txcmd_align;
+       const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+       if (!cfg_trans->gen2) {
+               ops = &trans_ops_pcie;
+               txcmd_size = sizeof(struct iwl_tx_cmd);
+               txcmd_align = sizeof(void *);
+       } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+               txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+               txcmd_align = 64;
+       } else {
+               txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+               txcmd_align = 128;
+       }
+
+       txcmd_size += sizeof(struct iwl_cmd_header);
+       txcmd_size += 36; /* biggest possible 802.11 header */
+
+       /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+       if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+               return ERR_PTR(-EINVAL);
 
        ret = pcim_enable_device(pdev);
        if (ret)
                return ERR_PTR(ret);
 
-       if (cfg_trans->gen2)
-               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                                       &pdev->dev, &trans_ops_pcie_gen2);
-       else
-               trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-                                       &pdev->dev, &trans_ops_pcie);
-
+       trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+                               txcmd_size, txcmd_align);
        if (!trans)
                return ERR_PTR(-ENOMEM);
 
index 8ca0250..bfb984b 100644 (file)
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
        struct iwl_tfh_tb *tb;
 
+       /*
+        * Only WARN here so we know about the issue; returning an error
+        * would mess up our unmap path, because not every caller currently
+        * checks the return value. This function can only fail when there
+        * is no more TB space, so when we know there is enough we
+        * don't always check ...
+        */
+       WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+            "possible DMA problem with iova:0x%llx, len:%d\n",
+            (unsigned long long)addr, len);
+
        if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
                return -EINVAL;
        tb = &tfd->tbs[idx];
@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
        return idx;
 }
 
+static struct page *get_workaround_page(struct iwl_trans *trans,
+                                       struct sk_buff *skb)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct page **page_ptr;
+       struct page *ret;
+
+       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+       ret = alloc_page(GFP_ATOMIC);
+       if (!ret)
+               return NULL;
+
+       /* set the chaining pointer to the previous page, if there is one */
+       *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+       *page_ptr = ret;
+
+       return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+                                       struct sk_buff *skb,
+                                       struct iwl_tfh_tfd *tfd,
+                                       dma_addr_t phys, void *virt,
+                                       u16 len, struct iwl_cmd_meta *meta)
+{
+       dma_addr_t oldphys = phys;
+       struct page *page;
+       int ret;
+
+       if (unlikely(dma_mapping_error(trans->dev, phys)))
+               return -ENOMEM;
+
+       if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+               ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+               if (ret < 0)
+                       goto unmap;
+
+               if (meta)
+                       meta->tbs |= BIT(ret);
+
+               ret = 0;
+               goto trace;
+       }
+
+       /*
+        * Work around a hardware bug. If (as expressed in the
+        * condition above) the TB ends on or crosses a 2^32 (4 GiB)
+        * address boundary, then the next TB may be accessed with
+        * the wrong address.
+        * To work around it, copy the data elsewhere and make
+        * a new mapping for it so the device will not fail.
+        */
+
+       if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+               ret = -ENOBUFS;
+               goto unmap;
+       }
+
+       page = get_workaround_page(trans, skb);
+       if (!page) {
+               ret = -ENOMEM;
+               goto unmap;
+       }
+
+       memcpy(page_address(page), virt, len);
+
+       phys = dma_map_single(trans->dev, page_address(page), len,
+                             DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(trans->dev, phys)))
+               return -ENOMEM;
+       ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+       if (ret < 0) {
+               /* unmap the new allocation as single */
+               oldphys = phys;
+               meta = NULL;
+               goto unmap;
+       }
+       IWL_WARN(trans,
+                "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+                len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+       ret = 0;
+unmap:
+       if (meta)
+               dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+       else
+               dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+       trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+       return ret;
+}
+
 static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                                     struct sk_buff *skb,
                                     struct iwl_tfh_tfd *tfd, int start_len,
-                                    u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+                                    u8 hdr_len,
+                                    struct iwl_device_tx_cmd *dev_cmd)
 {
 #ifdef CONFIG_INET
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
        u16 length, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
-       struct page **page_ptr;
        struct tso_t tso;
 
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
 
        /* Our device supports 9 segments at most; it will fit in 1 page */
-       hdr_page = get_page_hdr(trans, hdr_room);
+       hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;
 
-       get_page(hdr_page->page);
        start_hdr = hdr_page->pos;
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-       *page_ptr = hdr_page->page;
 
        /*
         * Pull the ieee80211 header to be able to use TSO core,
@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
                        dev_kfree_skb(csum_skb);
                        goto out_err;
                }
+               /*
+                * No need for _with_wa, this is from the TSO page and
+                * we leave some space at the end of it, so we can't hit
+                * the buggy scenario.
+                */
                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
                trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
                                        tb_phys, tb_len);
@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 
                /* put the payload */
                while (data_left) {
+                       int ret;
+
                        tb_len = min_t(unsigned int, tso.size, data_left);
                        tb_phys = dma_map_single(trans->dev, tso.data,
                                                 tb_len, DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                       ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+                                                          tb_phys, tso.data,
+                                                          tb_len, NULL);
+                       if (ret) {
                                dev_kfree_skb(csum_skb);
                                goto out_err;
                        }
-                       iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-                       trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
-                                               tb_phys, tb_len);
 
                        data_left -= tb_len;
                        tso_build_data(skb, &tso, tb_len);
@@ -372,7 +487,7 @@ out_err:
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
                                          struct iwl_txq *txq,
-                                         struct iwl_device_cmd *dev_cmd,
+                                         struct iwl_device_tx_cmd *dev_cmd,
                                          struct sk_buff *skb,
                                          struct iwl_cmd_meta *out_meta,
                                          int hdr_len,
@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 
        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
+       /*
+        * No need for _with_wa, the first TB allocation is aligned up
+        * to a 64-byte boundary and thus can't be at the end or cross
+        * a page boundary (much less a 2^32 boundary).
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
        /*
@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
+       /*
+        * No need for _with_wa(), we ensure (via alignment) that the data
+        * here can never cross or end at a page boundary.
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
 
        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t tb_phys;
-               int tb_idx;
+               unsigned int fragsz = skb_frag_size(frag);
+               int ret;
 
-               if (!skb_frag_size(frag))
+               if (!fragsz)
                        continue;
 
                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-                                          skb_frag_size(frag), DMA_TO_DEVICE);
-
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                       return -ENOMEM;
-               tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-                                             skb_frag_size(frag));
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
-                                       tb_phys, skb_frag_size(frag));
-               if (tb_idx < 0)
-                       return tb_idx;
-
-               out_meta->tbs |= BIT(tb_idx);
+                                          fragsz, DMA_TO_DEVICE);
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  skb_frag_address(frag),
+                                                  fragsz, out_meta);
+               if (ret)
+                       return ret;
        }
 
        return 0;
@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
                                    struct iwl_txq *txq,
-                                   struct iwl_device_cmd *dev_cmd,
+                                   struct iwl_device_tx_cmd *dev_cmd,
                                    struct sk_buff *skb,
                                    struct iwl_cmd_meta *out_meta,
                                    int hdr_len,
@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        /* The first TB points to bi-directional DMA data */
        memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 
+       /*
+        * No need for _with_wa, the first TB allocation is aligned up
+        * to a 64-byte boundary and thus can't be at the end or cross
+        * a page boundary (much less a 2^32 boundary).
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
        /*
@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
                goto out_err;
+       /*
+        * No need for _with_wa(), we ensure (via alignment) that the data
+        * here can never cross or end at a page boundary.
+        */
        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
        tb2_len = skb_headlen(skb) - hdr_len;
 
        if (tb2_len > 0) {
+               int ret;
+
                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
                                         tb2_len, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  skb->data + hdr_len, tb2_len,
+                                                  NULL);
+               if (ret)
                        goto out_err;
-               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
-                                       tb_phys, tb2_len);
        }
 
        if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
                goto out_err;
 
        skb_walk_frags(skb, frag) {
+               int ret;
+
                tb_phys = dma_map_single(trans->dev, frag->data,
                                         skb_headlen(frag), DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+               ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+                                                  frag->data,
+                                                  skb_headlen(frag), NULL);
+               if (ret)
                        goto out_err;
-               iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
-               trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
-                                       tb_phys, skb_headlen(frag));
                if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
                        goto out_err;
        }
@@ -538,7 +670,7 @@ out_err:
 static
 struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                            struct iwl_txq *txq,
-                                           struct iwl_device_cmd *dev_cmd,
+                                           struct iwl_device_tx_cmd *dev_cmd,
                                            struct sk_buff *skb,
                                            struct iwl_cmd_meta *out_meta)
 {
@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 }
 
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                          struct iwl_device_cmd *dev_cmd, int txq_id)
+                          struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_cmd_meta *out_meta;
@@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                       struct iwl_device_cmd **dev_cmd_ptr;
+                       struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);
index f21f16a..b0eb52b 100644
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        u8 sec_ctl = 0;
        u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
        __le16 bc_ent;
-       struct iwl_tx_cmd *tx_cmd =
-               (void *)txq->entries[txq->write_ptr].cmd->payload;
+       struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        u8 sta_id = tx_cmd->sta_id;
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
        int read_ptr = txq->read_ptr;
        u8 sta_id = 0;
        __le16 bc_ent;
-       struct iwl_tx_cmd *tx_cmd =
-               (void *)txq->entries[read_ptr].cmd->payload;
+       struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 
        WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
                            struct sk_buff *skb)
 {
        struct page **page_ptr;
+       struct page *next;
 
        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+       next = *page_ptr;
+       *page_ptr = NULL;
 
-       if (*page_ptr) {
-               __free_page(*page_ptr);
-               *page_ptr = NULL;
+       while (next) {
+               struct page *tmp = next;
+
+               next = *(void **)(page_address(next) + PAGE_SIZE -
+                                 sizeof(void *));
+               __free_page(tmp);
        }
 }
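
The rewritten free loop walks a singly linked chain whose next pointer is stored in the last sizeof(void *) bytes of each page (see the get_page_hdr() change below). A minimal userspace analogue of the same tail-pointer chaining, assuming fixed 4 KiB blocks:

	#include <stdlib.h>

	#define BLK_SIZE 4096	/* stand-in for PAGE_SIZE in this sketch */

	/* The chain pointer lives in the last sizeof(void *) bytes. */
	static void **chain_slot(void *blk)
	{
		return (void **)((char *)blk + BLK_SIZE - sizeof(void *));
	}

	static void *alloc_chained(void **head)
	{
		void *blk = malloc(BLK_SIZE);

		if (!blk)
			return NULL;
		*chain_slot(blk) = *head;	/* link ahead of current head */
		*head = blk;
		return blk;
	}

	static void free_chain(void **head)
	{
		void *next = *head;

		*head = NULL;
		while (next) {
			void *tmp = next;

			next = *chain_slot(next);	/* read link before freeing */
			free(tmp);
		}
	}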
 
@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
                while (!skb_queue_empty(&overflow_skbs)) {
                        struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-                       struct iwl_device_cmd *dev_cmd_ptr;
+                       struct iwl_device_tx_cmd *dev_cmd_ptr;
 
                        dev_cmd_ptr = *(void **)((u8 *)skb->cb +
                                                 trans_pcie->dev_cmd_offs);
@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+                                     struct sk_buff *skb)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+       struct page **page_ptr;
+
+       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+       if (WARN_ON(*page_ptr))
+               return NULL;
 
        if (!p->page)
                goto alloc;
 
-       /* enough room on this page */
-       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
-               return p;
+       /*
+        * Check if there's enough room on this page
+        *
+        * Note that we put a page chaining pointer *last* in the
+        * page - we need it somewhere, and if it's there then we
+        * avoid DMA mapping the last bits of the page which may
+        * trigger the 32-bit boundary hardware bug.
+        *
+        * (see also get_workaround_page() in tx-gen2.c)
+        */
+       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+                          sizeof(void *))
+               goto out;
 
        /* We don't have enough room on this page, get a new one. */
        __free_page(p->page);
@@ -2072,6 +2095,11 @@ alloc:
        if (!p->page)
                return NULL;
        p->pos = page_address(p->page);
+       /* set the chaining pointer to NULL */
+       *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+       *page_ptr = p->page;
+       get_page(p->page);
        return p;
 }
 
@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_txq *txq, u8 hdr_len,
                                   struct iwl_cmd_meta *out_meta,
-                                  struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                                  struct iwl_device_tx_cmd *dev_cmd,
+                                  u16 tb1_len)
 {
        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
        u16 length, iv_len, amsdu_pad;
        u8 *start_hdr;
        struct iwl_tso_hdr_page *hdr_page;
-       struct page **page_ptr;
        struct tso_t tso;
 
        /* if the packet is protected, then it must be CCMP or GCMP */
@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
        /* Our device supports 9 segments at most, it will fit in 1 page */
-       hdr_page = get_page_hdr(trans, hdr_room);
+       hdr_page = get_page_hdr(trans, hdr_room, skb);
        if (!hdr_page)
                return -ENOMEM;
 
-       get_page(hdr_page->page);
        start_hdr = hdr_page->pos;
-       page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-       *page_ptr = hdr_page->page;
        memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
        hdr_page->pos += iv_len;
 
@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_txq *txq, u8 hdr_len,
                                   struct iwl_cmd_meta *out_meta,
-                                  struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+                                  struct iwl_device_tx_cmd *dev_cmd,
+                                  u16 tb1_len)
 {
        /* No A-MSDU without CONFIG_INET */
        WARN_ON(1);
@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 #endif /* CONFIG_INET */
 
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                     struct iwl_device_cmd *dev_cmd, int txq_id)
+                     struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct ieee80211_hdr *hdr;
@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                /* don't put the packet on the ring, if there is no room */
                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                       struct iwl_device_cmd **dev_cmd_ptr;
+                       struct iwl_device_tx_cmd **dev_cmd_ptr;
 
                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
                                               trans_pcie->dev_cmd_offs);
index 57edfad..c9401c1 100644
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
        int hw, ap, ap_max = ie[1];
        u8 hw_rate;
 
+       if (ap_max > MAX_RATES) {
+               lbs_deb_assoc("invalid rates\n");
+               return tlv;
+       }
        /* Advance past IE header */
        ie += 2;
 
@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
        struct cmd_ds_802_11_ad_hoc_join cmd;
        u8 preamble = RADIO_PREAMBLE_SHORT;
        int ret = 0;
+       int hw, i;
+       u8 rates_max;
+       u8 *rates;
 
        /* TODO: set preamble based on scan result */
        ret = lbs_set_radio(priv, preamble, 1);
@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
        if (!rates_eid) {
                lbs_add_rates(cmd.bss.rates);
        } else {
-               int hw, i;
-               u8 rates_max = rates_eid[1];
-               u8 *rates = cmd.bss.rates;
+               rates_max = rates_eid[1];
+               if (rates_max > MAX_RATES) {
+                       lbs_deb_join("invalid rates\n");
+                       goto out;
+               }
+               rates = cmd.bss.rates;
                for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
                        u8 hw_rate = lbs_rates[hw].bitrate / 5;
                        for (i = 0; i < rates_max; i++) {
index 55116f3..a4a7854 100644
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
                        return 0;
 
                sband = dev->hw->wiphy->bands[status->band];
-               if (!sband || status->rate_idx > sband->n_bitrates)
+               if (!sband || status->rate_idx >= sband->n_bitrates)
                        return 0;
 
                rate = &sband->bitrates[status->rate_idx];
index b9f2a40..96018fd 100644
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
        struct ieee80211_hw *hw = dev->hw;
 
-       mt76_led_cleanup(dev);
+       if (IS_ENABLED(CONFIG_MT76_LEDS))
+               mt76_led_cleanup(dev);
        mt76_tx_status_check(dev, NULL, true);
        ieee80211_unregister_hw(hw);
 }
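
Guarding the call with IS_ENABLED() instead of an #ifdef keeps the code compiled and type-checked in every configuration; the macro folds to a compile-time 0 or 1 and the dead branch is discarded. The pattern in isolation, with hypothetical names:

	#include <linux/kconfig.h>	/* IS_ENABLED() */

	struct example_dev;
	void example_led_cleanup(struct example_dev *dev);

	static void example_teardown(struct example_dev *dev)
	{
		/*
		 * Compiles to nothing when CONFIG_EXAMPLE_LEDS is off, but
		 * is still parsed and type-checked, unlike #ifdef'd code.
		 */
		if (IS_ENABLED(CONFIG_EXAMPLE_LEDS))
			example_led_cleanup(dev);
	}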
index 4937a08..fbeb9f7 100644
@@ -5074,18 +5074,25 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
 
 #ifdef CONFIG_PCI_ATS
 /*
- * Some devices have a broken ATS implementation causing IOMMU stalls.
- * Don't use ATS for those devices.
+ * Some devices require additional driver setup to enable ATS.  Don't use
+ * ATS for those devices as ATS will be enabled before the driver has had a
+ * chance to load and configure the device.
  */
-static void quirk_no_ats(struct pci_dev *pdev)
+static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
-       pci_info(pdev, "disabling ATS (broken on this device)\n");
+       if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+               return;
+
+       pci_info(pdev, "disabling ATS\n");
        pdev->ats_cap = 0;
 }
 
 /* AMD Stoney platform GPU */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
+/* AMD Iceland dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi14 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
 
 /* Freescale PCIe doesn't support MSI in RC mode */
index 44d7f50..d936e7a 100644
@@ -49,6 +49,7 @@
                .padown_offset = SPT_PAD_OWN,           \
                .padcfglock_offset = SPT_PADCFGLOCK,    \
                .hostown_offset = SPT_HOSTSW_OWN,       \
+               .is_offset = SPT_GPI_IS,                \
                .ie_offset = SPT_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
index d1ad512..3ca71e3 100644
@@ -3,6 +3,7 @@
 config OPTEE
        tristate "OP-TEE"
        depends on HAVE_ARM_SMCCC
+       depends on MMU
        help
          This implements the OP-TEE Trusted Execution Environment (TEE)
          driver.
index f639dde..ba4d8f3 100644
@@ -500,11 +500,8 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
                              &dev_replace->scrub_progress, 0, 1);
 
        ret = btrfs_dev_replace_finishing(fs_info, ret);
-       if (ret == -EINPROGRESS) {
+       if (ret == -EINPROGRESS)
                ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
-       } else if (ret != -ECANCELED) {
-               WARN_ON(ret);
-       }
 
        return ret;
 
index 21de630..fd266a2 100644
@@ -3577,17 +3577,27 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 * This can easily boost the amount of SYSTEM chunks if cleaner
                 * thread can't be triggered fast enough, and use up all space
                 * of btrfs_super_block::sys_chunk_array
+                *
+                * While for dev replace, we need to try our best to mark block
+                * group RO, to prevent race between:
+                * - Write duplication
+                *   Contains latest data
+                * - Scrub copy
+                *   Contains data from commit tree
+                *
+                * If target block group is not marked RO, nocow writes can
+                * be overwritten by scrub copy, causing data corruption.
+                * So for dev-replace, it's not allowed to continue if a block
+                * group is not RO.
                 */
-               ret = btrfs_inc_block_group_ro(cache, false);
-               scrub_pause_off(fs_info);
-
+               ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
                if (ret == 0) {
                        ro_set = 1;
-               } else if (ret == -ENOSPC) {
+               } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
                        /*
                         * btrfs_inc_block_group_ro return -ENOSPC when it
                         * failed in creating new chunk for metadata.
-                        * It is not a problem for scrub/replace, because
+                        * It is not a problem for scrub, because
                         * metadata are always cowed, and our scrub paused
                         * commit_transactions.
                         */
@@ -3596,9 +3606,22 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        btrfs_warn(fs_info,
                                   "failed setting block group ro: %d", ret);
                        btrfs_put_block_group(cache);
+                       scrub_pause_off(fs_info);
                        break;
                }
 
+               /*
+                * Now the target block is marked RO, wait for nocow writes to
+                * finish before dev-replace.
+                * COW is fine, as COW never overwrites extents in commit tree.
+                */
+               if (sctx->is_dev_replace) {
+                       btrfs_wait_nocow_writers(cache);
+                       btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
+                                       cache->length);
+               }
+
+               scrub_pause_off(fs_info);
                down_write(&dev_replace->rwsem);
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
index 374db1b..145d46b 100644
@@ -708,8 +708,10 @@ void ceph_mdsc_release_request(struct kref *kref)
                /* avoid calling iput_final() in mds dispatch threads */
                ceph_async_iput(req->r_inode);
        }
-       if (req->r_parent)
+       if (req->r_parent) {
                ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+               ceph_async_iput(req->r_parent);
+       }
        ceph_async_iput(req->r_target_inode);
        if (req->r_dentry)
                dput(req->r_dentry);
@@ -2676,8 +2678,10 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
        /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
        if (req->r_inode)
                ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
-       if (req->r_parent)
+       if (req->r_parent) {
                ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
+               ihold(req->r_parent);
+       }
        if (req->r_old_dentry_dir)
                ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
index 187dd94..5953d7f 100644
@@ -4463,13 +4463,15 @@ static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
+       if (up.resv)
+               return -EINVAL;
        if (check_add_overflow(up.offset, nr_args, &done))
                return -EOVERFLOW;
        if (done > ctx->nr_user_files)
                return -EINVAL;
 
        done = 0;
-       fds = (__s32 __user *) up.fds;
+       fds = u64_to_user_ptr(up.fds);
        while (nr_args) {
                struct fixed_file_table *table;
                unsigned index;
index d26d5ea..de2ecef 100644
@@ -102,10 +102,14 @@ EXPORT_SYMBOL(iterate_dir);
  * filename length, and the above "soft error" worry means
  * that it's probably better left alone until we have that
  * issue clarified.
+ *
+ * Note the PATH_MAX check - it's arbitrary but the real
+ * kernel limit on a possible path component, not NAME_MAX,
+ * which is the technical standard limit.
  */
 static int verify_dirent_name(const char *name, int len)
 {
-       if (!len)
+       if (len <= 0 || len >= PATH_MAX)
                return -EIO;
        if (memchr(name, '/', len))
                return -EIO;
@@ -206,7 +210,7 @@ struct linux_dirent {
 struct getdents_callback {
        struct dir_context ctx;
        struct linux_dirent __user * current_dir;
-       struct linux_dirent __user * previous;
+       int prev_reclen;
        int count;
        int error;
 };
@@ -214,12 +218,13 @@ struct getdents_callback {
 static int filldir(struct dir_context *ctx, const char *name, int namlen,
                   loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct linux_dirent __user * dirent;
+       struct linux_dirent __user *dirent, *prev;
        struct getdents_callback *buf =
                container_of(ctx, struct getdents_callback, ctx);
        unsigned long d_ino;
        int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
                sizeof(long));
+       int prev_reclen;
 
        buf->error = verify_dirent_name(name, namlen);
        if (unlikely(buf->error))
@@ -232,28 +237,24 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                buf->error = -EOVERFLOW;
                return -EOVERFLOW;
        }
-       dirent = buf->previous;
-       if (dirent && signal_pending(current))
+       prev_reclen = buf->prev_reclen;
+       if (prev_reclen && signal_pending(current))
                return -EINTR;
-
-       /*
-        * Note! This range-checks 'previous' (which may be NULL).
-        * The real range was checked in getdents
-        */
-       if (!user_access_begin(dirent, sizeof(*dirent)))
-               goto efault;
-       if (dirent)
-               unsafe_put_user(offset, &dirent->d_off, efault_end);
        dirent = buf->current_dir;
+       prev = (void __user *) dirent - prev_reclen;
+       if (!user_access_begin(prev, reclen + prev_reclen))
+               goto efault;
+
+       /* This might be 'dirent->d_off', but if so it will get overwritten */
+       unsafe_put_user(offset, &prev->d_off, efault_end);
        unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
        unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
        unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
        unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
        user_access_end();
 
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
+       buf->current_dir = (void __user *)dirent + reclen;
+       buf->prev_reclen = reclen;
        buf->count -= reclen;
        return 0;
 efault_end:
@@ -267,7 +268,6 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
                struct linux_dirent __user *, dirent, unsigned int, count)
 {
        struct fd f;
-       struct linux_dirent __user * lastdirent;
        struct getdents_callback buf = {
                .ctx.actor = filldir,
                .count = count,
@@ -285,8 +285,10 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
+       if (buf.prev_reclen) {
+               struct linux_dirent __user * lastdirent;
+               lastdirent = (void __user *)buf.current_dir - buf.prev_reclen;
+
                if (put_user(buf.ctx.pos, &lastdirent->d_off))
                        error = -EFAULT;
                else
@@ -299,7 +301,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
 struct getdents_callback64 {
        struct dir_context ctx;
        struct linux_dirent64 __user * current_dir;
-       struct linux_dirent64 __user * previous;
+       int prev_reclen;
        int count;
        int error;
 };
@@ -307,11 +309,12 @@ struct getdents_callback64 {
 static int filldir64(struct dir_context *ctx, const char *name, int namlen,
                     loff_t offset, u64 ino, unsigned int d_type)
 {
-       struct linux_dirent64 __user *dirent;
+       struct linux_dirent64 __user *dirent, *prev;
        struct getdents_callback64 *buf =
                container_of(ctx, struct getdents_callback64, ctx);
        int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
                sizeof(u64));
+       int prev_reclen;
 
        buf->error = verify_dirent_name(name, namlen);
        if (unlikely(buf->error))
@@ -319,30 +322,27 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
-       dirent = buf->previous;
-       if (dirent && signal_pending(current))
+       prev_reclen = buf->prev_reclen;
+       if (prev_reclen && signal_pending(current))
                return -EINTR;
-
-       /*
-        * Note! This range-checks 'previous' (which may be NULL).
-        * The real range was checked in getdents
-        */
-       if (!user_access_begin(dirent, sizeof(*dirent)))
-               goto efault;
-       if (dirent)
-               unsafe_put_user(offset, &dirent->d_off, efault_end);
        dirent = buf->current_dir;
+       prev = (void __user *)dirent - prev_reclen;
+       if (!user_access_begin(prev, reclen + prev_reclen))
+               goto efault;
+
+       /* This might be 'dirent->d_off', but if so it will get overwritten */
+       unsafe_put_user(offset, &prev->d_off, efault_end);
        unsafe_put_user(ino, &dirent->d_ino, efault_end);
        unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
        unsafe_put_user(d_type, &dirent->d_type, efault_end);
        unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
        user_access_end();
 
-       buf->previous = dirent;
-       dirent = (void __user *)dirent + reclen;
-       buf->current_dir = dirent;
+       buf->prev_reclen = reclen;
+       buf->current_dir = (void __user *)dirent + reclen;
        buf->count -= reclen;
        return 0;
+
 efault_end:
        user_access_end();
 efault:
@@ -354,7 +354,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
                    unsigned int count)
 {
        struct fd f;
-       struct linux_dirent64 __user * lastdirent;
        struct getdents_callback64 buf = {
                .ctx.actor = filldir64,
                .count = count,
@@ -372,9 +371,11 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent,
        error = iterate_dir(f.file, &buf.ctx);
        if (error >= 0)
                error = buf.error;
-       lastdirent = buf.previous;
-       if (lastdirent) {
+       if (buf.prev_reclen) {
+               struct linux_dirent64 __user * lastdirent;
                typeof(lastdirent->d_off) d_off = buf.ctx.pos;
+
+               lastdirent = (void __user *) buf.current_dir - buf.prev_reclen;
                if (__put_user(d_off, &lastdirent->d_off))
                        error = -EFAULT;
                else
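
With the reworked bookkeeping, each record's d_off is written into the previous record once the next one is placed, so carrying prev_reclen between calls is enough to find it. From userspace the resulting buffer is simply a run of variable-length records chained by d_reclen; a minimal Linux-only walker (error handling elided):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Layout of one getdents64 record, as filled in by filldir64. */
	struct linux_dirent64 {
		uint64_t       d_ino;
		int64_t        d_off;
		unsigned short d_reclen;
		unsigned char  d_type;
		char           d_name[];
	};

	int main(void)
	{
		char buf[4096];
		int fd = open(".", O_RDONLY | O_DIRECTORY);
		long n = syscall(SYS_getdents64, fd, buf, sizeof(buf));
		long pos = 0;

		while (pos < n) {	/* step through records via d_reclen */
			struct linux_dirent64 *d = (void *)(buf + pos);

			printf("%llu\t%s\n",
			       (unsigned long long)d->d_ino, d->d_name);
			pos += d->d_reclen;
		}
		close(fd);
		return 0;
	}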
index 62b40df..28b241c 100644
@@ -319,8 +319,12 @@ static int reiserfs_for_each_xattr(struct inode *inode,
 out_dir:
        dput(dir);
 out:
-       /* -ENODATA isn't an error */
-       if (err == -ENODATA)
+       /*
+        * -ENODATA: this object doesn't have any xattrs
+        * -EOPNOTSUPP: this file system doesn't have xattrs enabled on disk.
+        * Neither is an error
+        */
+       if (err == -ENODATA || err == -EOPNOTSUPP)
                err = 0;
        return err;
 }
index 11bdf6c..78e9c6c 100644
@@ -3705,6 +3705,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
 int dev_get_alias(const struct net_device *, char *, size_t);
 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
 int __dev_set_mtu(struct net_device *, int);
+int dev_validate_mtu(struct net_device *dev, int mtu,
+                    struct netlink_ext_ack *extack);
 int dev_set_mtu_ext(struct net_device *dev, int mtu,
                    struct netlink_ext_ack *extack);
 int dev_set_mtu(struct net_device *, int);
index 4d8b1ea..908d38d 100644
@@ -426,13 +426,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
               sizeof(*addr));
 }
 
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
-       return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
 /* How often should the gc be run by default */
 #define IPSET_GC_TIME                  (3 * 60)
 
index cf09ab3..851425c 100644
@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
        const struct nfnl_callback *cb; /* callback for individual types */
        struct module *owner;
        int (*commit)(struct net *net, struct sk_buff *skb);
-       int (*abort)(struct net *net, struct sk_buff *skb);
+       int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
        void (*cleanup)(struct net *net);
        bool (*valid_genid)(struct net *net, u32 genid);
 };
index 86eecbd..f73e177 100644
@@ -416,6 +416,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
        return xa->xa_flags & XA_FLAGS_MARK(mark);
 }
 
+/**
+ * xa_for_each_range() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ * @last: Last index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index.  You may modify @index during the iteration if you
+ * want to skip or reprocess indices.  It is safe to modify the array
+ * during the iteration.  At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to @last.
+ *
+ * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n).  You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_range() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_range().
+ *
+ * Context: Any context.  Takes and releases the RCU lock.
+ */
+#define xa_for_each_range(xa, index, entry, start, last)               \
+       for (index = start,                                             \
+            entry = xa_find(xa, &index, last, XA_PRESENT);             \
+            entry;                                                     \
+            entry = xa_find_after(xa, &index, last, XA_PRESENT))
+
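
A usage sketch of the new iterator; the array contents and entry type are hypothetical:

	#include <linux/printk.h>
	#include <linux/xarray.h>

	/*
	 * Dump whatever is stored between indices 16 and 31 inclusive.
	 * The iterator takes and drops the RCU read lock itself, so a
	 * plain read-only walk needs no extra locking here.
	 */
	static void example_dump_range(struct xarray *xa)
	{
		unsigned long index;
		void *entry;

		xa_for_each_range(xa, index, entry, 16, 31)
			pr_info("index %lu: entry %p\n", index, entry);
	}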
 /**
  * xa_for_each_start() - Iterate over a portion of an XArray.
  * @xa: XArray.
@@ -439,11 +469,8 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
  *
  * Context: Any context.  Takes and releases the RCU lock.
  */
-#define xa_for_each_start(xa, index, entry, start)                     \
-       for (index = start,                                             \
-            entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);        \
-            entry;                                                     \
-            entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+#define xa_for_each_start(xa, index, entry, start) \
+       xa_for_each_range(xa, index, entry, start, ULONG_MAX)
 
 /**
  * xa_for_each() - Iterate over present entries in an XArray.
@@ -508,6 +535,14 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
                                spin_lock_irqsave(&(xa)->xa_lock, flags)
 #define xa_unlock_irqrestore(xa, flags) \
                                spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+#define xa_lock_nested(xa, subclass) \
+                               spin_lock_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_bh_nested(xa, subclass) \
+                               spin_lock_bh_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irq_nested(xa, subclass) \
+                               spin_lock_irq_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irqsave_nested(xa, flags, subclass) \
+               spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
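
The new _nested variants matter when two xarrays of the same lock class must be held at once, for example to move an entry between them atomically; without a subclass annotation lockdep would flag the second acquisition as a self-deadlock. A sketch, with error checking of __xa_store() omitted:

	#include <linux/gfp.h>
	#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */
	#include <linux/xarray.h>

	static void example_move_entry(struct xarray *src, struct xarray *dst,
				       unsigned long index)
	{
		void *entry;

		xa_lock(src);
		xa_lock_nested(dst, SINGLE_DEPTH_NESTING);
		entry = __xa_erase(src, index);
		__xa_store(dst, index, entry, GFP_ATOMIC);
		xa_unlock(dst);
		xa_unlock(src);
	}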
 
 /*
  * Versions of the normal API which require the caller to hold the
index 286fd96..a1a8d45 100644
@@ -7,6 +7,7 @@
 struct netns_nftables {
        struct list_head        tables;
        struct list_head        commit_list;
+       struct list_head        module_list;
        struct mutex            commit_mutex;
        unsigned int            base_seq;
        u8                      gencursor;
index 9a0e8af..a5ccfa6 100644
@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
            TP_PROTO(xen_mc_callback_fn_t fn, void *data),
            TP_ARGS(fn, data),
            TP_STRUCT__entry(
-                   __field(xen_mc_callback_fn_t, fn)
+                   /*
+                    * Use field_struct to avoid is_signed_type()
+                    * comparison of a function pointer.
+                    */
+                   __field_struct(xen_mc_callback_fn_t, fn)
                    __field(void *, data)
                    ),
            TP_fast_assign(
index a3300e1..55cfcb7 100644
@@ -178,7 +178,8 @@ struct io_uring_params {
 
 struct io_uring_files_update {
        __u32 offset;
-       __s32 *fds;
+       __u32 resv;
+       __aligned_u64 /* __s32 * */ fds;
 };
 
 #endif
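
Carrying the user pointer as __aligned_u64 behind a checked reserved field gives the struct one size and layout on both 32-bit and 64-bit ABIs, so no compat translation is needed; the fs/io_uring.c hunk above converts it back with u64_to_user_ptr(). The pattern, reduced to a hypothetical uapi struct and handler:

	#include <linux/errno.h>
	#include <linux/kernel.h>	/* u64_to_user_ptr() */
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct example_update {
		__u32 offset;
		__u32 resv;			/* must be zero for now */
		__aligned_u64 /* __s32 * */ fds;
	};

	static int example_handle(const struct example_update *up)
	{
		__s32 __user *fds;

		if (up->resv)			/* reject non-zero reserved bits */
			return -EINVAL;
		fds = u64_to_user_ptr(up->fds);
		/* ... copy_from_user() from fds as before ... */
		return 0;
	}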
index 26b9168..d65f2d5 100644
@@ -1147,24 +1147,24 @@ void free_basic_memory_bitmaps(void)
 
 void clear_free_pages(void)
 {
-#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;
 
        if (WARN_ON(!(free_pages_map)))
                return;
 
-       memory_bm_position_reset(bm);
-       pfn = memory_bm_next_pfn(bm);
-       while (pfn != BM_END_OF_MAP) {
-               if (pfn_valid(pfn))
-                       clear_highpage(pfn_to_page(pfn));
-
+       if (IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) || want_init_on_free()) {
+               memory_bm_position_reset(bm);
                pfn = memory_bm_next_pfn(bm);
+               while (pfn != BM_END_OF_MAP) {
+                       if (pfn_valid(pfn))
+                               clear_highpage(pfn_to_page(pfn));
+
+                       pfn = memory_bm_next_pfn(bm);
+               }
+               memory_bm_position_reset(bm);
+               pr_info("free pages cleared after restore\n");
        }
-       memory_bm_position_reset(bm);
-       pr_info("free pages cleared after restore\n");
-#endif /* PAGE_POISONING_ZERO */
 }
 
 /**
index ddb7e7f..5b6ee4a 100644
@@ -9420,6 +9420,11 @@ __init static int tracing_set_default_clock(void)
 {
        /* sched_clock_stable() is determined in late_initcall */
        if (!trace_boot_clock && !sched_clock_stable()) {
+               if (security_locked_down(LOCKDOWN_TRACEFS)) {
+                       pr_warn("Can not set tracing clock due to lockdown\n");
+                       return -EPERM;
+               }
+
                printk(KERN_WARNING
                       "Unstable clock detected, switching default tracing clock to \"global\"\n"
                       "If you want to keep using the local clock, then add:\n"
index f62de5f..6ac35b9 100644
@@ -116,6 +116,7 @@ struct hist_field {
        struct ftrace_event_field       *field;
        unsigned long                   flags;
        hist_field_fn_t                 fn;
+       unsigned int                    ref;
        unsigned int                    size;
        unsigned int                    offset;
        unsigned int                    is_signed;
@@ -1766,11 +1767,13 @@ static struct hist_field *find_var(struct hist_trigger_data *hist_data,
        struct event_trigger_data *test;
        struct hist_field *hist_field;
 
+       lockdep_assert_held(&event_mutex);
+
        hist_field = find_var_field(hist_data, var_name);
        if (hist_field)
                return hist_field;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@ -1820,7 +1823,9 @@ static struct hist_field *find_file_var(struct trace_event_file *file,
        struct event_trigger_data *test;
        struct hist_field *hist_field;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        test_data = test->private_data;
                        hist_field = find_var_field(test_data, var_name);
@@ -2423,8 +2428,16 @@ static int contains_operator(char *str)
        return field_op;
 }
 
+static void get_hist_field(struct hist_field *hist_field)
+{
+       hist_field->ref++;
+}
+
 static void __destroy_hist_field(struct hist_field *hist_field)
 {
+       if (--hist_field->ref > 1)
+               return;
+
        kfree(hist_field->var.name);
        kfree(hist_field->name);
        kfree(hist_field->type);
@@ -2466,6 +2479,8 @@ static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
        if (!hist_field)
                return NULL;
 
+       hist_field->ref = 1;
+
        hist_field->hist_data = hist_data;
 
        if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
@@ -2661,6 +2676,17 @@ static struct hist_field *create_var_ref(struct hist_trigger_data *hist_data,
 {
        unsigned long flags = HIST_FIELD_FL_VAR_REF;
        struct hist_field *ref_field;
+       int i;
+
+       /* Check if the variable already exists */
+       for (i = 0; i < hist_data->n_var_refs; i++) {
+               ref_field = hist_data->var_refs[i];
+               if (ref_field->var.idx == var_field->var.idx &&
+                   ref_field->var.hist_data == var_field->hist_data) {
+                       get_hist_field(ref_field);
+                       return ref_field;
+               }
+       }
 
        ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
        if (ref_field) {
@@ -3115,7 +3141,9 @@ static char *find_trigger_filter(struct hist_trigger_data *hist_data,
 {
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (test->private_data == hist_data)
                                return test->filter_str;
@@ -3166,9 +3194,11 @@ find_compatible_hist(struct hist_trigger_data *target_hist_data,
        struct event_trigger_data *test;
        unsigned int n_keys;
 
+       lockdep_assert_held(&event_mutex);
+
        n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
 
@@ -5528,7 +5558,7 @@ static int hist_show(struct seq_file *m, void *v)
                goto out_unlock;
        }
 
-       list_for_each_entry_rcu(data, &event_file->triggers, list) {
+       list_for_each_entry(data, &event_file->triggers, list) {
                if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
                        hist_trigger_show(m, data, n++);
        }
@@ -5921,7 +5951,9 @@ static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
        if (hist_data->attrs->name && !named_data)
                goto new;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6005,10 +6037,12 @@ static bool have_hist_trigger_match(struct event_trigger_data *data,
        struct event_trigger_data *test, *named_data = NULL;
        bool match = false;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (hist_trigger_match(data, test, named_data, false)) {
                                match = true;
@@ -6026,10 +6060,12 @@ static bool hist_trigger_check_refs(struct event_trigger_data *data,
        struct hist_trigger_data *hist_data = data->private_data;
        struct event_trigger_data *test, *named_data = NULL;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6051,10 +6087,12 @@ static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *test, *named_data = NULL;
        bool unregistered = false;
 
+       lockdep_assert_held(&event_mutex);
+
        if (hist_data->attrs->name)
                named_data = find_named_trigger(hist_data->attrs->name);
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (!hist_trigger_match(data, test, named_data, false))
                                continue;
@@ -6080,7 +6118,9 @@ static bool hist_file_check_refs(struct trace_event_file *file)
        struct hist_trigger_data *hist_data;
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        hist_data = test->private_data;
                        if (check_var_refs(hist_data))
@@ -6323,7 +6363,8 @@ hist_enable_trigger(struct event_trigger_data *data, void *rec,
        struct enable_trigger_data *enable_data = data->private_data;
        struct event_trigger_data *test;
 
-       list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
+       list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
+                               lockdep_is_held(&event_mutex)) {
                if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
                        if (enable_data->enable)
                                test->paused = false;
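
Most of the conversions above drop _rcu because the triggers list is only ever walked with event_mutex held; this last site can also run from RCU context, so it keeps list_for_each_entry_rcu() and passes the optional cond argument, a lockdep expression that legitimizes the walk when the mutex rather than the RCU read lock is held. The idiom in isolation, with hypothetical types:

	#include <linux/mutex.h>
	#include <linux/rculist.h>

	struct example_item {
		struct list_head list;
		int val;
	};

	/* Safe to call with either rcu_read_lock() or *lock held. */
	static int example_sum(struct list_head *head, struct mutex *lock)
	{
		struct example_item *pos;
		int sum = 0;

		list_for_each_entry_rcu(pos, head, list,
					lockdep_is_held(lock))
			sum += pos->val;
		return sum;
	}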
index 2cd53ca..40106ff 100644
@@ -501,7 +501,9 @@ void update_cond_flag(struct trace_event_file *file)
        struct event_trigger_data *data;
        bool set_cond = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                if (data->filter || event_command_post_trigger(data->cmd_ops) ||
                    event_command_needs_rec(data->cmd_ops)) {
                        set_cond = true;
@@ -536,7 +538,9 @@ static int register_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *test;
        int ret = 0;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
                        ret = -EEXIST;
                        goto out;
@@ -581,7 +585,9 @@ static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
        struct event_trigger_data *data;
        bool unregistered = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
                        unregistered = true;
                        list_del_rcu(&data->list);
@@ -1497,7 +1503,9 @@ int event_enable_register_trigger(char *glob,
        struct event_trigger_data *test;
        int ret = 0;
 
-       list_for_each_entry_rcu(test, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(test, &file->triggers, list) {
                test_enable_data = test->private_data;
                if (test_enable_data &&
                    (test->cmd_ops->trigger_type ==
@@ -1537,7 +1545,9 @@ void event_enable_unregister_trigger(char *glob,
        struct event_trigger_data *data;
        bool unregistered = false;
 
-       list_for_each_entry_rcu(data, &file->triggers, list) {
+       lockdep_assert_held(&event_mutex);
+
+       list_for_each_entry(data, &file->triggers, list) {
                enable_data = data->private_data;
                if (enable_data &&
                    (data->cmd_ops->trigger_type ==
index 7f89026..3f54dc2 100644
@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
        INIT_HLIST_NODE(&tk->rp.kp.hlist);
        INIT_LIST_HEAD(&tk->rp.kp.list);
 
-       ret = trace_probe_init(&tk->tp, event, group);
+       ret = trace_probe_init(&tk->tp, event, group, false);
        if (ret < 0)
                goto error;
 
index 905b10a..9ae87be 100644
@@ -984,15 +984,19 @@ void trace_probe_cleanup(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-                    const char *group)
+                    const char *group, bool alloc_filter)
 {
        struct trace_event_call *call;
+       size_t size = sizeof(struct trace_probe_event);
        int ret = 0;
 
        if (!event || !group)
                return -EINVAL;
 
-       tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+       if (alloc_filter)
+               size += sizeof(struct trace_uprobe_filter);
+
+       tp->event = kzalloc(size, GFP_KERNEL);
        if (!tp->event)
                return -ENOMEM;
 
index 4ee7037..a0ff9e2 100644
@@ -223,6 +223,12 @@ struct probe_arg {
        const struct fetch_type *type;  /* Type of this argument */
 };
 
+struct trace_uprobe_filter {
+       rwlock_t                rwlock;
+       int                     nr_systemwide;
+       struct list_head        perf_events;
+};
+
 /* Event call and class holder */
 struct trace_probe_event {
        unsigned int                    flags;  /* For TP_FLAG_* */
@@ -230,6 +236,7 @@ struct trace_probe_event {
        struct trace_event_call         call;
        struct list_head                files;
        struct list_head                probes;
+       struct trace_uprobe_filter      filter[0];
 };
 
 struct trace_probe {
@@ -322,7 +329,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
-                    const char *group);
+                    const char *group, bool alloc_filter);
 void trace_probe_cleanup(struct trace_probe *tp);
 int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
 void trace_probe_unlink(struct trace_probe *tp);
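
Declaring the filter as a zero-length trailing array lets trace_probe_init() above fold the optional filter into the same kzalloc() as the event, so every trace_uprobe sharing a trace_probe_event shares one filter. The allocation idiom, reduced to hypothetical types:

	#include <linux/slab.h>

	struct example_event {
		unsigned int	flags;
		char		extra[0];	/* optional trailing payload */
	};

	/* One allocation covers the struct plus, optionally, the payload. */
	static struct example_event *example_event_alloc(size_t extra_size)
	{
		return kzalloc(sizeof(struct example_event) + extra_size,
			       GFP_KERNEL);
	}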
index 352073d..2619bc5 100644
@@ -34,12 +34,6 @@ struct uprobe_trace_entry_head {
 #define DATAOF_TRACE_ENTRY(entry, is_return)           \
        ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
 
-struct trace_uprobe_filter {
-       rwlock_t                rwlock;
-       int                     nr_systemwide;
-       struct list_head        perf_events;
-};
-
 static int trace_uprobe_create(int argc, const char **argv);
 static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
 static int trace_uprobe_release(struct dyn_event *ev);
@@ -60,7 +54,6 @@ static struct dyn_event_operations trace_uprobe_ops = {
  */
 struct trace_uprobe {
        struct dyn_event                devent;
-       struct trace_uprobe_filter      filter;
        struct uprobe_consumer          consumer;
        struct path                     path;
        struct inode                    *inode;
@@ -351,7 +344,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        if (!tu)
                return ERR_PTR(-ENOMEM);
 
-       ret = trace_probe_init(&tu->tp, event, group);
+       ret = trace_probe_init(&tu->tp, event, group, true);
        if (ret < 0)
                goto error;
 
@@ -359,7 +352,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        tu->consumer.handler = uprobe_dispatcher;
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
-       init_trace_uprobe_filter(&tu->filter);
+       init_trace_uprobe_filter(tu->tp.event->filter);
        return tu;
 
 error:
@@ -1067,13 +1060,14 @@ static void __probe_event_disable(struct trace_probe *tp)
        struct trace_probe *pos;
        struct trace_uprobe *tu;
 
+       tu = container_of(tp, struct trace_uprobe, tp);
+       WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
+
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
                if (!tu->inode)
                        continue;
 
-               WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
                uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
                tu->inode = NULL;
        }
@@ -1108,7 +1102,7 @@ static int probe_event_enable(struct trace_event_call *call,
        }
 
        tu = container_of(tp, struct trace_uprobe, tp);
-       WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+       WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
 
        if (enabled)
                return 0;
@@ -1205,39 +1199,39 @@ __uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
 }
 
 static inline bool
-uprobe_filter_event(struct trace_uprobe *tu, struct perf_event *event)
+trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
+                         struct perf_event *event)
 {
-       return __uprobe_perf_filter(&tu->filter, event->hw.target->mm);
+       return __uprobe_perf_filter(filter, event->hw.target->mm);
 }
 
-static int uprobe_perf_close(struct trace_uprobe *tu, struct perf_event *event)
+static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
+                                      struct perf_event *event)
 {
        bool done;
 
-       write_lock(&tu->filter.rwlock);
+       write_lock(&filter->rwlock);
        if (event->hw.target) {
                list_del(&event->hw.tp_list);
-               done = tu->filter.nr_systemwide ||
+               done = filter->nr_systemwide ||
                        (event->hw.target->flags & PF_EXITING) ||
-                       uprobe_filter_event(tu, event);
+                       trace_uprobe_filter_event(filter, event);
        } else {
-               tu->filter.nr_systemwide--;
-               done = tu->filter.nr_systemwide;
+               filter->nr_systemwide--;
+               done = filter->nr_systemwide;
        }
-       write_unlock(&tu->filter.rwlock);
+       write_unlock(&filter->rwlock);
 
-       if (!done)
-               return uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
-
-       return 0;
+       return done;
 }
 
-static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
+/* This returns true if the filter always covers target mm */
+static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
+                                   struct perf_event *event)
 {
        bool done;
-       int err;
 
-       write_lock(&tu->filter.rwlock);
+       write_lock(&filter->rwlock);
        if (event->hw.target) {
                /*
                 * event->parent != NULL means copy_process(), we can avoid
@@ -1247,28 +1241,21 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
                 * attr.enable_on_exec means that exec/mmap will install the
                 * breakpoints we need.
                 */
-               done = tu->filter.nr_systemwide ||
+               done = filter->nr_systemwide ||
                        event->parent || event->attr.enable_on_exec ||
-                       uprobe_filter_event(tu, event);
-               list_add(&event->hw.tp_list, &tu->filter.perf_events);
+                       trace_uprobe_filter_event(filter, event);
+               list_add(&event->hw.tp_list, &filter->perf_events);
        } else {
-               done = tu->filter.nr_systemwide;
-               tu->filter.nr_systemwide++;
+               done = filter->nr_systemwide;
+               filter->nr_systemwide++;
        }
-       write_unlock(&tu->filter.rwlock);
+       write_unlock(&filter->rwlock);
 
-       err = 0;
-       if (!done) {
-               err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
-               if (err)
-                       uprobe_perf_close(tu, event);
-       }
-       return err;
+       return done;
 }
 
-static int uprobe_perf_multi_call(struct trace_event_call *call,
-                                 struct perf_event *event,
-               int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+static int uprobe_perf_close(struct trace_event_call *call,
+                            struct perf_event *event)
 {
        struct trace_probe *pos, *tp;
        struct trace_uprobe *tu;
@@ -1278,25 +1265,59 @@ static int uprobe_perf_multi_call(struct trace_event_call *call,
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
 
+       tu = container_of(tp, struct trace_uprobe, tp);
+       if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
+               return 0;
+
        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tu = container_of(pos, struct trace_uprobe, tp);
-               ret = op(tu, event);
+               ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
                if (ret)
                        break;
        }
 
        return ret;
 }
+
+static int uprobe_perf_open(struct trace_event_call *call,
+                           struct perf_event *event)
+{
+       struct trace_probe *pos, *tp;
+       struct trace_uprobe *tu;
+       int err = 0;
+
+       tp = trace_probe_primary_from_call(call);
+       if (WARN_ON_ONCE(!tp))
+               return -ENODEV;
+
+       tu = container_of(tp, struct trace_uprobe, tp);
+       if (trace_uprobe_filter_add(tu->tp.event->filter, event))
+               return 0;
+
+       list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+               err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
+               if (err) {
+                       uprobe_perf_close(call, event);
+                       break;
+               }
+       }
+
+       return err;
+}
+
 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
                                enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 {
+       struct trace_uprobe_filter *filter;
        struct trace_uprobe *tu;
        int ret;
 
        tu = container_of(uc, struct trace_uprobe, consumer);
-       read_lock(&tu->filter.rwlock);
-       ret = __uprobe_perf_filter(&tu->filter, mm);
-       read_unlock(&tu->filter.rwlock);
+       filter = tu->tp.event->filter;
+
+       read_lock(&filter->rwlock);
+       ret = __uprobe_perf_filter(filter, mm);
+       read_unlock(&filter->rwlock);
 
        return ret;
 }
@@ -1419,10 +1440,10 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
                return 0;
 
        case TRACE_REG_PERF_OPEN:
-               return uprobe_perf_multi_call(event, data, uprobe_perf_open);
+               return uprobe_perf_open(event, data);
 
        case TRACE_REG_PERF_CLOSE:
-               return uprobe_perf_multi_call(event, data, uprobe_perf_close);
+               return uprobe_perf_close(event, data);
 
 #endif
        default:
index dccb95a..706020b 100644
@@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long res = 0;
 
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
        if (IS_UNALIGNED(src, dst))
                goto byte_at_a_time;
 
@@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               /*
+                * Truncate 'max' to the user-specified limit, so that
+                * we only have one limit we need to check in the loop
+                */
+               if (max > count)
+                       max = count;
+
                kasan_check_write(dst, count);
                check_object_size(dst, count, false);
                if (user_access_begin(src, max)) {
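
Moving the clamp out of the inner helper matters because user_access_begin(src, max) opens the user-access window for exactly max bytes; clamping max to the caller's count first keeps that window no wider than anything the copy loop can touch. The resulting order of operations, generalized with a hypothetical worker:

	#include <linux/uaccess.h>

	/* Hypothetical worker; stands in for do_strncpy_from_user(). */
	long example_copy_loop(char *dst, const char __user *src,
			       long count, unsigned long max);

	static long example_from_user(char *dst, const char __user *src,
				      long count, unsigned long max)
	{
		long retval = -EFAULT;

		if (max > count)
			max = count;	/* clamp before opening the window */

		if (user_access_begin(src, max)) {
			retval = example_copy_loop(dst, src, count, max);
			user_access_end();
		}
		return retval;
	}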
index 6c0005d..41670d4 100644
@@ -26,13 +26,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
        unsigned long align, res = 0;
        unsigned long c;
 
-       /*
-        * Truncate 'max' to the user-specified limit, so that
-        * we only have one limit we need to check in the loop
-        */
-       if (max > count)
-               max = count;
-
        /*
         * Do everything aligned. But that means that we
         * need to also expand the maximum..
@@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
                unsigned long max = max_addr - src_addr;
                long retval;
 
+               /*
+                * Truncate 'max' to the user-specified limit, so that
+                * we only have one limit we need to check in the loop
+                */
+               if (max > count)
+                       max = count;
+
                if (user_access_begin(str, max)) {
                        retval = do_strnlen_user(str, count, max);
                        user_access_end();
index 7df4f7f..55c14e8 100644
@@ -2,6 +2,7 @@
 /*
  * test_xarray.c: Test the XArray API
  * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
  * Author: Matthew Wilcox <willy@infradead.org>
  */
 
@@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
 {
 #ifdef CONFIG_XARRAY_MULTI
+       unsigned long multi = 3 << order;
+       unsigned long next = 4 << order;
        unsigned long index;
 
-       xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
-       XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+       xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+       XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+       XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
 
        index = 0;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(12));
-       XA_BUG_ON(xa, index != 12);
-       index = 13;
+                       xa_mk_value(multi));
+       XA_BUG_ON(xa, index != multi);
+       index = multi + 1;
        XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(12));
-       XA_BUG_ON(xa, (index < 12) || (index >= 16));
+                       xa_mk_value(multi));
+       XA_BUG_ON(xa, (index < multi) || (index >= next));
        XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
-                       xa_mk_value(16));
-       XA_BUG_ON(xa, index != 16);
-
-       xa_erase_index(xa, 12);
-       xa_erase_index(xa, 16);
+                       xa_mk_value(next));
+       XA_BUG_ON(xa, index != next);
+       XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+       XA_BUG_ON(xa, index != next);
+
+       xa_erase_index(xa, multi);
+       xa_erase_index(xa, next);
+       xa_erase_index(xa, next + 1);
        XA_BUG_ON(xa, !xa_empty(xa));
 #endif
 }
@@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
        xa_destroy(xa);
 }
 
+static noinline void check_find_4(struct xarray *xa)
+{
+       unsigned long index = 0;
+       void *entry;
+
+       xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+       entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+       XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+       entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+       XA_BUG_ON(xa, entry);
+
+       xa_erase_index(xa, ULONG_MAX);
+}
+
 static noinline void check_find(struct xarray *xa)
 {
+       unsigned i;
+
        check_find_1(xa);
        check_find_2(xa);
        check_find_3(xa);
-       check_multi_find(xa);
+       check_find_4(xa);
+
+       for (i = 2; i < 10; i++)
+               check_multi_find_1(xa, i);
        check_multi_find_2(xa);
 }
 
@@ -1132,6 +1160,27 @@ static noinline void check_move_tiny(struct xarray *xa)
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_move_max(struct xarray *xa)
+{
+       XA_STATE(xas, xa, 0);
+
+       xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+       rcu_read_unlock();
+
+       xas_set(&xas, 0);
+       rcu_read_lock();
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+       xas_pause(&xas);
+       XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+       rcu_read_unlock();
+
+       xa_erase_index(xa, ULONG_MAX);
+       XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 {
        XA_STATE(xas, xa, 0);
@@ -1240,6 +1289,7 @@ static noinline void check_move(struct xarray *xa)
        xa_destroy(xa);
 
        check_move_tiny(xa);
+       check_move_max(xa);
 
        for (i = 0; i < 16; i++)
                check_move_small(xa, 1UL << i);
index 1237c21..1d9fab7 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
  * XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
  * Author: Matthew Wilcox <willy@infradead.org>
  */
 
@@ -967,6 +968,7 @@ void xas_pause(struct xa_state *xas)
        if (xas_invalid(xas))
                return;
 
+       xas->xa_node = XAS_RESTART;
        if (node) {
                unsigned int offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
@@ -974,10 +976,11 @@ void xas_pause(struct xa_state *xas)
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
+               if (xas->xa_index == 0)
+                       xas->xa_node = XAS_BOUNDS;
        } else {
                xas->xa_index++;
        }
-       xas->xa_node = XAS_RESTART;
 }
 EXPORT_SYMBOL_GPL(xas_pause);
 
@@ -1079,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max)
 {
        void *entry;
 
-       if (xas_error(xas))
+       if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
                return NULL;
+       if (xas->xa_index > max)
+               return set_bounds(xas);
 
        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
-       } else if (xas_top(xas->xa_node)) {
+       } else if (xas->xa_node == XAS_RESTART) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
@@ -1150,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
 
        if (xas_error(xas))
                return NULL;
+       if (xas->xa_index > max)
+               goto max;
 
        if (!xas->xa_node) {
                xas->xa_index = 1;
@@ -1824,6 +1831,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
 }
 EXPORT_SYMBOL(xa_find);
 
+static bool xas_sibling(struct xa_state *xas)
+{
+       struct xa_node *node = xas->xa_node;
+       unsigned long mask;
+
+       if (!node)
+               return false;
+       mask = (XA_CHUNK_SIZE << node->shift) - 1;
+       return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+}
+
 /**
  * xa_find_after() - Search the XArray for a present entry.
  * @xa: XArray.
@@ -1847,21 +1865,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
        XA_STATE(xas, xa, *indexp + 1);
        void *entry;
 
+       if (xas.xa_index == 0)
+               return NULL;
+
        rcu_read_lock();
        for (;;) {
                if ((__force unsigned int)filter < XA_MAX_MARKS)
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
-               if (xas.xa_node == XAS_BOUNDS)
+
+               if (xas_invalid(&xas))
                        break;
-               if (xas.xa_shift) {
-                       if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
-                               continue;
-               } else {
-                       if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
-                               continue;
-               }
+               if (xas_sibling(&xas))
+                       continue;
                if (!xas_retry(&xas, entry))
                        break;
        }
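The new xas_sibling() helper above decides whether xa_find_after() stopped on a sibling slot of a multi-order entry by masking xa_index against the span covered by the node, while the xas_pause() hunk guards against the index wrapping past ULONG_MAX by parking the state at XAS_BOUNDS. A hedged userspace sketch of just the sibling-mask arithmetic, assuming the common kernel configuration of 64-slot chunks (XA_CHUNK_SHIFT == 6):

    #include <stdbool.h>
    #include <stdio.h>

    #define XA_CHUNK_SHIFT 6    /* assumption: the usual kernel config */
    #define XA_CHUNK_SIZE  (1UL << XA_CHUNK_SHIFT)

    /* Model of xas_sibling(): an entry of a given order occupies a run of
     * consecutive indices; every index in that run except the canonical
     * (lowest) one is a "sibling" and should not be reported again. */
    static bool is_sibling(unsigned long index, unsigned int node_shift,
                           unsigned long offset)
    {
        unsigned long mask = (XA_CHUNK_SIZE << node_shift) - 1;

        return (index & mask) > (offset << node_shift);
    }

    int main(void)
    {
        unsigned long idx;

        /* An order-2 entry stored at index 12 spans indices 12..15 in a
         * leaf node (shift 0); offset 12 is canonical, 13..15 are siblings. */
        for (idx = 12; idx < 16; idx++)
            printf("index %lu: %s\n", idx,
                   is_sibling(idx, 0, 12) ? "sibling" : "canonical");
        return 0;
    }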
index d79221f..c318967 100644 (file)
@@ -134,8 +134,7 @@ static void vcc_seq_stop(struct seq_file *seq, void *v)
 static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        v = vcc_walk(seq, 1);
-       if (v)
-               (*pos)++;
+       (*pos)++;
        return v;
 }
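vcc_seq_next() above, together with the matching hunks in neigh_stat_seq_next(), rt_cpu_seq_next() and ipv6_route_seq_next() later in this series, all fix the same bug: a seq_file ->next() callback must advance *pos unconditionally, even when it returns NULL, or the seq_file core can re-read the same position and duplicate or loop. A toy iterator sketching that contract (all names hypothetical):

    #include <stdio.h>

    static const int items[] = { 10, 20, 30 };
    #define N (sizeof(items) / sizeof(items[0]))

    /* Model of the seq_file ->next() contract: always advance *pos,
     * even when there is no further element, so the caller never
     * retries the same position. */
    static const int *toy_next(long *pos)
    {
        (*pos)++;    /* unconditional, as in the fix */
        return (*pos < (long)N) ? &items[*pos] : NULL;
    }

    int main(void)
    {
        long pos = -1;    /* a ->start() would hand out position 0 */
        const int *v;

        while ((v = toy_next(&pos)))
            printf("pos=%ld item=%d\n", pos, *v);
        printf("iteration ended at pos=%ld\n", pos);
        return 0;
    }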
 
index 76bd678..a0116b9 100644 (file)
@@ -62,7 +62,7 @@ static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
        hpad = (info->hdr_len + CFUSB_PAD_DESCR_SZ) & (CFUSB_ALIGNMENT - 1);
 
        if (skb_headroom(skb) < ETH_HLEN + CFUSB_PAD_DESCR_SZ + hpad) {
-               pr_warn("Headroom to small\n");
+               pr_warn("Headroom too small\n");
                kfree_skb(skb);
                return -EIO;
        }
index 4dcc1b3..c806b07 100644 (file)
@@ -5489,9 +5489,29 @@ static void flush_all_backlogs(void)
        put_online_cpus();
 }
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+       if (!napi->rx_count)
+               return;
+       netif_receive_skb_list_internal(&napi->rx_list);
+       INIT_LIST_HEAD(&napi->rx_list);
+       napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+       list_add_tail(&skb->list, &napi->rx_list);
+       if (++napi->rx_count >= gro_normal_batch)
+               gro_normal_list(napi);
+}
+
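As the comments above say, gro_normal_one() batches GRO_NORMAL skbs on napi->rx_list and gro_normal_list() flushes the whole batch once gro_normal_batch is reached; the hunks that follow make napi_gro_complete() feed completed skbs into the same batch and move the final flush after napi_gro_flush(), so flushed packets join the batch instead of going up one by one. A userspace model of the batch-then-flush pattern; the list type and threshold are stand-ins, not the kernel's:

    #include <stdio.h>

    #define BATCH 8    /* stands in for gro_normal_batch */

    struct pkt {
        int id;
        struct pkt *next;
    };

    static struct pkt *rx_list;
    static struct pkt **rx_tail = &rx_list;
    static int rx_count;

    /* Model of gro_normal_list(): hand the whole batch to the stack at once. */
    static void flush_list(void)
    {
        struct pkt *p;

        if (!rx_count)
            return;
        for (p = rx_list; p; p = p->next)
            printf("deliver pkt %d\n", p->id);
        rx_list = NULL;
        rx_tail = &rx_list;
        rx_count = 0;
    }

    /* Model of gro_normal_one(): queue one packet; flush when the batch fills. */
    static void queue_one(struct pkt *p)
    {
        p->next = NULL;
        *rx_tail = p;
        rx_tail = &p->next;
        if (++rx_count >= BATCH)
            flush_list();
    }

    int main(void)
    {
        struct pkt pkts[11];
        int i;

        for (i = 0; i < 11; i++) {
            pkts[i].id = i;
            queue_one(&pkts[i]);
        }
        flush_list();    /* end-of-poll flush, as napi_poll() now does */
        return 0;
    }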
 INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
 INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
-static int napi_gro_complete(struct sk_buff *skb)
+static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
@@ -5524,7 +5544,8 @@ static int napi_gro_complete(struct sk_buff *skb)
        }
 
 out:
-       return netif_receive_skb_internal(skb);
+       gro_normal_one(napi, skb);
+       return NET_RX_SUCCESS;
 }
 
 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@ -5537,7 +5558,7 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
-               napi_gro_complete(skb);
+               napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }
 
@@ -5639,7 +5660,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
        }
 }
 
-static void gro_flush_oldest(struct list_head *head)
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
 {
        struct sk_buff *oldest;
 
@@ -5655,7 +5676,7 @@ static void gro_flush_oldest(struct list_head *head)
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
-       napi_gro_complete(oldest);
+       napi_gro_complete(napi, oldest);
 }
 
 INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
@@ -5731,7 +5752,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
 
        if (pp) {
                skb_list_del_init(pp);
-               napi_gro_complete(pp);
+               napi_gro_complete(napi, pp);
                napi->gro_hash[hash].count--;
        }
 
@@ -5742,7 +5763,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                goto normal;
 
        if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
-               gro_flush_oldest(gro_head);
+               gro_flush_oldest(napi, gro_head);
        } else {
                napi->gro_hash[hash].count++;
        }
@@ -5800,26 +5821,6 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-       if (!napi->rx_count)
-               return;
-       netif_receive_skb_list_internal(&napi->rx_list);
-       INIT_LIST_HEAD(&napi->rx_list);
-       napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-       list_add_tail(&skb->list, &napi->rx_list);
-       if (++napi->rx_count >= gro_normal_batch)
-               gro_normal_list(napi);
-}
-
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
        skb_dst_drop(skb);
@@ -6198,8 +6199,6 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                                 NAPIF_STATE_IN_BUSY_POLL)))
                return false;
 
-       gro_normal_list(n);
-
        if (n->gro_bitmask) {
                unsigned long timeout = 0;
 
@@ -6215,6 +6214,9 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                        hrtimer_start(&n->timer, ns_to_ktime(timeout),
                                      HRTIMER_MODE_REL_PINNED);
        }
+
+       gro_normal_list(n);
+
        if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
                local_irq_save(flags);
@@ -6546,8 +6548,6 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                goto out_unlock;
        }
 
-       gro_normal_list(n);
-
        if (n->gro_bitmask) {
                /* flush too old packets
                 * If HZ < 1000, flush all packets.
@@ -6555,6 +6555,8 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                napi_gro_flush(n, HZ >= 1000);
        }
 
+       gro_normal_list(n);
+
        /* Some drivers may have called napi_schedule
         * prior to exhausting their budget.
         */
@@ -8192,6 +8194,22 @@ int __dev_set_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(__dev_set_mtu);
 
+int dev_validate_mtu(struct net_device *dev, int new_mtu,
+                    struct netlink_ext_ack *extack)
+{
+       /* MTU must be positive, and in range */
+       if (new_mtu < 0 || new_mtu < dev->min_mtu) {
+               NL_SET_ERR_MSG(extack, "mtu less than device minimum");
+               return -EINVAL;
+       }
+
+       if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
+               NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
+               return -EINVAL;
+       }
+       return 0;
+}
+
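dev_validate_mtu() above factors the range check out of dev_set_mtu_ext() so that rtnl_create_link() (a later hunk) can validate IFLA_MTU before the device is ever registered. A userspace sketch of the same validate-before-commit split, with a hypothetical toy_dev type standing in for net_device:

    #include <errno.h>
    #include <stdio.h>

    struct toy_dev {
        int mtu;
        int min_mtu;
        int max_mtu;    /* 0 means "no upper bound", as in net_device */
    };

    /* Model of dev_validate_mtu(): a pure range check with no side effects,
     * so it can run both before and after device registration. */
    static int validate_mtu(const struct toy_dev *dev, int new_mtu)
    {
        if (new_mtu < 0 || new_mtu < dev->min_mtu)
            return -EINVAL;    /* mtu less than device minimum */
        if (dev->max_mtu > 0 && new_mtu > dev->max_mtu)
            return -EINVAL;    /* mtu greater than device maximum */
        return 0;
    }

    static int set_mtu(struct toy_dev *dev, int new_mtu)
    {
        int err = validate_mtu(dev, new_mtu);

        if (err)
            return err;
        dev->mtu = new_mtu;    /* commit only after validation */
        return 0;
    }

    int main(void)
    {
        struct toy_dev dev = { .mtu = 1500, .min_mtu = 68, .max_mtu = 9000 };

        printf("set 9216 -> %d\n", set_mtu(&dev, 9216));    /* rejected */
        printf("set 1400 -> %d (mtu=%d)\n", set_mtu(&dev, 1400), dev.mtu);
        return 0;
    }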
 /**
  *     dev_set_mtu_ext - Change maximum transfer unit
  *     @dev: device
@@ -8208,16 +8226,9 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
        if (new_mtu == dev->mtu)
                return 0;
 
-       /* MTU must be positive, and in range */
-       if (new_mtu < 0 || new_mtu < dev->min_mtu) {
-               NL_SET_ERR_MSG(extack, "mtu less than device minimum");
-               return -EINVAL;
-       }
-
-       if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
-               NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
-               return -EINVAL;
-       }
+       err = dev_validate_mtu(dev, new_mtu, extack);
+       if (err)
+               return err;
 
        if (!netif_device_present(dev))
                return -ENODEV;
@@ -9317,8 +9328,10 @@ int register_netdevice(struct net_device *dev)
                goto err_uninit;
 
        ret = netdev_register_kobject(dev);
-       if (ret)
+       if (ret) {
+               dev->reg_state = NETREG_UNREGISTERED;
                goto err_uninit;
+       }
        dev->reg_state = NETREG_REGISTERED;
 
        __netdev_update_features(dev);
index 920784a..789a73a 100644 (file)
@@ -3290,6 +3290,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu+1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
+       (*pos)++;
        return NULL;
 }
 
index 20bc406..cdad6ed 100644 (file)
@@ -3053,8 +3053,17 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
        dev->rtnl_link_ops = ops;
        dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 
-       if (tb[IFLA_MTU])
-               dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+       if (tb[IFLA_MTU]) {
+               u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+               int err;
+
+               err = dev_validate_mtu(dev, mtu, extack);
+               if (err) {
+                       free_netdev(dev);
+                       return ERR_PTR(err);
+               }
+               dev->mtu = mtu;
+       }
        if (tb[IFLA_ADDRESS]) {
                memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
                                nla_len(tb[IFLA_ADDRESS]));
index 3866d7e..ded2d52 100644 (file)
@@ -594,8 +594,6 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-       sock_owned_by_me(sk);
-
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);
 
index 6b6e51d..1f31a39 100644 (file)
@@ -438,6 +438,23 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
 
+/**
+ * inet_proto_csum_replace16 - update layer 4 header checksum field
+ * @sum: Layer 4 header checksum field
+ * @skb: sk_buff for the packet
+ * @from: old IPv6 address
+ * @to: new IPv6 address
+ * @pseudohdr: True if layer 4 header checksum includes pseudoheader
+ *
+ * Update the layer 4 header checksum to match an update of the IPv6
+ * src/dst address.
+ *
+ * There is no need to update skb->csum in this function, because the updates
+ * to the two fields a) IPv6 src/dst address and b) L4 header checksum cancel
+ * each other out in the skb->csum calculation. By contrast,
+ * inet_proto_csum_replace4() does need to update skb->csum, because the
+ * updates to the three fields a) IPv4 src/dst address, b) IPv4 header
+ * checksum and c) L4 header checksum produce the same diff as the L4 header
+ * checksum alone in the skb->csum calculation.
+ */
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                               const __be32 *from, const __be32 *to,
                               bool pseudohdr)
@@ -449,9 +466,6 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
                *sum = csum_fold(csum_partial(diff, sizeof(diff),
                                 ~csum_unfold(*sum)));
-               if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-                       skb->csum = ~csum_partial(diff, sizeof(diff),
-                                                 ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
                                  csum_unfold(*sum)));
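The comment added above leans on how the Internet checksum folds updates: replacing a 16-bit field can be applied incrementally (RFC 1624) without re-summing the packet, and equal-and-opposite updates cancel. A standalone sketch of the fold and the incremental update, using toy helpers rather than the kernel's csum_* primitives:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
    static uint16_t fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* One's-complement checksum over 16-bit words (toy, even length only). */
    static uint16_t csum(const uint16_t *words, int n)
    {
        uint32_t sum = 0;

        while (n--)
            sum += *words++;
        return ~fold(sum);    /* checksum is the complement of the sum */
    }

    /* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m') for one changed word. */
    static uint16_t csum_update(uint16_t old_csum, uint16_t from, uint16_t to)
    {
        uint32_t sum = (uint16_t)~old_csum;

        sum += (uint16_t)~from;
        sum += to;
        return ~fold(sum);
    }

    int main(void)
    {
        uint16_t pkt[4] = { 0x4500, 0x0054, 0xc0a8, 0x0001 };
        uint16_t full = csum(pkt, 4);

        pkt[3] = 0x0002;    /* change one field, e.g. part of an address */
        printf("recomputed: 0x%04x incremental: 0x%04x\n",
               csum(pkt, 4), csum_update(full, 0x0001, 0x0002));
        return 0;
    }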
index 0e4a7cf..e2e219c 100644 (file)
@@ -57,6 +57,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
                if (!x)
                        goto out_reset;
 
+               skb->mark = xfrm_smark_get(skb->mark, x);
+
                sp->xvec[sp->len++] = x;
                sp->olen++;
 
index 30fa771..dcc79ff 100644 (file)
@@ -662,8 +662,8 @@ static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
        [FOU_ATTR_REMCSUM_NOPARTIAL]    = { .type = NLA_FLAG, },
        [FOU_ATTR_LOCAL_V4]             = { .type = NLA_U32, },
        [FOU_ATTR_PEER_V4]              = { .type = NLA_U32, },
-       [FOU_ATTR_LOCAL_V6]             = { .type = sizeof(struct in6_addr), },
-       [FOU_ATTR_PEER_V6]              = { .type = sizeof(struct in6_addr), },
+       [FOU_ATTR_LOCAL_V6]             = { .len = sizeof(struct in6_addr), },
+       [FOU_ATTR_PEER_V6]              = { .len = sizeof(struct in6_addr), },
        [FOU_ATTR_PEER_PORT]            = { .type = NLA_U16, },
        [FOU_ATTR_IFINDEX]              = { .type = NLA_S32, },
 };
index 0fe2a5d..74e1d96 100644 (file)
@@ -1236,10 +1236,8 @@ int ip_tunnel_init(struct net_device *dev)
        iph->version            = 4;
        iph->ihl                = 5;
 
-       if (tunnel->collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
+       if (tunnel->collect_md)
                netif_keep_dst(dev);
-       }
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
index e90b600..37cddd1 100644 (file)
@@ -187,8 +187,17 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        int mtu;
 
        if (!dst) {
-               dev->stats.tx_carrier_errors++;
-               goto tx_error_icmp;
+               struct rtable *rt;
+
+               fl->u.ip4.flowi4_oif = dev->ifindex;
+               fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+               rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+               if (IS_ERR(rt)) {
+                       dev->stats.tx_carrier_errors++;
+                       goto tx_error_icmp;
+               }
+               dst = &rt->dst;
+               skb_dst_set(skb, dst);
        }
 
        dst_hold(dst);
index 2010888..d5c57b3 100644 (file)
@@ -271,6 +271,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu+1;
                return &per_cpu(rt_cache_stat, cpu);
        }
+       (*pos)++;
        return NULL;
 
 }
index 7dfb78c..2857c85 100644 (file)
@@ -2525,6 +2525,7 @@ static void tcp_rtx_queue_purge(struct sock *sk)
 {
        struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
 
+       tcp_sk(sk)->highest_sack = NULL;
        while (p) {
                struct sk_buff *skb = rb_to_skb(p);
 
@@ -2615,7 +2616,6 @@ int tcp_disconnect(struct sock *sk, int flags)
        WRITE_ONCE(tp->write_seq, seq);
 
        icsk->icsk_backoff = 0;
-       tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
index a6545ef..6c4d79b 100644 (file)
@@ -779,8 +779,7 @@ static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
         * bandwidth sample. Delivered is in packets and interval_us in uS and
         * ratio will be <<1 for most connections. So delivered is first scaled.
         */
-       bw = (u64)rs->delivered * BW_UNIT;
-       do_div(bw, rs->interval_us);
+       bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us);
 
        /* If this sample is application-limited, it is likely to have a very
         * low delivered count that represents application behavior rather than
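The hunk above only swaps do_div() for div64_long(); the arithmetic is unchanged: delivered (packets) is scaled by BW_UNIT before dividing by the interval in microseconds, so rates well below one packet per microsecond keep their fractional precision. A plain C rendering of that fixed-point step, with BW_SCALE set to the value tcp_bbr.c uses:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BW_SCALE 24
    #define BW_UNIT (1ULL << BW_SCALE)    /* fixed-point 1.0, as in tcp_bbr.c */

    /* Bandwidth sample in packets per usec, scaled by BW_UNIT so that
     * sub-unity rates keep ~24 bits of fraction. */
    static uint64_t bw_sample(uint64_t delivered_pkts, uint64_t interval_us)
    {
        return delivered_pkts * BW_UNIT / interval_us;
    }

    int main(void)
    {
        /* 100 packets over 12 ms: about 0.00833 pkt/usec. */
        uint64_t bw = bw_sample(100, 12000);

        printf("bw = %" PRIu64 " (%.6f pkt/usec)\n",
               bw, (double)bw / BW_UNIT);
        return 0;
    }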
index 2f475b8..4915de6 100644 (file)
@@ -3165,6 +3165,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->retransmit_skb_hint = NULL;
                if (unlikely(skb == tp->lost_skb_hint))
                        tp->lost_skb_hint = NULL;
+               tcp_highest_sack_replace(sk, skb, next);
                tcp_rtx_queue_unlink_and_free(skb, sk);
        }
 
index fec4b3a..306e25d 100644 (file)
@@ -3293,6 +3293,7 @@ int tcp_send_synack(struct sock *sk)
                        if (!nskb)
                                return -ENOMEM;
                        INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
+                       tcp_highest_sack_replace(sk, skb, nskb);
                        tcp_rtx_queue_unlink_and_free(skb, sk);
                        __skb_header_release(nskb);
                        tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
index e4fd440..db76b96 100644 (file)
@@ -1368,7 +1368,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
        if (likely(partial)) {
                up->forward_deficit += size;
                size = up->forward_deficit;
-               if (size < (sk->sk_rcvbuf >> 2))
+               if (size < (sk->sk_rcvbuf >> 2) &&
+                   !skb_queue_empty(&up->reader_queue))
                        return;
        } else {
                size += up->forward_deficit;
index e31626f..fd53505 100644 (file)
@@ -79,6 +79,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
                if (!x)
                        goto out_reset;
 
+               skb->mark = xfrm_smark_get(skb->mark, x);
+
                sp->xvec[sp->len++] = x;
                sp->olen++;
 
index b1e9a10..58fbde2 100644 (file)
@@ -2571,14 +2571,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
 
+       ++(*pos);
        if (!v)
                goto iter_table;
 
        n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
-       if (n) {
-               ++*pos;
+       if (n)
                return n;
-       }
 
 iter_table:
        ipv6_route_check_sernum(iter);
@@ -2586,8 +2585,6 @@ iter_table:
        r = fib6_walk_continue(&iter->w);
        spin_unlock_bh(&iter->tbl->tb6_lock);
        if (r > 0) {
-               if (v)
-                       ++*pos;
                return iter->w.leaf;
        } else if (r < 0) {
                fib6_walker_unlink(net, &iter->w);
index ee968d9..55bfc51 100644 (file)
@@ -1466,7 +1466,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
                dev->mtu -= 8;
 
        if (tunnel->parms.collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
                netif_keep_dst(dev);
        }
        ip6gre_tnl_init_features(dev);
@@ -1894,7 +1893,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
@@ -2197,7 +2195,6 @@ static void ip6erspan_tap_setup(struct net_device *dev)
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
index 2f376db..b5dd20c 100644 (file)
@@ -1877,10 +1877,8 @@ static int ip6_tnl_dev_init(struct net_device *dev)
        if (err)
                return err;
        ip6_tnl_link_config(t);
-       if (t->parms.collect_md) {
-               dev->features |= NETIF_F_NETNS_LOCAL;
+       if (t->parms.collect_md)
                netif_keep_dst(dev);
-       }
        return 0;
 }
 
index 6f08b76..524006a 100644 (file)
@@ -449,8 +449,17 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int err = -1;
        int mtu;
 
-       if (!dst)
-               goto tx_err_link_failure;
+       if (!dst) {
+               fl->u.ip6.flowi6_oif = dev->ifindex;
+               fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+               dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+               if (dst->error) {
+                       dst_release(dst);
+                       dst = NULL;
+                       goto tx_err_link_failure;
+               }
+               skb_dst_set(skb, dst);
+       }
 
        dst_hold(dst);
        dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
index 85a5447..7cbc197 100644 (file)
@@ -23,6 +23,7 @@
 #include <net/addrconf.h>
 #include <net/ip6_route.h>
 #include <net/dst_cache.h>
+#include <net/ip_tunnels.h>
 #ifdef CONFIG_IPV6_SEG6_HMAC
 #include <net/seg6_hmac.h>
 #endif
@@ -135,7 +136,8 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
 
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
-       skb->encapsulation = 0;
+       if (iptunnel_pull_offloads(skb))
+               return false;
 
        return true;
 }
index 077a2cb..26ab0e9 100644 (file)
@@ -75,7 +75,7 @@ mtype_flush(struct ip_set *set)
 
        if (set->extensions & IPSET_EXT_DESTROY)
                mtype_ext_cleanup(set);
-       memset(map->members, 0, map->memsize);
+       bitmap_zero(map->members, map->elements);
        set->elements = 0;
        set->ext_size = 0;
 }
index abe8f77..0a2196f 100644 (file)
@@ -37,7 +37,7 @@ MODULE_ALIAS("ip_set_bitmap:ip");
 
 /* Type structure */
 struct bitmap_ip {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -220,7 +220,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
            u32 first_ip, u32 last_ip,
            u32 elements, u32 hosts, u8 netmask)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_ip = first_ip;
@@ -322,7 +322,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (!map)
                return -ENOMEM;
 
-       map->memsize = bitmap_bytes(0, elements - 1);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ip;
        if (!init_map_ip(set, map, first_ip, last_ip,
                         elements, hosts, netmask)) {
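The bitmap:ip changes above, like the bitmap:ip,mac and bitmap:port hunks that follow, switch members from an opaque void buffer to unsigned long words sized with BITS_TO_LONGS(), so bitmap_zalloc()/bitmap_zero() and the bit helpers operate on properly sized, aligned words. A userspace sketch of the sizing and the basic set operations; bitmap_set and its helpers are illustrative stand-ins for the kernel's bitmap API:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Model of the converted set: one bit per possible element. */
    struct bitmap_set {
        unsigned long *members;
        unsigned long elements;
    };

    static int set_init(struct bitmap_set *s, unsigned long elements)
    {
        s->members = calloc(BITS_TO_LONGS(elements), sizeof(unsigned long));
        s->elements = elements;
        return s->members ? 0 : -1;
    }

    static void set_add(struct bitmap_set *s, unsigned long e)
    {
        s->members[e / BITS_PER_LONG] |= 1UL << (e % BITS_PER_LONG);
    }

    static int set_test(const struct bitmap_set *s, unsigned long e)
    {
        return !!(s->members[e / BITS_PER_LONG] & (1UL << (e % BITS_PER_LONG)));
    }

    /* Model of mtype_flush() after the patch: clear whole words. */
    static void set_flush(struct bitmap_set *s)
    {
        memset(s->members,
               0, BITS_TO_LONGS(s->elements) * sizeof(unsigned long));
    }

    int main(void)
    {
        struct bitmap_set s;

        if (set_init(&s, 200))
            return 1;
        set_add(&s, 3);
        set_add(&s, 130);
        printf("3:%d 4:%d 130:%d\n", set_test(&s, 3), set_test(&s, 4),
               set_test(&s, 130));
        set_flush(&s);
        printf("after flush, 3:%d\n", set_test(&s, 3));
        free(s.members);
        return 0;
    }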
index b618713..739e343 100644 (file)
@@ -42,7 +42,7 @@ enum {
 
 /* Type structure */
 struct bitmap_ipmac {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -299,7 +299,7 @@ static bool
 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
               u32 first_ip, u32 last_ip, u32 elements)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_ip = first_ip;
@@ -360,7 +360,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (!map)
                return -ENOMEM;
 
-       map->memsize = bitmap_bytes(0, elements - 1);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ipmac;
        if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
                kfree(map);
index 23d6095..b49978d 100644 (file)
@@ -30,7 +30,7 @@ MODULE_ALIAS("ip_set_bitmap:port");
 
 /* Type structure */
 struct bitmap_port {
-       void *members;          /* the set members */
+       unsigned long *members; /* the set members */
        u16 first_port;         /* host byte order, included in range */
        u16 last_port;          /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
@@ -231,7 +231,7 @@ static bool
 init_map_port(struct ip_set *set, struct bitmap_port *map,
              u16 first_port, u16 last_port)
 {
-       map->members = ip_set_alloc(map->memsize);
+       map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN);
        if (!map->members)
                return false;
        map->first_port = first_port;
@@ -271,7 +271,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
                return -ENOMEM;
 
        map->elements = elements;
-       map->memsize = bitmap_bytes(0, map->elements);
+       map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_port;
        if (!init_map_port(set, map, first_port, last_port)) {
                kfree(map);
index 8dc892a..605e0f6 100644 (file)
@@ -1239,7 +1239,7 @@ static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer,
 
                        p = msg_end;
                        if (p + sizeof(s->v4) > buffer+buflen) {
-                               IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+                               IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
                                return;
                        }
                        s = (union ip_vs_sync_conn *)p;
index 0399ae8..4f897b1 100644 (file)
@@ -114,7 +114,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        {
 /*     ORIGINAL        */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init         */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
 /* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
 /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
 /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
@@ -130,7 +130,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 /*     REPLY   */
 /*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
 /* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* init_ack     */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
 /* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
 /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
 /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
@@ -316,7 +316,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                        ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
                }
 
-               ct->proto.sctp.state = new_state;
+               ct->proto.sctp.state = SCTP_CONNTRACK_NONE;
        }
 
        return true;
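The rows edited above belong to a table indexed as sctp_conntracks[direction][event][current_state]; each cell names the next conntrack state, and the fixes adjust the init/init_ack rows and keep a fresh connection in SCTP_CONNTRACK_NONE until later traffic confirms it. A toy transition table in the same shape; the states and events here are abbreviated stand-ins, not the kernel's full set:

    #include <stdio.h>

    /* Toy version of the pattern: next = table[dir][event][state]. */
    enum state { S_NONE, S_WAIT, S_OPEN, S_CLOSED, S_MAX };
    enum event { E_INIT, E_INIT_ACK, E_ABORT, E_MAX };
    enum dir   { D_ORIG, D_REPLY, D_MAX };

    static const unsigned char table[D_MAX][E_MAX][S_MAX] = {
        [D_ORIG] = {
            /*               sNONE,    sWAIT,    sOPEN,    sCLOSED */
            [E_INIT]     = { S_WAIT,   S_WAIT,   S_OPEN,   S_WAIT },
            [E_INIT_ACK] = { S_NONE,   S_WAIT,   S_OPEN,   S_CLOSED },
            [E_ABORT]    = { S_CLOSED, S_CLOSED, S_CLOSED, S_CLOSED },
        },
        [D_REPLY] = {
            [E_INIT]     = { S_NONE,   S_WAIT,   S_OPEN,   S_CLOSED },
            [E_INIT_ACK] = { S_NONE,   S_OPEN,   S_OPEN,   S_CLOSED },
            [E_ABORT]    = { S_CLOSED, S_CLOSED, S_CLOSED, S_CLOSED },
        },
    };

    static const char *names[] = { "NONE", "WAIT", "OPEN", "CLOSED" };

    int main(void)
    {
        unsigned char st = S_NONE;

        st = table[D_ORIG][E_INIT][st];        /* INIT from originator */
        printf("after INIT: %s\n", names[st]);
        st = table[D_REPLY][E_INIT_ACK][st];   /* INIT_ACK from replier */
        printf("after INIT_ACK: %s\n", names[st]);
        return 0;
    }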
index 65f51a2..7e63b48 100644 (file)
@@ -552,48 +552,71 @@ static inline u64 nf_tables_alloc_handle(struct nft_table *table)
 
 static const struct nft_chain_type *chain_type[NFPROTO_NUMPROTO][NFT_CHAIN_T_MAX];
 
+static const struct nft_chain_type *
+__nft_chain_type_get(u8 family, enum nft_chain_types type)
+{
+       if (family >= NFPROTO_NUMPROTO ||
+           type >= NFT_CHAIN_T_MAX)
+               return NULL;
+
+       return chain_type[family][type];
+}
+
 static const struct nft_chain_type *
 __nf_tables_chain_type_lookup(const struct nlattr *nla, u8 family)
 {
+       const struct nft_chain_type *type;
        int i;
 
        for (i = 0; i < NFT_CHAIN_T_MAX; i++) {
-               if (chain_type[family][i] != NULL &&
-                   !nla_strcmp(nla, chain_type[family][i]->name))
-                       return chain_type[family][i];
+               type = __nft_chain_type_get(family, i);
+               if (!type)
+                       continue;
+               if (!nla_strcmp(nla, type->name))
+                       return type;
        }
        return NULL;
 }
 
-/*
- * Loading a module requires dropping mutex that guards the transaction.
- * A different client might race to start a new transaction meanwhile. Zap the
- * list of pending transaction and then restore it once the mutex is grabbed
- * again. Users of this function return EAGAIN which implicitly triggers the
- * transaction abort path to clean up the list of pending transactions.
- */
+struct nft_module_request {
+       struct list_head        list;
+       char                    module[MODULE_NAME_LEN];
+       bool                    done;
+};
+
 #ifdef CONFIG_MODULES
-static void nft_request_module(struct net *net, const char *fmt, ...)
+static int nft_request_module(struct net *net, const char *fmt, ...)
 {
        char module_name[MODULE_NAME_LEN];
-       LIST_HEAD(commit_list);
+       struct nft_module_request *req;
        va_list args;
        int ret;
 
-       list_splice_init(&net->nft.commit_list, &commit_list);
-
        va_start(args, fmt);
        ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
        va_end(args);
        if (ret >= MODULE_NAME_LEN)
-               return;
+               return 0;
 
-       mutex_unlock(&net->nft.commit_mutex);
-       request_module("%s", module_name);
-       mutex_lock(&net->nft.commit_mutex);
+       list_for_each_entry(req, &net->nft.module_list, list) {
+               if (!strcmp(req->module, module_name)) {
+                       if (req->done)
+                               return 0;
 
-       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
-       list_splice(&commit_list, &net->nft.commit_list);
+                       /* A request to load this module already exists. */
+                       return -EAGAIN;
+               }
+       }
+
+       req = kmalloc(sizeof(*req), GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
+
+       req->done = false;
+       strlcpy(req->module, module_name, MODULE_NAME_LEN);
+       list_add_tail(&req->list, &net->nft.module_list);
+
+       return -EAGAIN;
 }
 #endif
 
@@ -617,10 +640,9 @@ nf_tables_chain_type_lookup(struct net *net, const struct nlattr *nla,
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (autoload) {
-               nft_request_module(net, "nft-chain-%u-%.*s", family,
-                                  nla_len(nla), (const char *)nla_data(nla));
-               type = __nf_tables_chain_type_lookup(nla, family);
-               if (type != NULL)
+               if (nft_request_module(net, "nft-chain-%u-%.*s", family,
+                                      nla_len(nla),
+                                      (const char *)nla_data(nla)) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -1162,11 +1184,8 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
 
 void nft_register_chain_type(const struct nft_chain_type *ctype)
 {
-       if (WARN_ON(ctype->family >= NFPROTO_NUMPROTO))
-               return;
-
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       if (WARN_ON(chain_type[ctype->family][ctype->type] != NULL)) {
+       if (WARN_ON(__nft_chain_type_get(ctype->family, ctype->type))) {
                nfnl_unlock(NFNL_SUBSYS_NFTABLES);
                return;
        }
@@ -1768,7 +1787,10 @@ static int nft_chain_parse_hook(struct net *net,
        hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
        hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
 
-       type = chain_type[family][NFT_CHAIN_T_DEFAULT];
+       type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT);
+       if (!type)
+               return -EOPNOTSUPP;
+
        if (nla[NFTA_CHAIN_TYPE]) {
                type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE],
                                                   family, autoload);
@@ -2328,9 +2350,8 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
 static int nft_expr_type_request_module(struct net *net, u8 family,
                                        struct nlattr *nla)
 {
-       nft_request_module(net, "nft-expr-%u-%.*s", family,
-                          nla_len(nla), (char *)nla_data(nla));
-       if (__nft_expr_type_get(family, nla))
+       if (nft_request_module(net, "nft-expr-%u-%.*s", family,
+                              nla_len(nla), (char *)nla_data(nla)) == -EAGAIN)
                return -EAGAIN;
 
        return 0;
@@ -2356,9 +2377,9 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
                if (nft_expr_type_request_module(net, family, nla) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
 
-               nft_request_module(net, "nft-expr-%.*s",
-                                  nla_len(nla), (char *)nla_data(nla));
-               if (__nft_expr_type_get(family, nla))
+               if (nft_request_module(net, "nft-expr-%.*s",
+                                      nla_len(nla),
+                                      (char *)nla_data(nla)) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -2449,9 +2470,10 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
                        err = PTR_ERR(ops);
 #ifdef CONFIG_MODULES
                        if (err == -EAGAIN)
-                               nft_expr_type_request_module(ctx->net,
-                                                            ctx->family,
-                                                            tb[NFTA_EXPR_NAME]);
+                               if (nft_expr_type_request_module(ctx->net,
+                                                                ctx->family,
+                                                                tb[NFTA_EXPR_NAME]) != -EAGAIN)
+                                       err = -ENOENT;
 #endif
                        goto err1;
                }
@@ -3288,8 +3310,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (list_empty(&nf_tables_set_types)) {
-               nft_request_module(ctx->net, "nft-set");
-               if (!list_empty(&nf_tables_set_types))
+               if (nft_request_module(ctx->net, "nft-set") == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -5415,8 +5436,7 @@ nft_obj_type_get(struct net *net, u32 objtype)
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (type == NULL) {
-               nft_request_module(net, "nft-obj-%u", objtype);
-               if (__nft_obj_type_get(objtype))
+               if (nft_request_module(net, "nft-obj-%u", objtype) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -5989,8 +6009,7 @@ nft_flowtable_type_get(struct net *net, u8 family)
        lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES
        if (type == NULL) {
-               nft_request_module(net, "nf-flowtable-%u", family);
-               if (__nft_flowtable_type_get(family))
+               if (nft_request_module(net, "nf-flowtable-%u", family) == -EAGAIN)
                        return ERR_PTR(-EAGAIN);
        }
 #endif
@@ -6992,6 +7011,18 @@ static void nft_chain_del(struct nft_chain *chain)
        list_del_rcu(&chain->list);
 }
 
+static void nf_tables_module_autoload_cleanup(struct net *net)
+{
+       struct nft_module_request *req, *next;
+
+       WARN_ON_ONCE(!list_empty(&net->nft.commit_list));
+       list_for_each_entry_safe(req, next, &net->nft.module_list, list) {
+               WARN_ON_ONCE(!req->done);
+               list_del(&req->list);
+               kfree(req);
+       }
+}
+
 static void nf_tables_commit_release(struct net *net)
 {
        struct nft_trans *trans;
@@ -7004,6 +7035,7 @@ static void nf_tables_commit_release(struct net *net)
         * to prevent expensive synchronize_rcu() in commit phase.
         */
        if (list_empty(&net->nft.commit_list)) {
+               nf_tables_module_autoload_cleanup(net);
                mutex_unlock(&net->nft.commit_mutex);
                return;
        }
@@ -7018,6 +7050,7 @@ static void nf_tables_commit_release(struct net *net)
        list_splice_tail_init(&net->nft.commit_list, &nf_tables_destroy_list);
        spin_unlock(&nf_tables_destroy_list_lock);
 
+       nf_tables_module_autoload_cleanup(net);
        mutex_unlock(&net->nft.commit_mutex);
 
        schedule_work(&trans_destroy_work);
@@ -7209,6 +7242,26 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
        return 0;
 }
 
+static void nf_tables_module_autoload(struct net *net)
+{
+       struct nft_module_request *req, *next;
+       LIST_HEAD(module_list);
+
+       list_splice_init(&net->nft.module_list, &module_list);
+       mutex_unlock(&net->nft.commit_mutex);
+       list_for_each_entry_safe(req, next, &module_list, list) {
+               if (req->done) {
+                       list_del(&req->list);
+                       kfree(req);
+               } else {
+                       request_module("%s", req->module);
+                       req->done = true;
+               }
+       }
+       mutex_lock(&net->nft.commit_mutex);
+       list_splice(&module_list, &net->nft.module_list);
+}
+
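Taken together, the nf_tables hunks change module autoloading from "drop the commit mutex and load now" to record-and-replay: nft_request_module() queues the module name and returns -EAGAIN, and on abort-with-autoload this nf_tables_module_autoload() performs the loads and marks them done so the replayed batch finds them. A userspace model of that contract; every name below is hypothetical and the request_module() call is simulated with a printf:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define EAGAIN_MODEL 11

    struct request {
        char name[64];
        int done;
        struct request *next;
    };

    static struct request *pending;

    /* Model of nft_request_module(): never load inside the transaction;
     * record the name and ask the caller to replay the whole batch. */
    static int request_module_deferred(const char *name)
    {
        struct request *r;

        for (r = pending; r; r = r->next)
            if (!strcmp(r->name, name))
                return r->done ? 0 : -EAGAIN_MODEL;

        r = calloc(1, sizeof(*r));
        if (!r)
            return -1;
        snprintf(r->name, sizeof(r->name), "%s", name);
        r->next = pending;
        pending = r;
        return -EAGAIN_MODEL;
    }

    /* Model of nf_tables_module_autoload(): outside the transaction,
     * perform the loads and mark them done for the replay. */
    static void autoload(void)
    {
        struct request *r;

        for (r = pending; r; r = r->next) {
            if (!r->done) {
                printf("request_module(%s)\n", r->name);
                r->done = 1;
            }
        }
    }

    int main(void)
    {
        if (request_module_deferred("nft-chain-2-nat") == -EAGAIN_MODEL) {
            autoload();    /* batch aborted with autoload enabled */
            /* replay: the second lookup now reports success */
            printf("replay -> %d\n",
                   request_module_deferred("nft-chain-2-nat"));
        }
        return 0;
    }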
 static void nf_tables_abort_release(struct nft_trans *trans)
 {
        switch (trans->msg_type) {
@@ -7238,7 +7291,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
        kfree(trans);
 }
 
-static int __nf_tables_abort(struct net *net)
+static int __nf_tables_abort(struct net *net, bool autoload)
 {
        struct nft_trans *trans, *next;
        struct nft_trans_elem *te;
@@ -7360,6 +7413,11 @@ static int __nf_tables_abort(struct net *net)
                nf_tables_abort_release(trans);
        }
 
+       if (autoload)
+               nf_tables_module_autoload(net);
+       else
+               nf_tables_module_autoload_cleanup(net);
+
        return 0;
 }
 
@@ -7368,9 +7426,9 @@ static void nf_tables_cleanup(struct net *net)
        nft_validate_state_update(net, NFT_VALIDATE_SKIP);
 }
 
-static int nf_tables_abort(struct net *net, struct sk_buff *skb)
+static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
 {
-       int ret = __nf_tables_abort(net);
+       int ret = __nf_tables_abort(net, autoload);
 
        mutex_unlock(&net->nft.commit_mutex);
 
@@ -7965,6 +8023,7 @@ static int __net_init nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.tables);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       INIT_LIST_HEAD(&net->nft.module_list);
        mutex_init(&net->nft.commit_mutex);
        net->nft.base_seq = 1;
        net->nft.validate_state = NFT_VALIDATE_SKIP;
@@ -7976,7 +8035,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
 {
        mutex_lock(&net->nft.commit_mutex);
        if (!list_empty(&net->nft.commit_list))
-               __nf_tables_abort(net);
+               __nf_tables_abort(net, false);
        __nft_release_tables(net);
        mutex_unlock(&net->nft.commit_mutex);
        WARN_ON_ONCE(!list_empty(&net->nft.tables));
index a9ea29a..2bb2848 100644 (file)
@@ -564,7 +564,7 @@ static void nft_indr_block_cb(struct net_device *dev,
 
        mutex_lock(&net->nft.commit_mutex);
        chain = __nft_offload_get_chain(dev);
-       if (chain) {
+       if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
                struct nft_base_chain *basechain;
 
                basechain = nft_base_chain(chain);
index 4abbb45..99127e2 100644 (file)
@@ -476,7 +476,7 @@ ack:
        }
 done:
        if (status & NFNL_BATCH_REPLAY) {
-               ss->abort(net, oskb);
+               ss->abort(net, oskb, true);
                nfnl_err_reset(&err_list);
                kfree_skb(skb);
                module_put(ss->owner);
@@ -487,11 +487,11 @@ done:
                        status |= NFNL_BATCH_REPLAY;
                        goto done;
                } else if (err) {
-                       ss->abort(net, oskb);
+                       ss->abort(net, oskb, false);
                        netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
                }
        } else {
-               ss->abort(net, oskb);
+               ss->abort(net, oskb, false);
        }
        if (ss->cleanup)
                ss->cleanup(net);
index f54d6ae..b42247a 100644 (file)
@@ -61,6 +61,9 @@ static int nft_osf_init(const struct nft_ctx *ctx,
        int err;
        u8 ttl;
 
+       if (!tb[NFTA_OSF_DREG])
+               return -EINVAL;
+
        if (tb[NFTA_OSF_TTL]) {
                ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
                if (ttl > 2)
index 46b8ff2..1e8eeb0 100644 (file)
@@ -1475,7 +1475,7 @@ static int __init rose_proto_init(void)
        int rc;
 
        if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
-               printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n");
+               printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
                rc = -EINVAL;
                goto out;
        }
index 76e0d12..c2cdd0f 100644 (file)
@@ -2055,9 +2055,8 @@ replay:
                                                               &chain_info));
 
                mutex_unlock(&chain->filter_chain_lock);
-               tp_new = tcf_proto_create(nla_data(tca[TCA_KIND]),
-                                         protocol, prio, chain, rtnl_held,
-                                         extack);
+               tp_new = tcf_proto_create(name, protocol, prio, chain,
+                                         rtnl_held, extack);
                if (IS_ERR(tp_new)) {
                        err = PTR_ERR(tp_new);
                        goto errout_tp;
index 8f2ad70..d0140a9 100644 (file)
@@ -263,12 +263,12 @@ static int tcf_em_validate(struct tcf_proto *tp,
                                }
                                em->data = (unsigned long) v;
                        }
+                       em->datalen = data_len;
                }
        }
 
        em->matchid = em_hdr->matchid;
        em->flags = em_hdr->flags;
-       em->datalen = data_len;
        em->net = net;
 
        err = 0;
index 7ac1542..dc651a6 100644 (file)
@@ -268,9 +268,6 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        int err = -1;
        int mtu;
 
-       if (!dst)
-               goto tx_err_link_failure;
-
        dst_hold(dst);
        dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
        if (IS_ERR(dst)) {
@@ -297,7 +294,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
        mtu = dst_mtu(dst);
        if (!skb->ignore_df && skb->len > mtu) {
-               skb_dst_update_pmtu(skb, mtu);
+               skb_dst_update_pmtu_no_confirm(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
                        if (mtu < IPV6_MIN_MTU)
@@ -343,6 +340,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
        struct net_device_stats *stats = &xi->dev->stats;
+       struct dst_entry *dst = skb_dst(skb);
        struct flowi fl;
        int ret;
 
@@ -352,10 +350,33 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
        case htons(ETH_P_IPV6):
                xfrm_decode_session(skb, &fl, AF_INET6);
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+               if (!dst) {
+                       fl.u.ip6.flowi6_oif = dev->ifindex;
+                       fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                       dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
+                       if (dst->error) {
+                               dst_release(dst);
+                               stats->tx_carrier_errors++;
+                               goto tx_err;
+                       }
+                       skb_dst_set(skb, dst);
+               }
                break;
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+               if (!dst) {
+                       struct rtable *rt;
+
+                       fl.u.ip4.flowi4_oif = dev->ifindex;
+                       fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                       rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
+                       if (IS_ERR(rt)) {
+                               stats->tx_carrier_errors++;
+                               goto tx_err;
+                       }
+                       skb_dst_set(skb, &rt->dst);
+               }
                break;
        default:
                goto tx_err;
@@ -563,12 +584,9 @@ static void xfrmi_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &xfrmi_netdev_ops;
        dev->type               = ARPHRD_NONE;
-       dev->hard_header_len    = ETH_HLEN;
-       dev->min_header_len     = ETH_HLEN;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
-       dev->max_mtu            = ETH_DATA_LEN;
-       dev->addr_len           = ETH_ALEN;
+       dev->max_mtu            = IP_MAX_MTU;
        dev->flags              = IFF_NOARP;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = xfrmi_dev_free;
index 612268e..7225107 100644 (file)
 #define R_AARCH64_ABS64        257
 #endif
 
+#define R_ARM_PC24             1
+#define R_ARM_THM_CALL         10
+#define R_ARM_CALL             28
+
 static int fd_map;     /* File descriptor for file being modified. */
 static int mmap_failed; /* Boolean flag. */
 static char gpfx;      /* prefix for global symbol name (sometimes '_') */
@@ -418,6 +422,18 @@ static char const *already_has_rel_mcount = "success"; /* our work here is done!
 #define RECORD_MCOUNT_64
 #include "recordmcount.h"
 
+static int arm_is_fake_mcount(Elf32_Rel const *rp)
+{
+       switch (ELF32_R_TYPE(w(rp->r_info))) {
+       case R_ARM_THM_CALL:
+       case R_ARM_CALL:
+       case R_ARM_PC24:
+               return 0;
+       }
+
+       return 1;
+}
+
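arm_is_fake_mcount() above ignores mcount entries whose relocation is not one of the three ARM call types, so stale entries such as R_ARM_ABS32 records are not patched. A minimal sketch of reading ELF32_R_TYPE from a relocation's r_info; this assumes Linux <elf.h> and little-endian input, so recordmcount's byte-swap helper w() would be an identity and is omitted. The guarded defines mirror the ones the patch itself adds:

    #include <elf.h>    /* Elf32_Rel, ELF32_R_TYPE/ELF32_R_INFO on Linux */
    #include <stdio.h>

    #ifndef R_ARM_PC24
    #define R_ARM_PC24     1
    #endif
    #ifndef R_ARM_ABS32
    #define R_ARM_ABS32    2
    #endif
    #ifndef R_ARM_THM_CALL
    #define R_ARM_THM_CALL 10
    #endif
    #ifndef R_ARM_CALL
    #define R_ARM_CALL     28
    #endif

    /* Same shape as the patch: treat only real call relocations as mcount
     * call sites; anything else is "fake" and must not be patched. */
    static int is_fake_mcount(const Elf32_Rel *rp)
    {
        switch (ELF32_R_TYPE(rp->r_info)) {
        case R_ARM_THM_CALL:
        case R_ARM_CALL:
        case R_ARM_PC24:
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        Elf32_Rel call = { .r_offset = 0x1000,
                           .r_info = ELF32_R_INFO(5, R_ARM_CALL) };
        Elf32_Rel abs  = { .r_offset = 0x2000,
                           .r_info = ELF32_R_INFO(5, R_ARM_ABS32) };

        printf("call: fake=%d abs32: fake=%d\n",
               is_fake_mcount(&call), is_fake_mcount(&abs));
        return 0;
    }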
 /* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
  * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
  * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@@ -523,6 +539,7 @@ static int do_file(char const *const fname)
                altmcount = "__gnu_mcount_nc";
                make_nop = make_nop_arm;
                rel_type_nop = R_ARM_NONE;
+               is_fake_mcount32 = arm_is_fake_mcount;
                gpfx = 0;
                break;
        case EM_AARCH64: