Merge tag 'net-6.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 25 Jan 2024 18:58:35 +0000 (10:58 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 25 Jan 2024 18:58:35 +0000 (10:58 -0800)
Pull networking fixes from Paolo Abeni:
 "Including fixes from bpf, netfilter and WiFi.

  Jakub is doing a lot of work to include the self-tests in our CI; as a
  result, a significant number of self-test related fixes are flowing in
  (and will likely continue over the next few weeks).

  Current release - regressions:

   - bpf: fix a kernel crash for the riscv64 JIT

   - bnxt_en: fix memory leak in bnxt_hwrm_get_rings()

   - revert "net: macsec: use skb_ensure_writable_head_tail to expand
     the skb"

  Previous releases - regressions:

   - core: fix removing a namespace with conflicting altnames

   - tc/flower: fix chain template offload memory leak

   - tcp:
      - make sure the accept_queue's spinlocks are initialized only once
      - fix autocork on CPUs with a weak memory model

   - udp: fix busy polling

   - mlx5e:
      - fix out-of-bounds read in port timestamping
      - fix peer flow lists corruption

   - iwlwifi: fix a memory corruption

  Previous releases - always broken:

   - netfilter:
      - nft_chain_filter: handle NETDEV_UNREGISTER for inet/ingress
        basechain
      - nft_limit: reject configurations that cause integer overflow

   - bpf: fix bpf_xdp_adjust_tail() with XSK zero-copy mbuf, avoiding a
     NULL pointer dereference upon shrinking

   - llc: make llc_ui_sendmsg() more robust against bonding changes

   - smc: fix illegal rmb_desc access in SMC-D connection dump

   - dpll: fix pin dump crash for rebound module

   - bnxt_en: fix possible crash after creating sw mqprio TCs

   - hv_netvsc: calculate correct ring size when PAGE_SIZE is not 4kB

  Misc:

   - several self-test fixes for better integration with the netdev CI

   - added several missing module descriptions"

* tag 'net-6.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (88 commits)
  tsnep: Fix XDP_RING_NEED_WAKEUP for empty fill ring
  tsnep: Remove FCS for XDP data path
  net: fec: fix the unhandled context fault from smmu
  selftests: bonding: do not test arp/ns target with mode balance-alb/tlb
  fjes: fix memleaks in fjes_hw_setup
  i40e: update xdp_rxq_info::frag_size for ZC enabled Rx queue
  i40e: set xdp_rxq_info::frag_size
  xdp: reflect tail increase for MEM_TYPE_XSK_BUFF_POOL
  ice: update xdp_rxq_info::frag_size for ZC enabled Rx queue
  intel: xsk: initialize skb_frag_t::bv_offset in ZC drivers
  ice: remove redundant xdp_rxq_info registration
  i40e: handle multi-buffer packets that are shrunk by xdp prog
  ice: work on pre-XDP prog frag count
  xsk: fix usage of multi-buffer BPF helpers for ZC XDP
  xsk: make xsk_buff_pool responsible for clearing xdp_buff::flags
  xsk: recycle buffer in case Rx queue was full
  net: fill in MODULE_DESCRIPTION()s for rvu_mbox
  net: fill in MODULE_DESCRIPTION()s for litex
  net: fill in MODULE_DESCRIPTION()s for fsl_pq_mdio
  net: fill in MODULE_DESCRIPTION()s for fec
  ...

120 files changed:
arch/riscv/net/bpf_jit_comp64.c
drivers/dpll/dpll_core.c
drivers/dpll/dpll_core.h
drivers/dpll/dpll_netlink.c
drivers/net/ethernet/8390/8390.c
drivers/net/ethernet/8390/8390p.c
drivers/net/ethernet/8390/apne.c
drivers/net/ethernet/8390/hydra.c
drivers/net/ethernet/8390/stnic.c
drivers/net/ethernet/8390/zorro8390.c
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/cavium/liquidio/lio_core.c
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/litex/litex_liteeth.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/fjes/fjes_hw.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macsec.c
drivers/net/phy/micrel.c
drivers/net/tun.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/debugfs.c
drivers/net/wireless/ath/ath11k/debugfs.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intersil/p54/fwio.c
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/vport.h
include/linux/skmsg.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/llc_pdu.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/net/sock.h
include/net/xdp_sock_drv.h
net/8021q/vlan_netlink.c
net/core/dev.c
net/core/dev.h
net/core/filter.c
net/core/request_sock.c
net/core/sock.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp.c
net/ipv6/af_inet6.c
net/llc/af_llc.c
net/llc/llc_core.c
net/mac80211/Kconfig
net/mac80211/sta_info.c
net/mac80211/tx.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_chain_filter.c
net/netfilter/nft_compat.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_limit.c
net/netfilter/nft_nat.c
net/netfilter/nft_rt.c
net/netfilter/nft_socket.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_xfrm.c
net/netlink/af_netlink.c
net/rds/af_rds.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/smc/smc_diag.c
net/wireless/Kconfig
net/wireless/nl80211.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
tools/testing/selftests/drivers/net/bonding/bond_options.sh
tools/testing/selftests/drivers/net/bonding/settings
tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
tools/testing/selftests/net/config
tools/testing/selftests/net/rps_default_mask.sh
tools/testing/selftests/net/so_incoming_cpu.c

index 58dc64d..719a97e 100644 (file)
@@ -795,6 +795,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
        struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
        struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
        struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+       bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
        void *orig_call = func_addr;
        bool save_ret;
        u32 insn;
@@ -878,7 +879,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        stack_size = round_up(stack_size, 16);
 
-       if (func_addr) {
+       if (!is_struct_ops) {
                /* For the trampoline called from function entry,
                 * the frame of traced function and the frame of
                 * trampoline need to be considered.
@@ -998,7 +999,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
 
-       if (func_addr) {
+       if (!is_struct_ops) {
                /* trampoline called from function entry */
                emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
                emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
index 1eca8cc..5152bd1 100644 (file)
@@ -29,8 +29,6 @@ static u32 dpll_pin_xa_id;
        WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
 #define ASSERT_DPLL_NOT_REGISTERED(d)  \
        WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
-#define ASSERT_PIN_REGISTERED(p)       \
-       WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
 
 struct dpll_device_registration {
        struct list_head list;
@@ -425,6 +423,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
 }
 EXPORT_SYMBOL_GPL(dpll_device_unregister);
 
+static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
+{
+       kfree(prop->package_label);
+       kfree(prop->panel_label);
+       kfree(prop->board_label);
+       kfree(prop->freq_supported);
+}
+
+static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+                            struct dpll_pin_properties *dst)
+{
+       memcpy(dst, src, sizeof(*dst));
+       if (src->freq_supported && src->freq_supported_num) {
+               size_t freq_size = src->freq_supported_num *
+                                  sizeof(*src->freq_supported);
+               dst->freq_supported = kmemdup(src->freq_supported,
+                                             freq_size, GFP_KERNEL);
+               if (!dst->freq_supported)
+                       return -ENOMEM;
+       }
+       if (src->board_label) {
+               dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
+               if (!dst->board_label)
+                       goto err_board_label;
+       }
+       if (src->panel_label) {
+               dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
+               if (!dst->panel_label)
+                       goto err_panel_label;
+       }
+       if (src->package_label) {
+               dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
+               if (!dst->package_label)
+                       goto err_package_label;
+       }
+
+       return 0;
+
+err_package_label:
+       kfree(dst->panel_label);
+err_panel_label:
+       kfree(dst->board_label);
+err_board_label:
+       kfree(dst->freq_supported);
+       return -ENOMEM;
+}
+
 static struct dpll_pin *
 dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
               const struct dpll_pin_properties *prop)
@@ -441,20 +486,24 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
        if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
                    prop->type > DPLL_PIN_TYPE_MAX)) {
                ret = -EINVAL;
-               goto err;
+               goto err_pin_prop;
        }
-       pin->prop = prop;
+       ret = dpll_pin_prop_dup(prop, &pin->prop);
+       if (ret)
+               goto err_pin_prop;
        refcount_set(&pin->refcount, 1);
        xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
        xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
        ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
                              &dpll_pin_xa_id, GFP_KERNEL);
        if (ret)
-               goto err;
+               goto err_xa_alloc;
        return pin;
-err:
+err_xa_alloc:
        xa_destroy(&pin->dpll_refs);
        xa_destroy(&pin->parent_refs);
+       dpll_pin_prop_free(&pin->prop);
+err_pin_prop:
        kfree(pin);
        return ERR_PTR(ret);
 }
@@ -514,6 +563,7 @@ void dpll_pin_put(struct dpll_pin *pin)
                xa_destroy(&pin->dpll_refs);
                xa_destroy(&pin->parent_refs);
                xa_erase(&dpll_pin_xa, pin->id);
+               dpll_pin_prop_free(&pin->prop);
                kfree(pin);
        }
        mutex_unlock(&dpll_lock);
@@ -564,8 +614,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
            WARN_ON(!ops->state_on_dpll_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;
-       if (ASSERT_DPLL_REGISTERED(dpll))
-               return -EINVAL;
 
        mutex_lock(&dpll_lock);
        if (WARN_ON(!(dpll->module == pin->module &&
@@ -636,15 +684,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
        unsigned long i, stop;
        int ret;
 
-       if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
+       if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
                return -EINVAL;
 
        if (WARN_ON(!ops) ||
            WARN_ON(!ops->state_on_pin_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;
-       if (ASSERT_PIN_REGISTERED(parent))
-               return -EINVAL;
 
        mutex_lock(&dpll_lock);
        ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
index 5585873..717f715 100644 (file)
@@ -44,7 +44,7 @@ struct dpll_device {
  * @module:            module of creator
  * @dpll_refs:         hold references to dplls pin was registered with
  * @parent_refs:       hold references to parent pins pin was registered with
- * @prop:              pointer to pin properties given by registerer
+ * @prop:              pin properties copied from the registerer
  * @rclk_dev_name:     holds name of device when pin can recover clock from it
  * @refcount:          refcount
  **/
@@ -55,7 +55,7 @@ struct dpll_pin {
        struct module *module;
        struct xarray dpll_refs;
        struct xarray parent_refs;
-       const struct dpll_pin_properties *prop;
+       struct dpll_pin_properties prop;
        refcount_t refcount;
 };
 
index 3370dbd..314bb37 100644 (file)
@@ -303,17 +303,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
        if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
                          DPLL_A_PIN_PAD))
                return -EMSGSIZE;
-       for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
+       for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
                nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
                if (!nest)
                        return -EMSGSIZE;
-               freq = pin->prop->freq_supported[fs].min;
+               freq = pin->prop.freq_supported[fs].min;
                if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
                                  &freq, DPLL_A_PIN_PAD)) {
                        nla_nest_cancel(msg, nest);
                        return -EMSGSIZE;
                }
-               freq = pin->prop->freq_supported[fs].max;
+               freq = pin->prop.freq_supported[fs].max;
                if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
                                  &freq, DPLL_A_PIN_PAD)) {
                        nla_nest_cancel(msg, nest);
@@ -329,9 +329,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
 {
        int fs;
 
-       for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
-               if (freq >= pin->prop->freq_supported[fs].min &&
-                   freq <= pin->prop->freq_supported[fs].max)
+       for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
+               if (freq >= pin->prop.freq_supported[fs].min &&
+                   freq <= pin->prop.freq_supported[fs].max)
                        return true;
        return false;
 }
@@ -421,7 +421,7 @@ static int
 dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
                     struct netlink_ext_ack *extack)
 {
-       const struct dpll_pin_properties *prop = pin->prop;
+       const struct dpll_pin_properties *prop = &pin->prop;
        struct dpll_pin_ref *ref;
        int ret;
 
@@ -553,6 +553,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
        return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
 }
 
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+       struct dpll_pin_ref *par_ref;
+       unsigned long i;
+
+       if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+               return false;
+       xa_for_each(&pin->parent_refs, i, par_ref)
+               if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+                               DPLL_REGISTERED))
+                       return true;
+       xa_for_each(&pin->dpll_refs, i, par_ref)
+               if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+                               DPLL_REGISTERED))
+                       return true;
+       return false;
+}
+
 /**
  * dpll_device_change_ntf - notify that the dpll device has been changed
  * @dpll: registered dpll pointer
@@ -579,7 +597,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
        int ret = -ENOMEM;
        void *hdr;
 
-       if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
+       if (!dpll_pin_available(pin))
                return -ENODEV;
 
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -717,7 +735,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "state changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -753,7 +771,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "state changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -780,7 +798,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "prio changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -808,7 +826,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "direction changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -838,8 +856,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
        int ret;
 
        phase_adj = nla_get_s32(phase_adj_attr);
-       if (phase_adj > pin->prop->phase_range.max ||
-           phase_adj < pin->prop->phase_range.min) {
+       if (phase_adj > pin->prop.phase_range.max ||
+           phase_adj < pin->prop.phase_range.min) {
                NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
                                    "phase adjust value not supported");
                return -EINVAL;
@@ -1023,7 +1041,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
        unsigned long i;
 
        xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
-               prop = pin->prop;
+               prop = &pin->prop;
                cid_match = clock_id ? pin->clock_id == clock_id : true;
                mod_match = mod_name_attr && module_name(pin->module) ?
                        !nla_strcmp(mod_name_attr,
@@ -1130,6 +1148,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
        }
        pin = dpll_pin_find_from_nlattr(info);
        if (!IS_ERR(pin)) {
+               if (!dpll_pin_available(pin)) {
+                       nlmsg_free(msg);
+                       return -ENODEV;
+               }
                ret = dpll_msg_add_pin_handle(msg, pin);
                if (ret) {
                        nlmsg_free(msg);
@@ -1179,6 +1201,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
        xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
                                 ctx->idx) {
+               if (!dpll_pin_available(pin))
+                       continue;
                hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq,
                                  &dpll_nl_family, NLM_F_MULTI,
@@ -1441,7 +1465,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
        }
        info->user_ptr[0] = xa_load(&dpll_pin_xa,
                                    nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
-       if (!info->user_ptr[0]) {
+       if (!info->user_ptr[0] ||
+           !dpll_pin_available(info->user_ptr[0])) {
                NL_SET_ERR_MSG(info->extack, "pin not found");
                ret = -ENODEV;
                goto unlock_dev;
index 0e0aa40..c563624 100644 (file)
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
 module_init(ns8390_module_init);
 module_exit(ns8390_module_exit);
 #endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
 MODULE_LICENSE("GPL");
index 6834742..6d429b1 100644 (file)
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
 
 module_init(NS8390p_init_module);
 module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
 MODULE_LICENSE("GPL");
index a09f383..828edca 100644 (file)
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
        return 1;
 }
 
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
 MODULE_LICENSE("GPL");
index 24f49a8..fd9dcdc 100644 (file)
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
 module_init(hydra_init_module);
 module_exit(hydra_cleanup_module);
 
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
 MODULE_LICENSE("GPL");
index 265976e..6cc0e19 100644 (file)
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
 
 module_init(stnic_probe);
 module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
 MODULE_LICENSE("GPL");
index d70390e..c24dd4f 100644 (file)
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
 module_init(zorro8390_init_module);
 module_exit(zorro8390_cleanup_module);
 
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
 MODULE_LICENSE("GPL");
index 3e7c867..72df1bb 100644 (file)
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
 };
 module_platform_driver(bcm4908_enet_driver);
 
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
index 9b83d53..50b8e97 100644 (file)
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
 EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
 
 MODULE_AUTHOR("RafaÅ‚ MiÅ‚ecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
 MODULE_LICENSE("GPL");
index 6e4f36a..36f9bad 100644 (file)
@@ -362,4 +362,5 @@ module_init(bgmac_init)
 module_exit(bgmac_exit)
 
 MODULE_AUTHOR("RafaÅ‚ MiÅ‚ecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
 MODULE_LICENSE("GPL");
index 0b21fd5..77425c7 100644 (file)
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
 };
 
 module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
 MODULE_LICENSE("GPL");
index 448a1b9..6ffdc42 100644 (file)
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
 EXPORT_SYMBOL_GPL(bgmac_enet_resume);
 
 MODULE_AUTHOR("RafaÅ‚ MiÅ‚ecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
 MODULE_LICENSE("GPL");
index 0aacd3c..39845d5 100644 (file)
@@ -3817,7 +3817,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 {
        bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
        int i, j, rc, ulp_base_vec, ulp_msix;
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        if (!tcs)
                tcs = 1;
@@ -5935,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
 
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 {
-       if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-               return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+       if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+               if (!rx_rings)
+                       return 0;
+               return bnxt_calc_nr_ring_pages(rx_rings - 1,
+                                              BNXT_RSS_TABLE_ENTRIES_P5);
+       }
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                return 2;
        return 1;
@@ -6926,7 +6930,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                        if (cp < (rx + tx)) {
                                rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
                                if (rc)
-                                       return rc;
+                                       goto get_rings_exit;
                                if (bp->flags & BNXT_FLAG_AGG_RINGS)
                                        rx <<= 1;
                                hw_resc->resv_rx_rings = rx;
@@ -6938,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                hw_resc->resv_cp_rings = cp;
                hw_resc->resv_stat_ctxs = stats;
        }
+get_rings_exit:
        hwrm_req_drop(bp, req);
-       return 0;
+       return rc;
 }
 
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -7000,10 +7005,11 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 
                req->num_rx_rings = cpu_to_le16(rx_rings);
                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+                       u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
                        req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
                        req->num_msix = cpu_to_le16(cp_rings);
-                       req->num_rsscos_ctxs =
-                               cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+                       req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
                } else {
                        req->num_cmpl_rings = cpu_to_le16(cp_rings);
                        req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -7050,8 +7056,10 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
        req->num_tx_rings = cpu_to_le16(tx_rings);
        req->num_rx_rings = cpu_to_le16(rx_rings);
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+               u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
                req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
-               req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+               req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
        } else {
                req->num_cmpl_rings = cpu_to_le16(cp_rings);
                req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -9938,7 +9946,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
 
 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        if (!tcs)
                tcs = 1;
@@ -9947,7 +9955,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
 
 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
               bp->tx_nr_rings_xdp;
@@ -9977,7 +9985,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
        struct net_device *dev = bp->dev;
        int tcs, i;
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
        if (tcs) {
                int i, off, count;
 
@@ -10009,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
 {
        const int len = sizeof(bp->irq_tbl[0].name);
 
-       if (netdev_get_num_tc(bp->dev))
+       if (bp->num_tc) {
                netdev_reset_tc(bp->dev);
+               bp->num_tc = 0;
+       }
 
        snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
                 0);
@@ -10236,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 
 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
        bool irq_cleared = false;
+       int tcs = bp->num_tc;
        int rc;
 
        if (!bnxt_need_reserve_rings(bp))
@@ -10263,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
                    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
                netdev_err(bp->dev, "tx ring reservation failure\n");
                netdev_reset_tc(bp->dev);
+               bp->num_tc = 0;
                if (bp->tx_nr_rings_xdp)
                        bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
                else
@@ -11564,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
                goto half_open_err;
        }
+       bnxt_init_napi(bp);
        set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
        rc = bnxt_init_nic(bp, true);
        if (rc) {
                clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+               bnxt_del_napi(bp);
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
                goto half_open_err;
        }
@@ -11586,6 +11599,7 @@ half_open_err:
 void bnxt_half_close_nic(struct bnxt *bp)
 {
        bnxt_hwrm_resource_free(bp, false, true);
+       bnxt_del_napi(bp);
        bnxt_free_skbs(bp);
        bnxt_free_mem(bp, true);
        clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@@ -13232,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
 
        bp->fw_cap = 0;
        rc = bnxt_hwrm_ver_get(bp);
+       /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+        * so wait before continuing with recovery.
+        */
+       if (rc)
+               msleep(100);
        bnxt_try_map_fw_health_reg(bp);
        if (rc) {
                rc = bnxt_try_recover_fw(bp);
@@ -13784,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
        }
 
-       if (netdev_get_num_tc(dev) == tc)
+       if (bp->num_tc == tc)
                return 0;
 
        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -13802,9 +13821,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
        if (tc) {
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
                netdev_set_num_tc(dev, tc);
+               bp->num_tc = tc;
        } else {
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
                netdev_reset_tc(dev);
+               bp->num_tc = 0;
        }
        bp->tx_nr_rings += bp->tx_nr_rings_xdp;
        tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
index b8ef171..47338b4 100644 (file)
@@ -2225,6 +2225,7 @@ struct bnxt {
        u8                      tc_to_qidx[BNXT_MAX_QUEUE];
        u8                      q_ids[BNXT_MAX_QUEUE];
        u8                      max_q;
+       u8                      num_tc;
 
        unsigned int            current_interval;
 #define BNXT_TIMER_INTERVAL    HZ
index 63e0670..0dbb880 100644 (file)
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
                }
        }
        if (bp->ieee_ets) {
-               int tc = netdev_get_num_tc(bp->dev);
+               int tc = bp->num_tc;
 
                if (!tc)
                        tc = 1;
index 27b983c..dc4ca70 100644 (file)
@@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
        if (max_tx_sch_inputs)
                max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
        tx_grps = max(tcs, 1);
        if (bp->tx_nr_rings_xdp)
                tx_grps++;
@@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
        if (channel->combined_count)
                sh = true;
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
 
        req_tx_rings = sh ? channel->combined_count : channel->tx_count;
        req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -1574,7 +1574,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
        struct bnxt *bp = netdev_priv(dev);
 
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-               return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+               return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+                      BNXT_RSS_TABLE_ENTRIES_P5;
        return HW_HASH_INDEX_SIZE;
 }
 
index c2b25fc..4079538 100644 (file)
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
        if (prog)
                tx_xdp = bp->rx_nr_rings;
 
-       tc = netdev_get_num_tc(dev);
+       tc = bp->num_tc;
        if (!tc)
                tc = 1;
        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
index 9cc6303..f38d31b 100644 (file)
@@ -27,6 +27,7 @@
 #include "octeon_network.h"
 
 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
 MODULE_LICENSE("GPL");
 
 /* OOM task polling interval */
index 1c2a540..1f495cf 100644 (file)
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
 
 module_platform_driver(ep93xx_eth_driver);
 
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:ep93xx-eth");
index df40c72..9aeff2b 100644 (file)
@@ -1485,7 +1485,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 
                        xdp_prepare_buff(&xdp, page_address(entry->page),
                                         XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
-                                        length, false);
+                                        length - ETH_FCS_LEN, false);
 
                        consume = tsnep_xdp_run_prog(rx, prog, &xdp,
                                                     &xdp_status, tx_nq, tx);
@@ -1568,7 +1568,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
                prefetch(entry->xdp->data);
                length = __le32_to_cpu(entry->desc_wb->properties) &
                         TSNEP_DESC_LENGTH_MASK;
-               xsk_buff_set_size(entry->xdp, length);
+               xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
                xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
 
                /* RX metadata with timestamps is in front of actual data,
@@ -1762,6 +1762,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
                        allocated--;
                }
        }
+
+       /* set need wakeup flag immediately if ring is not filled completely,
+        * first polling would be too late as need wakeup signalisation would
+        * be delayed for an indefinite time
+        */
+       if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+               int desc_available = tsnep_rx_desc_available(rx);
+
+               if (desc_available)
+                       xsk_set_rx_need_wakeup(rx->xsk_pool);
+               else
+                       xsk_clear_rx_need_wakeup(rx->xsk_pool);
+       }
 }
 
 static bool tsnep_pending(struct tsnep_queue *queue)
index 07c2b70..9ebe751 100644 (file)
@@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
 module_platform_driver(nps_enet_driver);
 
 MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
 MODULE_LICENSE("GPL v2");
index cffbf27..bfdbdab 100644 (file)
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL_GPL(enetc_pci_remove);
 
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
 MODULE_LICENSE("Dual BSD/GPL");
index d42594f..432523b 100644 (file)
@@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 
                /* if any of the above changed restart the FEC */
                if (status_change) {
+                       netif_stop_queue(ndev);
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
@@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                }
        } else {
                if (fep->link) {
+                       netif_stop_queue(ndev);
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_stop(ndev);
@@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
 
 module_platform_driver(fec_driver);
 
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
 MODULE_LICENSE("GPL");
index 70dd982..026f727 100644 (file)
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
 
 module_platform_driver(fsl_pq_mdio_driver);
 
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
 MODULE_LICENSE("GPL");
index ae8f9f1..6e7fd47 100644 (file)
@@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        struct i40e_hmc_obj_rxq rx_ctx;
        int err = 0;
        bool ok;
-       int ret;
 
        bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(rx_ctx));
 
-       if (ring->vsi->type == I40E_VSI_MAIN)
-               xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+       ring->rx_buf_len = vsi->rx_buf_len;
+
+       /* XDP RX-queue info only needed for RX rings exposed to XDP */
+       if (ring->vsi->type != I40E_VSI_MAIN)
+               goto skip;
+
+       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                        ring->queue_index,
+                                        ring->q_vector->napi.napi_id,
+                                        ring->rx_buf_len);
+               if (err)
+                       return err;
+       }
 
        ring->xsk_pool = i40e_xsk_pool(ring);
        if (ring->xsk_pool) {
-               ring->rx_buf_len =
-                 xsk_pool_get_rx_frame_size(ring->xsk_pool);
-               ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+               xdp_rxq_info_unreg(&ring->xdp_rxq);
+               ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                        ring->queue_index,
+                                        ring->q_vector->napi.napi_id,
+                                        ring->rx_buf_len);
+               if (err)
+                       return err;
+               err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                 MEM_TYPE_XSK_BUFF_POOL,
                                                 NULL);
-               if (ret)
-                       return ret;
+               if (err)
+                       return err;
                dev_info(&vsi->back->pdev->dev,
                         "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                         ring->queue_index);
 
        } else {
-               ring->rx_buf_len = vsi->rx_buf_len;
-               if (ring->vsi->type == I40E_VSI_MAIN) {
-                       ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-                                                        MEM_TYPE_PAGE_SHARED,
-                                                        NULL);
-                       if (ret)
-                               return ret;
-               }
+               err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                MEM_TYPE_PAGE_SHARED,
+                                                NULL);
+               if (err)
+                       return err;
        }
 
+skip:
        xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
 
        rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
index 971ba33..0d71770 100644 (file)
@@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       int err;
 
        u64_stats_init(&rx_ring->syncp);
 
@@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->next_to_process = 0;
        rx_ring->next_to_use = 0;
 
-       /* XDP RX-queue info only needed for RX rings exposed to XDP */
-       if (rx_ring->vsi->type == I40E_VSI_MAIN) {
-               err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                      rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
-               if (err < 0)
-                       return err;
-       }
-
        rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
        rx_ring->rx_bi =
@@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
                                  struct xdp_buff *xdp)
 {
-       u32 next = rx_ring->next_to_clean;
+       u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+       u32 next = rx_ring->next_to_clean, i = 0;
        struct i40e_rx_buffer *rx_buffer;
 
        xdp->flags = 0;
@@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
                if (!rx_buffer->page)
                        continue;
 
-               if (xdp_res == I40E_XDP_CONSUMED)
-                       rx_buffer->pagecnt_bias++;
-               else
+               if (xdp_res != I40E_XDP_CONSUMED)
                        i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+               else if (i++ <= nr_frags)
+                       rx_buffer->pagecnt_bias++;
 
                /* EOP buffer will be put in i40e_clean_rx_irq() */
                if (next == rx_ring->next_to_process)
@@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
  * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function allocates an skb.  It then populates it with the page
  * data from the current receive descriptor, taking care to set up the
  * skb correctly.
  */
 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
-                                         struct xdp_buff *xdp,
-                                         u32 nr_frags)
+                                         struct xdp_buff *xdp)
 {
        unsigned int size = xdp->data_end - xdp->data;
        struct i40e_rx_buffer *rx_buffer;
+       struct skb_shared_info *sinfo;
        unsigned int headlen;
        struct sk_buff *skb;
+       u32 nr_frags = 0;
 
        /* prefetch first cache line of first page */
        net_prefetch(xdp->data);
@@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        memcpy(__skb_put(skb, headlen), xdp->data,
               ALIGN(headlen, sizeof(long)));
 
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
        /* update all of the pointers */
        size -= headlen;
@@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        }
 
        if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+               struct skb_shared_info *skinfo = skb_shinfo(skb);
 
-               sinfo = xdp_get_shared_info_from_buff(xdp);
                memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
                       sizeof(skb_frag_t) * nr_frags);
 
@@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
  * i40e_build_skb - Build skb around an existing buffer
  * @rx_ring: Rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function builds an skb around an existing Rx buffer, taking care
  * to set up the skb correctly and avoid any memcpy overhead.
  */
 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
-                                     struct xdp_buff *xdp,
-                                     u32 nr_frags)
+                                     struct xdp_buff *xdp)
 {
        unsigned int metasize = xdp->data - xdp->data_meta;
+       struct skb_shared_info *sinfo;
        struct sk_buff *skb;
+       u32 nr_frags;
 
        /* Prefetch first cache line of first page. If xdp->data_meta
         * is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
         */
        net_prefetch(xdp->data_meta);
 
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
+
        /* build an skb around the page buffer */
        skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
        if (unlikely(!skb))
@@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                skb_metadata_set(skb, metasize);
 
        if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *sinfo;
-
-               sinfo = xdp_get_shared_info_from_buff(xdp);
                xdp_update_skb_shared_info(skb, nr_frags,
                                           sinfo->xdp_frags_size,
                                           nr_frags * xdp->frame_sz,
@@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                        total_rx_bytes += size;
                } else {
                        if (ring_uses_build_skb(rx_ring))
-                               skb = i40e_build_skb(rx_ring, xdp, nfrags);
+                               skb = i40e_build_skb(rx_ring, xdp);
                        else
-                               skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+                               skb = i40e_construct_skb(rx_ring, xdp);
 
                        /* drop if we failed to retrieve a buffer */
                        if (!skb) {
index af7d5fa..1150000 100644 (file)
@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start), 0, size);
+                                  virt_to_page(xdp->data_hard_start),
+                                  XDP_PACKET_HEADROOM, size);
        sinfo->xdp_frags_size += size;
        xsk_buff_add_frag(xdp);
 
@@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
                i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
                                          &rx_bytes, xdp_res, &failure);
-               first->flags = 0;
                next_to_clean = next_to_process;
                if (failure)
                        break;
index 533b923..7ac8477 100644 (file)
@@ -547,19 +547,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
        ring->rx_buf_len = ring->vsi->rx_buf_len;
 
        if (ring->vsi->type == ICE_VSI_PF) {
-               if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-                       /* coverity[check_return] */
-                       __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-                                          ring->q_index,
-                                          ring->q_vector->napi.napi_id,
-                                          ring->vsi->rx_buf_len);
+               if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+                       err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                ring->q_index,
+                                                ring->q_vector->napi.napi_id,
+                                                ring->rx_buf_len);
+                       if (err)
+                               return err;
+               }
 
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool) {
-                       xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+                       xdp_rxq_info_unreg(&ring->xdp_rxq);
 
                        ring->rx_buf_len =
                                xsk_pool_get_rx_frame_size(ring->xsk_pool);
+                       err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                ring->q_index,
+                                                ring->q_vector->napi.napi_id,
+                                                ring->rx_buf_len);
+                       if (err)
+                               return err;
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_XSK_BUFF_POOL,
                                                         NULL);
@@ -571,13 +579,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                 ring->q_index);
                } else {
-                       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-                               /* coverity[check_return] */
-                               __xdp_rxq_info_reg(&ring->xdp_rxq,
-                                                  ring->netdev,
-                                                  ring->q_index,
-                                                  ring->q_vector->napi.napi_id,
-                                                  ring->vsi->rx_buf_len);
+                       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+                               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                        ring->q_index,
+                                                        ring->q_vector->napi.napi_id,
+                                                        ring->rx_buf_len);
+                               if (err)
+                                       return err;
+                       }
 
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
index 74d13cc..97d41d6 100644 (file)
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
 
-       if (rx_ring->vsi->type == ICE_VSI_PF &&
-           !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
-               if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                    rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
-                       goto err;
        return 0;
 
 err:
@@ -603,9 +598,7 @@ out_failure:
                ret = ICE_XDP_CONSUMED;
        }
 exit:
-       rx_buf->act = ret;
-       if (unlikely(xdp_buff_has_frags(xdp)))
-               ice_set_rx_bufs_act(xdp, rx_ring, ret);
+       ice_set_rx_bufs_act(xdp, rx_ring, ret);
 }
 
 /**
@@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
        }
 
        if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
-               if (unlikely(xdp_buff_has_frags(xdp)))
-                       ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+               ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
                return -ENOMEM;
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
                                   rx_buf->page_offset, size);
        sinfo->xdp_frags_size += size;
+       /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+        * can pop off frags but driver has to handle it on its own
+        */
+       rx_ring->nr_frags = sinfo->nr_frags;
 
        if (page_is_pfmemalloc(rx_buf->page))
                xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
                xdp->data = NULL;
                rx_ring->first_desc = ntc;
+               rx_ring->nr_frags = 0;
                continue;
 construct_skb:
                if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1266,10 +1263,12 @@ construct_skb:
                                                    ICE_XDP_CONSUMED);
                        xdp->data = NULL;
                        rx_ring->first_desc = ntc;
+                       rx_ring->nr_frags = 0;
                        break;
                }
                xdp->data = NULL;
                rx_ring->first_desc = ntc;
+               rx_ring->nr_frags = 0;
 
                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
                if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
index b3379ff..af955b0 100644 (file)
@@ -358,6 +358,7 @@ struct ice_rx_ring {
        struct ice_tx_ring *xdp_ring;
        struct ice_rx_ring *next;       /* pointer to next ring in q_vector */
        struct xsk_buff_pool *xsk_pool;
+       u32 nr_frags;
        dma_addr_t dma;                 /* physical address of ring */
        u16 rx_buf_len;
        u8 dcb_tc;                      /* Traffic class of ring */
index 7620475..afcead4 100644 (file)
  * act: action to store onto Rx buffers related to XDP buffer parts
  *
  * Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last.
  */
 static inline void
 ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
                    const unsigned int act)
 {
-       const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-       u32 first = rx_ring->first_desc;
-       u32 nr_frags = sinfo->nr_frags;
+       u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+       u32 nr_frags = rx_ring->nr_frags + 1;
+       u32 idx = rx_ring->first_desc;
        u32 cnt = rx_ring->count;
        struct ice_rx_buf *buf;
 
        for (int i = 0; i < nr_frags; i++) {
-               buf = &rx_ring->rx_buf[first];
+               buf = &rx_ring->rx_buf[idx];
                buf->act = act;
 
-               if (++first == cnt)
-                       first = 0;
+               if (++idx == cnt)
+                       idx = 0;
+       }
+
+       /* adjust pagecnt_bias on frags freed by XDP prog */
+       if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+               u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+               while (delta) {
+                       if (idx == 0)
+                               idx = cnt - 1;
+                       else
+                               idx--;
+                       buf = &rx_ring->rx_buf[idx];
+                       buf->pagecnt_bias--;
+                       delta--;
+               }
        }
 }
 
index 5d1ae8e..8b81a16 100644 (file)
@@ -825,7 +825,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start), 0, size);
+                                  virt_to_page(xdp->data_hard_start),
+                                  XDP_PACKET_HEADROOM, size);
        sinfo->xdp_frags_size += size;
        xsk_buff_add_frag(xdp);
 
@@ -895,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 
                if (!first) {
                        first = xdp;
-                       xdp_buff_clear_frags_flag(first);
                } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
                        break;
                }
index 5fea2fd..58179bd 100644 (file)
@@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
        /* setup watchdog timeout value to be 5 second */
        netdev->watchdog_timeo = 5 * HZ;
 
+       netdev->dev_port = idx;
+
        /* configure default MTU size */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = vport->max_mtu;
index 5182fe7..ff54fbe 100644 (file)
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
 module_platform_driver(liteeth_driver);
 
 MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
 MODULE_LICENSE("GPL");
index 820b1fa..23adf53 100644 (file)
@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
        mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
 }
 
+/* Cleanup pool before actual initialization in the OS */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+       unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+       u32 val;
+       int i;
+
+       /* Drain the BM from all possible residues left by firmware */
+       for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+               mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+       put_cpu();
+
+       /* Stop the BM pool */
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+       val |= MVPP2_BM_STOP_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
 {
        enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
        int i, err, poolnum = MVPP2_BM_POOLS_NUM;
        struct mvpp2_port *port;
 
+       if (priv->percpu_pools)
+               poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+       /* Clean up the pools in case they contain stale state */
+       for (i = 0; i < poolnum; i++)
+               mvpp2_bm_pool_cleanup(priv, i);
+
        if (priv->percpu_pools) {
                for (i = 0; i < priv->port_count; i++) {
                        port = priv->port_list[i];
@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
                        }
                }
 
-               poolnum = mvpp2_get_nrxqs(priv) * 2;
                for (i = 0; i < poolnum; i++) {
                        /* the pool in use */
                        int pn = i / (poolnum / 2);
index 9690ac0..b92264d 100644 (file)
@@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
 EXPORT_SYMBOL(otx2_mbox_id2name);
 
 MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
 MODULE_LICENSE("GPL v2");
index a7b1f96..4957412 100644 (file)
@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 {
        const char *namep = mlx5_command_str(opcode);
        struct mlx5_cmd_stats *stats;
+       unsigned long flags;
 
        if (!err || !(strcmp(namep, "unknown command opcode")))
                return;
@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
        stats = xa_load(&dev->cmd.stats, opcode);
        if (!stats)
                return;
-       spin_lock_irq(&stats->lock);
+       spin_lock_irqsave(&stats->lock, flags);
        stats->failed++;
        if (err < 0)
                stats->last_failed_errno = -err;
@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                stats->last_failed_mbox_status = status;
                stats->last_failed_syndrome = syndrome;
        }
-       spin_unlock_irq(&stats->lock);
+       spin_unlock_irqrestore(&stats->lock, flags);
 }
 
 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
index 0bfe1ca..55c6ace 100644 (file)
@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 
 int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
                       bool enable_mc_lb);
index e128353..671adba 100644 (file)
@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
        in = kvzalloc(inlen, GFP_KERNEL);
        if  (!in || !ft->g) {
                kfree(ft->g);
+               ft->g = NULL;
                kvfree(in);
                return -ENOMEM;
        }
index 284253b..5d213a9 100644 (file)
@@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
        bool allow_swp;
 
-       allow_swp =
-               mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+       allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+                   (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
index c206cc0..078f56a 100644 (file)
@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
        mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
 out:
        napi_consume_skb(skb, budget);
-       md_buff[*md_buff_sz++] = metadata_id;
+       md_buff[(*md_buff_sz)++] = metadata_id;
        if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
            !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
                queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
index 161c519..05612d9 100644 (file)
@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
        /* iv len */
        aes_gcm->icv_len = x->aead->alg_icv_len;
 
+       attrs->dir = x->xso.dir;
+
        /* esn */
        if (x->props.flags & XFRM_STATE_ESN) {
                attrs->replay_esn.trigger = true;
                attrs->replay_esn.esn = sa_entry->esn_state.esn;
                attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
                attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+               if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+                       goto skip_replay_window;
+
                switch (x->replay_esn->replay_window) {
                case 32:
                        attrs->replay_esn.replay_window =
@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                }
        }
 
-       attrs->dir = x->xso.dir;
+skip_replay_window:
        /* spi */
        attrs->spi = be32_to_cpu(x->id.spi);
 
@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
                        return -EINVAL;
                }
 
-               if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+               if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+                   x->replay_esn->replay_window != 32 &&
                    x->replay_esn->replay_window != 64 &&
                    x->replay_esn->replay_window != 128 &&
                    x->replay_esn->replay_window != 256) {
index bb7f86c..e66f486 100644 (file)
@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
 
        ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if  (!in || !ft->g) {
-               kfree(ft->g);
-               kvfree(in);
+       if (!ft->g)
                return -ENOMEM;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_free_g;
        }
 
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                break;
        default:
                err = -EINVAL;
-               goto out;
+               goto err_free_in;
        }
 
        switch (type) {
@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                break;
        default:
                err = -EINVAL;
-               goto out;
+               goto err_free_in;
        }
 
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err;
+               goto err_clean_group;
        ft->num_groups++;
 
        memset(in, 0, inlen);
@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err;
+               goto err_clean_group;
        ft->num_groups++;
 
        kvfree(in);
        return 0;
 
-err:
+err_clean_group:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
        kvfree(in);
-
+err_free_g:
+       kfree(ft->g);
+       ft->g = NULL;
        return err;
 }
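
The reworked error paths above follow the usual one-label-per-resource unwind, in reverse allocation order, and reset ft->g to NULL so a later teardown cannot free it twice. A generic, self-contained sketch of that shape, with stand-in calloc()/free() resources rather than the mlx5 objects:

#include <stdlib.h>

static int demo_create_groups(void **groups_out)
{
        void *groups, *scratch;
        int err;

        groups = calloc(4, 16);                 /* long-lived allocation */
        if (!groups)
                return -1;

        scratch = calloc(1, 64);                /* temporary work buffer */
        if (!scratch) {
                err = -1;
                goto err_free_groups;
        }

        if (0 /* a later step failing would land here */) {
                err = -1;
                goto err_free_scratch;
        }

        free(scratch);          /* success: only the scratch buffer goes */
        *groups_out = groups;
        return 0;

err_free_scratch:
        free(scratch);
err_free_groups:
        free(groups);
        *groups_out = NULL;     /* mirrors ft->g = NULL, so callers cannot double-free */
        return err;
}

int main(void)
{
        void *groups = NULL;

        if (demo_create_groups(&groups))
                return 1;
        free(groups);
        return 0;
}
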
 
index 67f5466..6ed3a32 100644 (file)
@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
 {
        int tc, i;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++)
+       for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
                for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
                        mlx5e_destroy_tis(mdev, tisn[i][tc]);
 }
@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
        int tc, i;
        int err;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++) {
+       for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
                for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
                        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
                        void *tisc;
@@ -140,7 +140,7 @@ err_close_tises:
        return err;
 }
 
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
 {
        struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
        int err;
@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
                goto err_destroy_mkey;
        }
 
-       err = mlx5e_create_tises(mdev, res->tisn);
-       if (err) {
-               mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
-               goto err_destroy_bfreg;
+       if (create_tises) {
+               err = mlx5e_create_tises(mdev, res->tisn);
+               if (err) {
+                       mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+                       goto err_destroy_bfreg;
+               }
+               res->tisn_valid = true;
        }
+
        INIT_LIST_HEAD(&res->td.tirs_list);
        mutex_init(&res->td.list_lock);
 
@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
 
        mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
        mdev->mlx5e_res.dek_priv = NULL;
-       mlx5e_destroy_tises(mdev, res->tisn);
+       if (res->tisn_valid)
+               mlx5e_destroy_tises(mdev, res->tisn);
        mlx5_free_bfreg(mdev, &res->bfreg);
        mlx5_core_destroy_mkey(mdev, res->mkey);
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
index b5f1c4c..c8e8f51 100644 (file)
@@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
        if (netif_device_present(netdev))
                return 0;
 
-       err = mlx5e_create_mdev_resources(mdev);
+       err = mlx5e_create_mdev_resources(mdev, true);
        if (err)
                return err;
 
index 30932c9..9fb2c05 100644 (file)
@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 
        err = mlx5e_rss_params_indir_init(&indir, mdev,
                                          mlx5e_rqt_size(mdev, hp->num_channels),
-                                         mlx5e_rqt_size(mdev, priv->max_nch));
+                                         mlx5e_rqt_size(mdev, hp->num_channels));
        if (err)
                return err;
 
@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
        list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
                if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
                        continue;
+
+               list_del(&peer_flow->peer_flows);
                if (refcount_dec_and_test(&peer_flow->refcnt)) {
                        mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
-                       list_del(&peer_flow->peer_flows);
                        kfree(peer_flow);
                }
        }
index a7ed87e..22dd30c 100644 (file)
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
                i++;
        }
 
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
        ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
        if (!rule_spec)
                return ERR_PTR(-ENOMEM);
 
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
                dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
                dest.vport.vhca_id = port->esw_owner_vhca_id;
        }
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
 
        kvfree(rule_spec);
index 1616a61..9b8599c 100644 (file)
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                 fte->flow_context.flow_tag);
        MLX5_SET(flow_context, in_flow_context, flow_source,
                 fte->flow_context.flow_source);
+       MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+                !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
 
        MLX5_SET(flow_context, in_flow_context, extended_destination,
                 extended_dest);
index 5884512..d77be1b 100644 (file)
@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                }
 
                /* This should only be called once per mdev */
-               err = mlx5e_create_mdev_resources(mdev);
+               err = mlx5e_create_mdev_resources(mdev, false);
                if (err)
                        goto destroy_ht;
        }
index 40c7be1..58bd749 100644 (file)
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-       MLX5_SET(cqc,   cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+       MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
index 6f9790e..2ebb61e 100644 (file)
@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                switch (action_type) {
                case DR_ACTION_TYP_DROP:
                        attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                       attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
                        break;
                case DR_ACTION_TYP_FT:
                        dest_action = action;
@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                                                        action->sampler->tx_icm_addr;
                        break;
                case DR_ACTION_TYP_VPORT:
-                       attr.hit_gvmi = action->vport->caps->vhca_gvmi;
-                       dest_action = action;
-                       attr.final_icm_addr = rx_rule ?
-                               action->vport->caps->icm_address_rx :
-                               action->vport->caps->icm_address_tx;
+                       if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+                               /* can't go to uplink on RX rule - dropping instead */
+                               attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                               attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+                       } else {
+                               attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+                               dest_action = action;
+                               attr.final_icm_addr = rx_rule ?
+                                                     action->vport->caps->icm_address_rx :
+                                                     action->vport->caps->icm_address_tx;
+                       }
                        break;
                case DR_ACTION_TYP_POP_VLAN:
                        if (!rx_rule && !(dmn->ste_ctx->actions_caps &
index 21753f3..1005bb6 100644 (file)
@@ -440,6 +440,27 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
 
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u32 *out;
+       int err;
+
+       out = kvzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_query_nic_vport_context(mdev, 0, out);
+       if (err)
+               goto out;
+
+       *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+                            nic_vport_context.sd_group);
+out:
+       kvfree(out);
+       return err;
+}
+
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 {
        u32 *out;
index a0e4636..b334eb1 100644 (file)
@@ -7542,6 +7542,9 @@ int stmmac_dvr_probe(struct device *device,
                dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
                        ERR_PTR(ret));
 
+       /* Wait a bit for the reset to take effect */
+       udelay(10);
+
        /* Init MAC and get the capabilities */
        ret = stmmac_hw_init(priv);
        if (ret)
index 704e949..b9b5554 100644 (file)
@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
 
        mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
        hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
-       if (!(hw->hw_info.req_buf))
-               return -ENOMEM;
+       if (!(hw->hw_info.req_buf)) {
+               result = -ENOMEM;
+               goto free_ep_info;
+       }
 
        hw->hw_info.req_buf_size = mem_size;
 
        mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
        hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
-       if (!(hw->hw_info.res_buf))
-               return -ENOMEM;
+       if (!(hw->hw_info.res_buf)) {
+               result = -ENOMEM;
+               goto free_req_buf;
+       }
 
        hw->hw_info.res_buf_size = mem_size;
 
        result = fjes_hw_alloc_shared_status_region(hw);
        if (result)
-               return result;
+               goto free_res_buf;
 
        hw->hw_info.buffer_share_bit = 0;
        hw->hw_info.buffer_unshare_reserve_bit = 0;
@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
 
                        result = fjes_hw_alloc_epbuf(&buf_pair->tx);
                        if (result)
-                               return result;
+                               goto free_epbuf;
 
                        result = fjes_hw_alloc_epbuf(&buf_pair->rx);
                        if (result)
-                               return result;
+                               goto free_epbuf;
 
                        spin_lock_irqsave(&hw->rx_status_lock, flags);
                        fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
        fjes_hw_init_command_registers(hw, &param);
 
        return 0;
+
+free_epbuf:
+       for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+               if (epidx == hw->my_epid)
+                       continue;
+               fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+               fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+       }
+       fjes_hw_free_shared_status_region(hw);
+free_res_buf:
+       kfree(hw->hw_info.res_buf);
+       hw->hw_info.res_buf = NULL;
+free_req_buf:
+       kfree(hw->hw_info.req_buf);
+       hw->hw_info.req_buf = NULL;
+free_ep_info:
+       kfree(hw->ep_shm_info);
+       hw->ep_shm_info = NULL;
+       return result;
 }
 
 static void fjes_hw_cleanup(struct fjes_hw *hw)
index 4406427..273bd8a 100644 (file)
@@ -44,7 +44,7 @@
 
 static unsigned int ring_size __ro_after_init = 128;
 module_param(ring_size, uint, 0444);
-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
 unsigned int netvsc_ring_bytes __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -2807,7 +2807,7 @@ static int __init netvsc_drv_init(void)
                pr_info("Increased ring_size to %u (min allowed)\n",
                        ring_size);
        }
-       netvsc_ring_bytes = ring_size * PAGE_SIZE;
+       netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
 
        register_netdevice_notifier(&netvsc_netdev_notifier);
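
Per the updated MODULE_PARM_DESC, ring_size is counted in 4K pages, so scaling it by PAGE_SIZE inflated the buffer 16x on a 64K-page kernel; the change scales by 4096 and lets VMBUS_RING_SIZE() round up. A rough arithmetic sketch, assuming (as I read current kernels) that VMBUS_RING_SIZE() rounds the payload plus the ring-buffer header up to a page boundary; the DEMO_* names and the 4K header size are placeholders:

#include <stdio.h>

#define DEMO_PAGE_SIZE  (64UL * 1024)   /* e.g. arm64 built with 64K pages */
#define DEMO_HDR_SIZE   4096UL          /* stand-in for the VMBus ring header */
#define DEMO_ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned long ring_size = 128;  /* module parameter, counted in 4K pages */
        unsigned long old_bytes = ring_size * DEMO_PAGE_SIZE;
        unsigned long new_bytes = DEMO_ALIGN_UP(ring_size * 4096 + DEMO_HDR_SIZE,
                                                DEMO_PAGE_SIZE);

        /* 8388608 vs 589824: the payload stays at 512K; only the header and
         * page alignment are added on top */
        printf("old=%lu bytes, new=%lu bytes\n", old_bytes, new_bytes);
        return 0;
}
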
 
index e348166..7f54262 100644 (file)
@@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       ret = skb_ensure_writable_head_tail(skb, dev);
-       if (unlikely(ret < 0)) {
-               macsec_txsa_put(tx_sa);
-               kfree_skb(skb);
-               return ERR_PTR(ret);
+       if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
+                    skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
+               struct sk_buff *nskb = skb_copy_expand(skb,
+                                                      MACSEC_NEEDED_HEADROOM,
+                                                      MACSEC_NEEDED_TAILROOM,
+                                                      GFP_ATOMIC);
+               if (likely(nskb)) {
+                       consume_skb(skb);
+                       skb = nskb;
+               } else {
+                       macsec_txsa_put(tx_sa);
+                       kfree_skb(skb);
+                       return ERR_PTR(-ENOMEM);
+               }
+       } else {
+               skb = skb_unshare(skb, GFP_ATOMIC);
+               if (!skb) {
+                       macsec_txsa_put(tx_sa);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
        unprotected_len = skb->len;
index 81c20eb..dad7201 100644 (file)
  */
 #define LAN8814_1PPM_FORMAT                    17179
 
+#define PTP_RX_VERSION                         0x0248
+#define PTP_TX_VERSION                         0x0288
+#define PTP_MAX_VERSION(x)                     (((x) & GENMASK(7, 0)) << 8)
+#define PTP_MIN_VERSION(x)                     ((x) & GENMASK(7, 0))
+
 #define PTP_RX_MOD                             0x024F
 #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
 #define PTP_RX_TIMESTAMP_EN                    0x024D
@@ -3150,6 +3155,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
        lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
        lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
 
+       /* Disable checking for minorVersionPTP field */
+       lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+                             PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+       lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+                             PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+
        skb_queue_head_init(&ptp_priv->tx_queue);
        skb_queue_head_init(&ptp_priv->rx_queue);
        INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
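
For reference, the two writes above program what the macros suggest is a 16-bit register holding the maximum accepted version in bits 15:8 and the minimum in bits 7:0, so 0xff/0x0 accepts anything and effectively turns the version check off. A tiny standalone check of the packing, with DEMO_* stand-ins for the macros:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_VERSION(x)     (((x) & 0xffu) << 8)    /* bits 15:8 */
#define DEMO_MIN_VERSION(x)     ((x) & 0xffu)           /* bits 7:0  */

int main(void)
{
        uint16_t val = DEMO_MAX_VERSION(0xff) | DEMO_MIN_VERSION(0x0);

        /* prints 0xff00: any version (and any minorVersionPTP) is accepted */
        printf("0x%04x\n", (unsigned int)val);
        return 0;
}
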
index afa5497..4a4f8c8 100644 (file)
@@ -1630,13 +1630,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
        switch (act) {
        case XDP_REDIRECT:
                err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
-               if (err)
+               if (err) {
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return err;
+               }
+               dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
                break;
        case XDP_TX:
                err = tun_xdp_tx(tun->dev, xdp);
-               if (err < 0)
+               if (err < 0) {
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return err;
+               }
+               dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
                break;
        case XDP_PASS:
                break;
index 7e3b677..02e160d 100644 (file)
@@ -368,10 +368,6 @@ struct ath11k_vif {
        struct ieee80211_chanctx_conf chanctx;
        struct ath11k_arp_ns_offload arp_ns_offload;
        struct ath11k_rekey_data rekey_data;
-
-#ifdef CONFIG_ATH11K_DEBUGFS
-       struct dentry *debugfs_twt;
-#endif /* CONFIG_ATH11K_DEBUGFS */
 };
 
 struct ath11k_vif_iter {
index a847bc0..a48e737 100644 (file)
@@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
        .open = simple_open
 };
 
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif)
 {
+       struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        struct ath11k_base *ab = arvif->ar->ab;
+       struct dentry *debugfs_twt;
 
        if (arvif->vif->type != NL80211_IFTYPE_AP &&
            !(arvif->vif->type == NL80211_IFTYPE_STATION &&
              test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
                return;
 
-       arvif->debugfs_twt = debugfs_create_dir("twt",
-                                               arvif->vif->debugfs_dir);
-       debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+       debugfs_twt = debugfs_create_dir("twt",
+                                        arvif->vif->debugfs_dir);
+       debugfs_create_file("add_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_add_dialog);
 
-       debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("del_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_del_dialog);
 
-       debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("pause_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_pause_dialog);
 
-       debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("resume_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_resume_dialog);
 }
 
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-       if (!arvif->debugfs_twt)
-               return;
-
-       debugfs_remove_recursive(arvif->debugfs_twt);
-       arvif->debugfs_twt = NULL;
-}
index 44d1584..a39e458 100644 (file)
@@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
        return ar->debug.rx_filter;
 }
 
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif);
 void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
                                     enum wmi_direct_buffer_module id,
                                     enum ath11k_dbg_dbr_event event,
@@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
        return 0;
 }
 
-static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
-{
-}
-
-static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-}
-
 static inline void
 ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
                                enum wmi_direct_buffer_module id,
index db24158..b13525b 100644 (file)
@@ -6756,13 +6756,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
                goto err;
        }
 
-       /* In the case of hardware recovery, debugfs files are
-        * not deleted since ieee80211_ops.remove_interface() is
-        * not invoked. In such cases, try to delete the files.
-        * These will be re-created later.
-        */
-       ath11k_debugfs_remove_interface(arvif);
-
        memset(arvif, 0, sizeof(*arvif));
 
        arvif->ar = ar;
@@ -6939,8 +6932,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
 
        ath11k_dp_vdev_tx_attach(ar, arvif);
 
-       ath11k_debugfs_add_interface(arvif);
-
        if (vif->type != NL80211_IFTYPE_MONITOR &&
            test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
                ret = ath11k_mac_monitor_vdev_create(ar);
@@ -7056,8 +7047,6 @@ err_vdev_del:
        /* Recalc txpower for remaining vdev */
        ath11k_mac_txpower_recalc(ar);
 
-       ath11k_debugfs_remove_interface(arvif);
-
        /* TODO: recal traffic pause state based on the available vdevs */
 
        mutex_unlock(&ar->conf_mutex);
@@ -9153,6 +9142,7 @@ static const struct ieee80211_ops ath11k_ops = {
 #endif
 
 #ifdef CONFIG_ATH11K_DEBUGFS
+       .vif_add_debugfs                = ath11k_debugfs_op_vif_add,
        .sta_add_debugfs                = ath11k_debugfs_sta_op_add,
 #endif
 
index 3b14f64..7207572 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 #include <linux/firmware.h>
 #include "iwl-drv.h"
@@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
                node_trig = (void *)node_tlv->data;
        }
 
-       memcpy(node_trig->data + offset, trig->data, trig_data_len);
+       memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
        node_tlv->length = cpu_to_le32(size);
 
        if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
index b52cce3..c4fe70e 100644 (file)
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
                           "FW rev %s - Softmac protocol %x.%x\n",
                           fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
                snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
-                               "%s - %x.%x", fw_version,
+                               "%.19s - %x.%x", fw_version,
                                priv->fw_var >> 8, priv->fw_var & 0xff);
        }
 
index 8c55ff3..41f03b3 100644 (file)
@@ -681,6 +681,7 @@ struct mlx5e_resources {
                struct mlx5_sq_bfreg       bfreg;
 #define MLX5_MAX_NUM_TC 8
                u32                        tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+               bool                       tisn_valid;
        } hw_objs;
        struct net_device *uplink_netdev;
        struct mutex uplink_netdev_lock;
index 6f77252..3fb428c 100644 (file)
@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
 
 enum {
        FLOW_CONTEXT_HAS_TAG = BIT(0),
+       FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
 };
 
 struct mlx5_flow_context {
index bf5320b..c726f90 100644 (file)
@@ -3576,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
        u8         action[0x10];
 
        u8         extended_destination[0x1];
-       u8         reserved_at_81[0x1];
+       u8         uplink_hairpin_en[0x1];
        u8         flow_source[0x2];
        u8         encrypt_decrypt_type[0x4];
        u8         destination_list_size[0x18];
@@ -4036,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
        u8         affiliation_criteria[0x4];
        u8         affiliated_vhca_id[0x10];
 
-       u8         reserved_at_60[0xd0];
+       u8         reserved_at_60[0xa0];
+
+       u8         reserved_at_100[0x1];
+       u8         sd_group[0x3];
+       u8         reserved_at_104[0x1c];
 
+       u8         reserved_at_120[0x10];
        u8         mtu[0x10];
 
        u8         system_image_guid[0x40];
@@ -10122,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits {
        u8         reserved_at_20[0x20];
 
        u8         local_port[0x8];
-       u8         reserved_at_28[0x15];
-       u8         sd_group[0x3];
+       u8         reserved_at_28[0x18];
 
        u8         reserved_at_60[0x20];
 };
index fbb9bf4..c36cc6d 100644 (file)
@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                                    u16 vport, u64 node_guid);
index 888a4b2..e65ec3f 100644 (file)
@@ -505,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
        return !!psock->saved_data_ready;
 }
 
-static inline bool sk_is_udp(const struct sock *sk)
-{
-       return sk->sk_type == SOCK_DGRAM &&
-              sk->sk_protocol == IPPROTO_UDP;
-}
-
 #if IS_ENABLED(CONFIG_NET_SOCK_MSG)
 
 #define BPF_F_STRPARSER        (1UL << 1)
index d0a2f82..9ab4bf7 100644 (file)
@@ -357,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
        return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
 }
 
+static inline void inet_init_csk_locks(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+       spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
+}
+
 #endif /* _INET_CONNECTION_SOCK_H */
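
Taken together with the request_sock.c and af_inet changes further down, the accept-queue spinlocks are now initialized exactly once, when the socket is created or accepted, instead of every time the listen path allocates the request queue; re-running spin_lock_init() on a lock that may be in use elsewhere is what this appears to guard against. A compilable pthread analogue of the init-once-at-creation pattern (types and names invented; link with -lpthread on older glibc):

#include <pthread.h>
#include <stdio.h>

struct demo_csk {
        pthread_mutex_t accept_lock;    /* analogue of the accept_queue locks */
};

static void demo_sock_create(struct demo_csk *sk)
{
        /* init happens exactly once, when the object comes into existence */
        pthread_mutex_init(&sk->accept_lock, NULL);
}

static void demo_listen(struct demo_csk *sk)
{
        /* deliberately does NOT re-initialize the lock: re-running the init
         * while another thread might hold it would corrupt the lock */
        (void)sk;
}

int main(void)
{
        struct demo_csk sk;

        demo_sock_create(&sk);
        demo_listen(&sk);
        demo_listen(&sk);               /* calling it again stays harmless */

        pthread_mutex_lock(&sk.accept_lock);
        puts("lock usable, initialized once at creation");
        pthread_mutex_unlock(&sk.accept_lock);
        pthread_mutex_destroy(&sk.accept_lock);
        return 0;
}
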
index aa86453..d94c242 100644 (file)
@@ -307,11 +307,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
 #define inet_assign_bit(nr, sk, val)           \
        assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
 
-static inline bool sk_is_inet(struct sock *sk)
-{
-       return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
-}
-
 /**
  * sk_to_full_sk - Access to a full socket
  * @sk: pointer to a socket
index 7e73f8e..1d55ba7 100644 (file)
@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
  */
 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
 {
-       if (skb->protocol == htons(ETH_P_802_2))
-               memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+       memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
 }
 
 /**
@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
  */
 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
 {
-       if (skb->protocol == htons(ETH_P_802_2))
-               memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+       memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
 }
 
 /**
index b157c5c..4e1ea18 100644 (file)
@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
  *     @nla: netlink attributes
  *     @portid: netlink portID of the original message
  *     @seq: netlink sequence number
+ *     @flags: modifiers to new request
  *     @family: protocol family
  *     @level: depth of the chains
  *     @report: notify via unicast netlink message
@@ -282,6 +283,7 @@ struct nft_elem_priv { };
  *
  *     @key: element key
  *     @key_end: closing element key
+ *     @data: element data
  *     @priv: element private data and extensions
  */
 struct nft_set_elem {
@@ -325,10 +327,10 @@ struct nft_set_iter {
  *     @dtype: data type
  *     @dlen: data length
  *     @objtype: object type
- *     @flags: flags
  *     @size: number of set elements
  *     @policy: set policy
  *     @gc_int: garbage collector interval
+ *     @timeout: element timeout
  *     @field_len: length of each field in concatenation, bytes
  *     @field_count: number of concatenated fields in element
  *     @expr: set must support for expressions
@@ -351,9 +353,9 @@ struct nft_set_desc {
 /**
  *     enum nft_set_class - performance class
  *
- *     @NFT_LOOKUP_O_1: constant, O(1)
- *     @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- *     @NFT_LOOKUP_O_N: linear, O(N)
+ *     @NFT_SET_CLASS_O_1: constant, O(1)
+ *     @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ *     @NFT_SET_CLASS_O_N: linear, O(N)
  */
 enum nft_set_class {
        NFT_SET_CLASS_O_1,
@@ -422,9 +424,13 @@ struct nft_set_ext;
  *     @remove: remove element from set
  *     @walk: iterate over all set elements
  *     @get: get set elements
+ *     @commit: commit set elements
+ *     @abort: abort set elements
  *     @privsize: function to return size of set private data
+ *     @estimate: estimate the required memory size and the lookup complexity class
  *     @init: initialize private data of new set instance
  *     @destroy: destroy private data of set instance
+ *     @gc_init: initialize garbage collection
  *     @elemsize: element private size
  *
  *     Operations lookup, update and delete have simpler interfaces, are faster
@@ -540,13 +546,16 @@ struct nft_set_elem_expr {
  *     @policy: set parameterization (see enum nft_set_policies)
  *     @udlen: user data length
  *     @udata: user data
- *     @expr: stateful expression
+ *     @pending_update: list of set elements with pending updates
  *     @ops: set ops
  *     @flags: set flags
  *     @dead: set will be freed, never cleared
  *     @genmask: generation mask
  *     @klen: key length
  *     @dlen: data length
+ *     @num_exprs: number of expressions in @exprs
+ *     @exprs: stateful expressions
+ *     @catchall_list: list of catch-all set element
  *     @data: private set data
  */
 struct nft_set {
@@ -692,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
  *
  *     @len: length of extension area
  *     @offset: offsets of individual extension types
+ *     @ext_len: length of the expected extension (used for sanity checks)
  */
 struct nft_set_ext_tmpl {
        u16     len;
@@ -840,6 +850,7 @@ struct nft_expr_ops;
  *     @select_ops: function to select nft_expr_ops
  *     @release_ops: release nft_expr_ops
  *     @ops: default ops, used when no select_ops functions is present
+ *     @inner_ops: inner ops, used for inner packet operation
  *     @list: used internally
  *     @name: Identifier
  *     @owner: module reference
@@ -881,14 +892,22 @@ struct nft_offload_ctx;
  *     struct nft_expr_ops - nf_tables expression operations
  *
  *     @eval: Expression evaluation function
+ *     @clone: Expression clone function
  *     @size: full expression size, including private data size
  *     @init: initialization function
  *     @activate: activate expression in the next generation
  *     @deactivate: deactivate expression in next generation
  *     @destroy: destruction function, called after synchronize_rcu
+ *     @destroy_clone: destruction clone function
  *     @dump: function to dump parameters
- *     @type: expression type
  *     @validate: validate expression, called during loop detection
+ *     @reduce: reduce expression
+ *     @gc: garbage collection expression
+ *     @offload: hardware offload expression
+ *     @offload_action: function to report whether the expression needs a slot in the
+ *                      flow offload array
+ *     @offload_stats: function to synchronize hardware stats by updating the counter expression
+ *     @type: expression type
  *     @data: extra data to attach to this expression operation
  */
 struct nft_expr_ops {
@@ -1041,14 +1060,21 @@ struct nft_rule_blob {
 /**
  *     struct nft_chain - nf_tables chain
  *
+ *     @blob_gen_0: rule blob pointer to the current generation
+ *     @blob_gen_1: rule blob pointer to the future generation
  *     @rules: list of rules in the chain
  *     @list: used internally
  *     @rhlhead: used internally
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
- *     @flags: bitmask of enum nft_chain_flags
+ *     @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ *     @bound: whether the chain is bound
+ *     @genmask: generation mask
  *     @name: name of the chain
+ *     @udlen: user data length
+ *     @udata: user data in the chain
+ *     @blob_next: rule blob pointer to the next in the chain
  */
 struct nft_chain {
        struct nft_rule_blob            __rcu *blob_gen_0;
@@ -1146,6 +1172,7 @@ struct nft_hook {
  *     @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
  *     @type: chain type
  *     @policy: default policy
+ *     @flags: indicate whether the base chain is disabled
  *     @stats: per-cpu chain stats
  *     @chain: the chain
  *     @flow_block: flow block (for hardware offload)
@@ -1274,11 +1301,13 @@ struct nft_object_hash_key {
  *     struct nft_object - nf_tables stateful object
  *
  *     @list: table stateful object list node
- *     @key:  keys that identify this object
  *     @rhlhead: nft_objname_ht node
+ *     @key: keys that identify this object
  *     @genmask: generation mask
  *     @use: number of references to this stateful object
  *     @handle: unique object handle
+ *     @udlen: length of user data
+ *     @udata: user data
  *     @ops: object operations
  *     @data: object data, layout depends on type
  */
@@ -1344,6 +1373,7 @@ struct nft_object_type {
  *     @destroy: release existing stateful object
  *     @dump: netlink dump stateful object
  *     @update: update stateful object
+ *     @type: pointer to object type
  */
 struct nft_object_ops {
        void                            (*eval)(struct nft_object *obj,
@@ -1379,9 +1409,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
  *     @genmask: generation mask
  *     @use: number of references to this flow table
  *     @handle: unique object handle
- *     @dev_name: array of device names
+ *     @hook_list: list of per-net_device hooks for this flowtable
  *     @data: rhashtable and garbage collector
- *     @ops: array of hooks
  */
 struct nft_flowtable {
        struct list_head                list;
index ba3e1b3..934fdb9 100644 (file)
@@ -375,6 +375,10 @@ struct tcf_proto_ops {
                                                struct nlattr **tca,
                                                struct netlink_ext_ack *extack);
        void                    (*tmplt_destroy)(void *tmplt_priv);
+       void                    (*tmplt_reoffload)(struct tcf_chain *chain,
+                                                  bool add,
+                                                  flow_setup_cb_t *cb,
+                                                  void *cb_priv);
        struct tcf_exts *       (*get_exts)(const struct tcf_proto *tp,
                                            u32 handle);
 
index a7f815c..54ca8dc 100644 (file)
@@ -2765,9 +2765,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
                           &skb_shinfo(skb)->tskey);
 }
 
+static inline bool sk_is_inet(const struct sock *sk)
+{
+       int family = READ_ONCE(sk->sk_family);
+
+       return family == AF_INET || family == AF_INET6;
+}
+
 static inline bool sk_is_tcp(const struct sock *sk)
 {
-       return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+       return sk_is_inet(sk) &&
+              sk->sk_type == SOCK_STREAM &&
+              sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+       return sk_is_inet(sk) &&
+              sk->sk_type == SOCK_DGRAM &&
+              sk->sk_protocol == IPPROTO_UDP;
 }
 
 static inline bool sk_is_stream_unix(const struct sock *sk)
index 526c1e7..c9aec9a 100644 (file)
@@ -159,11 +159,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
        return ret;
 }
 
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+       struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+       list_del(&xskb->xskb_list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+       struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+       struct xdp_buff_xsk *frag;
+
+       frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+                              xskb_list_node);
+       return &frag->xdp;
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
        xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
        xdp->data_meta = xdp->data;
        xdp->data_end = xdp->data + size;
+       xdp->flags = 0;
 }
 
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
@@ -350,6 +368,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
        return NULL;
 }
 
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+       return NULL;
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
 }
index 2145321..a3b6824 100644 (file)
@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        }
        if (data[IFLA_VLAN_INGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+                       if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+                               continue;
                        m = nla_data(attr);
                        vlan_dev_set_ingress_priority(dev, m->to, m->from);
                }
        }
        if (data[IFLA_VLAN_EGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+                       if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+                               continue;
                        m = nla_data(attr);
                        err = vlan_dev_set_egress_priority(dev, m->from, m->to);
                        if (err)
index f01a9b8..cb2dab0 100644 (file)
@@ -11551,6 +11551,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
 
 static void __net_exit default_device_exit_net(struct net *net)
 {
+       struct netdev_name_node *name_node, *tmp;
        struct net_device *dev, *aux;
        /*
         * Push all migratable network devices back to the
@@ -11573,6 +11574,14 @@ static void __net_exit default_device_exit_net(struct net *net)
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                if (netdev_name_in_use(&init_net, fb_name))
                        snprintf(fb_name, IFNAMSIZ, "dev%%d");
+
+               netdev_for_each_altname_safe(dev, name_node, tmp)
+                       if (netdev_name_in_use(&init_net, name_node->name)) {
+                               netdev_name_node_del(name_node);
+                               synchronize_rcu();
+                               __netdev_name_node_alt_destroy(name_node);
+                       }
+
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
index cf93e18..7480b4c 100644 (file)
@@ -63,6 +63,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
 
 #define netdev_for_each_altname(dev, namenode)                         \
        list_for_each_entry((namenode), &(dev)->name_node->list, list)
+#define netdev_for_each_altname_safe(dev, namenode, next)              \
+       list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
+                                list)
 
 int netdev_name_node_alt_create(struct net_device *dev, const char *name);
 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
index 24061f2..ef3e78b 100644 (file)
@@ -83,6 +83,7 @@
 #include <net/netfilter/nf_conntrack_bpf.h>
 #include <net/netkit.h>
 #include <linux/un.h>
+#include <net/xdp_sock_drv.h>
 
 #include "dev.h"
 
@@ -4092,10 +4093,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
        memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
        skb_frag_size_add(frag, offset);
        sinfo->xdp_frags_size += offset;
+       if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+               xsk_buff_get_tail(xdp)->data_end += offset;
 
        return 0;
 }
 
+static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
+                                  struct xdp_mem_info *mem_info, bool release)
+{
+       struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
+
+       if (release) {
+               xsk_buff_del_tail(zc_frag);
+               __xdp_return(NULL, mem_info, false, zc_frag);
+       } else {
+               zc_frag->data_end -= shrink;
+       }
+}
+
+static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
+                               int shrink)
+{
+       struct xdp_mem_info *mem_info = &xdp->rxq->mem;
+       bool release = skb_frag_size(frag) == shrink;
+
+       if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
+               bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
+               goto out;
+       }
+
+       if (release) {
+               struct page *page = skb_frag_page(frag);
+
+               __xdp_return(page_address(page), mem_info, false, NULL);
+       }
+
+out:
+       return release;
+}
+
 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
 {
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -4110,12 +4147,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
 
                len_free += shrink;
                offset -= shrink;
-
-               if (skb_frag_size(frag) == shrink) {
-                       struct page *page = skb_frag_page(frag);
-
-                       __xdp_return(page_address(page), &xdp->rxq->mem,
-                                    false, NULL);
+               if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
                        n_frags_free++;
                } else {
                        skb_frag_size_sub(frag, shrink);
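
The reworked shrink path decides, frag by frag from the tail, whether a fragment is released outright (and, for XSK zero-copy memory, also unlinked from the pool's list) or merely shortened. A toy model of just that loop, with plain integers standing in for frag sizes and none of the page or xsk bookkeeping:

#include <stdio.h>

int main(void)
{
        int frags[] = { 1500, 1500, 700 };      /* tail frag sizes, in bytes */
        int nr_frags = 3;
        int offset = 900;                       /* bytes to trim from the tail */

        while (offset > 0 && nr_frags > 0) {
                int *last = &frags[nr_frags - 1];
                int shrink = *last < offset ? *last : offset;

                offset -= shrink;
                if (*last == shrink)
                        nr_frags--;             /* fully consumed: release the frag */
                else
                        *last -= shrink;        /* partially consumed: just shorten it */
        }

        printf("left with %d frags, last one %d bytes\n",
               nr_frags, nr_frags ? frags[nr_frags - 1] : 0);
        return 0;
}
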
index f35c2e9..63de5c6 100644 (file)
@@ -33,9 +33,6 @@
 
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
-       spin_lock_init(&queue->rskq_lock);
-
-       spin_lock_init(&queue->fastopenq.lock);
        queue->fastopenq.rskq_rst_head = NULL;
        queue->fastopenq.rskq_rst_tail = NULL;
        queue->fastopenq.qlen = 0;
index 158dbde..0a7f46c 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/poll.h>
 #include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
@@ -4144,8 +4145,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
-              sk_busy_loop_timeout(sk, start_time);
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               return true;
+
+       if (sk_is_udp(sk) &&
+           !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
+               return true;
+
+       return sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
 #endif /* CONFIG_NET_RX_BUSY_POLL */
index 835f4f9..4e635dd 100644 (file)
@@ -330,6 +330,9 @@ lookup_protocol:
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
+       if (INET_PROTOSW_ICSK & answer_flags)
+               inet_init_csk_locks(sk);
+
        inet = inet_sk(sk);
        inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
 
index 8e2eb17..459af1f 100644 (file)
@@ -727,6 +727,10 @@ out:
        }
        if (req)
                reqsk_put(req);
+
+       if (newsk)
+               inet_init_csk_locks(newsk);
+
        return newsk;
 out_err:
        newsk = NULL;
index 1baa484..a1c6de3 100644 (file)
@@ -722,6 +722,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
                if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
                        set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+                       smp_mb__after_atomic();
                }
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
index 13a1833..959bfd9 100644 (file)
@@ -199,6 +199,9 @@ lookup_protocol:
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
+       if (INET_PROTOSW_ICSK & answer_flags)
+               inet_init_csk_locks(sk);
+
        inet = inet_sk(sk);
        inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
 
index 9b06c38..20551cf 100644 (file)
@@ -928,14 +928,15 @@ copy_uaddr:
  */
 static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
+       DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
-       DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        int flags = msg->msg_flags;
        int noblock = flags & MSG_DONTWAIT;
+       int rc = -EINVAL, copied = 0, hdrlen, hh_len;
        struct sk_buff *skb = NULL;
+       struct net_device *dev;
        size_t size = 0;
-       int rc = -EINVAL, copied = 0, hdrlen;
 
        dprintk("%s: sending from %02X to %02X\n", __func__,
                llc->laddr.lsap, llc->daddr.lsap);
@@ -955,22 +956,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                if (rc)
                        goto out;
        }
-       hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
+       dev = llc->dev;
+       hh_len = LL_RESERVED_SPACE(dev);
+       hdrlen = llc_ui_header_len(sk, addr);
        size = hdrlen + len;
-       if (size > llc->dev->mtu)
-               size = llc->dev->mtu;
+       size = min_t(size_t, size, READ_ONCE(dev->mtu));
        copied = size - hdrlen;
        rc = -EINVAL;
        if (copied < 0)
                goto out;
        release_sock(sk);
-       skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+       skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
        lock_sock(sk);
        if (!skb)
                goto out;
-       skb->dev      = llc->dev;
+       if (sock_flag(sk, SOCK_ZAPPED) ||
+           llc->dev != dev ||
+           hdrlen != llc_ui_header_len(sk, addr) ||
+           hh_len != LL_RESERVED_SPACE(dev) ||
+           size > READ_ONCE(dev->mtu))
+               goto out;
+       skb->dev      = dev;
        skb->protocol = llc_proto_type(addr->sllc_arphrd);
-       skb_reserve(skb, hdrlen);
+       skb_reserve(skb, hh_len + hdrlen);
        rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
        if (rc)
                goto out;
index 6e387aa..4f16d9c 100644 (file)
@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
        .func = llc_rcv,
 };
 
-static struct packet_type llc_tr_packet_type __read_mostly = {
-       .type = cpu_to_be16(ETH_P_TR_802_2),
-       .func = llc_rcv,
-};
-
 static int __init llc_init(void)
 {
        dev_add_pack(&llc_packet_type);
-       dev_add_pack(&llc_tr_packet_type);
        return 0;
 }
 
 static void __exit llc_exit(void)
 {
        dev_remove_pack(&llc_packet_type);
-       dev_remove_pack(&llc_tr_packet_type);
 }
 
 module_init(llc_init);
index cb0291d..13438cc 100644
@@ -62,7 +62,6 @@ config MAC80211_KUNIT_TEST
        depends on KUNIT
        depends on MAC80211
        default KUNIT_ALL_TESTS
-       depends on !KERNEL_6_2
        help
          Enable this option to test mac80211 internals with kunit.
 
index bf1adcd..4391d8d 100644
@@ -404,7 +404,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
-               if (!(sta->sta.valid_links & BIT(i)))
+               struct link_sta_info *link_sta;
+
+               link_sta = rcu_access_pointer(sta->link[i]);
+               if (!link_sta)
                        continue;
 
                sta_remove_link(sta, i, false);
@@ -910,6 +913,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        if (ieee80211_vif_is_mesh(&sdata->vif))
                mesh_accept_plinks_update(sdata);
 
+       ieee80211_check_fast_xmit(sta);
+
        return 0;
  out_remove:
        if (sta->sta.valid_links)
index 314998f..68a48ab 100644
@@ -3048,7 +3048,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
            sdata->vif.type == NL80211_IFTYPE_STATION)
                goto out;
 
-       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
                goto out;
 
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
index 4b55533..c537104 100644
@@ -24,6 +24,7 @@
 #include <net/sock.h>
 
 #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+#define NFT_SET_MAX_ANONLEN 16
 
 unsigned int nf_tables_net_id __read_mostly;
 
@@ -4413,6 +4414,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;
 
+               if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
+                       return -EINVAL;
+
                inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (inuse == NULL)
                        return -ENOMEM;
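The new check caps the length of anonymous set name templates so the "%d" expansion always has room in the buffer used for the generated name. A standalone sketch of the strnlen() bounding idiom; the buffer-size macro is illustrative and simply mirrors NFT_SET_MAX_ANONLEN:

#include <errno.h>
#include <string.h>

#define ANON_NAME_MAX 16        /* illustrative, mirrors NFT_SET_MAX_ANONLEN */

/* Refuse templates whose fixed prefix already fills the name buffer. */
static int check_anon_template(const char *tmpl)
{
        if (strnlen(tmpl, ANON_NAME_MAX) >= ANON_NAME_MAX)
                return -EINVAL;
        return 0;
}
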
@@ -10988,16 +10992,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
        data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
 
        switch (data->verdict.code) {
-       default:
-               switch (data->verdict.code & NF_VERDICT_MASK) {
-               case NF_ACCEPT:
-               case NF_DROP:
-               case NF_QUEUE:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               fallthrough;
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+               break;
        case NFT_CONTINUE:
        case NFT_BREAK:
        case NFT_RETURN:
@@ -11032,6 +11030,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 
                data->verdict.chain = chain;
                break;
+       default:
+               return -EINVAL;
        }
 
        desc->len = sizeof(data->verdict);
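The verdict parser is flattened into a single switch: plain NF_ACCEPT/NF_DROP/NF_QUEUE and the nftables-internal codes are accepted, while everything else, including verdicts carrying extra bits, now hits the default case and is rejected. A compact sketch of the same flat-switch validation; the enum values are placeholders rather than the netfilter constants:

#include <errno.h>

enum verdict { V_ACCEPT, V_DROP, V_QUEUE, V_CONTINUE, V_JUMP };

static int verdict_valid(int code)
{
        switch (code) {
        case V_ACCEPT:
        case V_DROP:
        case V_QUEUE:
        case V_CONTINUE:
        case V_JUMP:
                return 0;
        default:                /* unknown or decorated codes are refused */
                return -EINVAL;
        }
}
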
index 680fe55..274b6f7 100644
@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
                                  unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct nft_base_chain *basechain;
        struct nftables_pernet *nft_net;
-       struct nft_table *table;
        struct nft_chain *chain, *nr;
+       struct nft_table *table;
        struct nft_ctx ctx = {
                .net    = dev_net(dev),
        };
@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
        nft_net = nft_pernet(ctx.net);
        mutex_lock(&nft_net->commit_mutex);
        list_for_each_entry(table, &nft_net->tables, list) {
-               if (table->family != NFPROTO_NETDEV)
+               if (table->family != NFPROTO_NETDEV &&
+                   table->family != NFPROTO_INET)
                        continue;
 
                ctx.family = table->family;
@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
                        if (!nft_is_base_chain(chain))
                                continue;
 
+                       basechain = nft_base_chain(chain);
+                       if (table->family == NFPROTO_INET &&
+                           basechain->ops.hooknum != NF_INET_INGRESS)
+                               continue;
+
                        ctx.chain = chain;
                        nft_netdev_event(event, dev, &ctx);
                }
index 5284cd2..f0eeda9 100644
@@ -350,6 +350,12 @@ static int nft_target_validate(const struct nft_ctx *ctx,
        unsigned int hook_mask = 0;
        int ret;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
@@ -595,6 +601,12 @@ static int nft_match_validate(const struct nft_ctx *ctx,
        unsigned int hook_mask = 0;
        int ret;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
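This hunk, and the matching ones in the nft_flow_offload, nft_nat, nft_rt, nft_socket, nft_synproxy, nft_tproxy and nft_xfrm changes below, make each expression's validate callback reject table families it does not support, returning -EOPNOTSUPP instead of accepting them implicitly. A generic sketch of the allow-list shape; the family constants are placeholders:

#include <errno.h>

enum family { FAM_IPV4, FAM_IPV6, FAM_INET, FAM_BRIDGE, FAM_ARP, FAM_NETDEV };

/* Only families the expression was written for get past validation. */
static int validate_family(enum family f)
{
        switch (f) {
        case FAM_IPV4:
        case FAM_IPV6:
        case FAM_INET:
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
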
index ab3362c..397351f 100644
@@ -384,6 +384,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 {
        unsigned int hook_mask = (1 << NF_INET_FORWARD);
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
index 79039af..cefa25e 100644
@@ -58,17 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
 static int nft_limit_init(struct nft_limit_priv *priv,
                          const struct nlattr * const tb[], bool pkts)
 {
+       u64 unit, tokens, rate_with_burst;
        bool invert = false;
-       u64 unit, tokens;
 
        if (tb[NFTA_LIMIT_RATE] == NULL ||
            tb[NFTA_LIMIT_UNIT] == NULL)
                return -EINVAL;
 
        priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       if (priv->rate == 0)
+               return -EINVAL;
+
        unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-       priv->nsecs = unit * NSEC_PER_SEC;
-       if (priv->rate == 0 || priv->nsecs < unit)
+       if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
                return -EOVERFLOW;
 
        if (tb[NFTA_LIMIT_BURST])
@@ -77,18 +79,25 @@ static int nft_limit_init(struct nft_limit_priv *priv,
        if (pkts && priv->burst == 0)
                priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
 
-       if (priv->rate + priv->burst < priv->rate)
+       if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
                return -EOVERFLOW;
 
        if (pkts) {
-               tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
+               u64 tmp = div64_u64(priv->nsecs, priv->rate);
+
+               if (check_mul_overflow(tmp, priv->burst, &tokens))
+                       return -EOVERFLOW;
        } else {
+               u64 tmp;
+
                /* The token bucket size limits the number of tokens can be
                 * accumulated. tokens_max specifies the bucket size.
                 * tokens_max = unit * (rate + burst) / rate.
                 */
-               tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
-                                priv->rate);
+               if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
+                       return -EOVERFLOW;
+
+               tokens = div64_u64(tmp, priv->rate);
        }
 
        if (tb[NFTA_LIMIT_FLAGS]) {
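Rather than multiplying first and trying to detect wraparound afterwards, the limit setup now routes every multiplication and addition through check_mul_overflow()/check_add_overflow(), failing with -EOVERFLOW as soon as a value would not fit in u64. Those kernel helpers wrap the compiler overflow builtins; a minimal userspace equivalent under that assumption (the function name is illustrative):

#include <errno.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Compute unit * NSEC_PER_SEC and rate + burst, refusing any overflow. */
static int limit_precompute(uint64_t unit, uint64_t rate, uint64_t burst,
                            uint64_t *nsecs, uint64_t *rate_with_burst)
{
        if (__builtin_mul_overflow(unit, NSEC_PER_SEC, nsecs))
                return -EOVERFLOW;
        if (__builtin_add_overflow(rate, burst, rate_with_burst))
                return -EOVERFLOW;
        return 0;
}

The token computation in the hunk follows the same rule: divide first where possible, and overflow-check every remaining multiplication.
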
index 583885c..808f580 100644
@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
        struct nft_nat *priv = nft_expr_priv(expr);
        int err;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
        if (err < 0)
                return err;
index 35a2c28..24d9771 100644
@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
        const struct nft_rt *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        switch (priv->key) {
        case NFT_RT_NEXTHOP4:
        case NFT_RT_NEXTHOP6:
index 9ed85be..f30163e 100644
@@ -242,6 +242,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain,
                                        (1 << NF_INET_PRE_ROUTING) |
                                        (1 << NF_INET_LOCAL_IN) |
index 13da882..1d737f8 100644
@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
                break;
 #endif
        case NFPROTO_INET:
-       case NFPROTO_BRIDGE:
                err = nf_synproxy_ipv4_init(snet, ctx->net);
                if (err)
                        goto nf_ct_failure;
@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
                break;
 #endif
        case NFPROTO_INET:
-       case NFPROTO_BRIDGE:
                nf_synproxy_ipv4_fini(snet, ctx->net);
                nf_synproxy_ipv6_fini(snet, ctx->net);
                break;
@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
                                 const struct nft_expr *expr,
                                 const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
                                                    (1 << NF_INET_FORWARD));
 }
index ae15cd6..71412ad 100644
@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
 }
 
index 452f858..1c86675 100644
@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
        const struct nft_xfrm *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        switch (priv->dir) {
        case XFRM_POLICY_IN:
                hooks = (1 << NF_INET_FORWARD) |
index 4ed8ffd..9c96234 100644
@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
-                       vfree(skb->head);
+                       vfree_atomic(skb->head);
 
                skb->head = NULL;
        }
index 01c4cdf..8435a20 100644
@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
 
        rs->rs_rx_traces = trace.rx_traces;
        for (i = 0; i < rs->rs_rx_traces; i++) {
-               if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
+               if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
                        rs->rs_rx_traces = 0;
                        return -EFAULT;
                }
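rx_trace_pos[] values are later used as array indices, so the valid range is 0..RDS_MSG_RX_DGRAM_TRACE_MAX-1; the previous strict '>' let the one-past-the-end value through. A tiny sketch of the corrected bound; the array name and size are illustrative:

#include <errno.h>

#define TRACE_MAX 4                     /* illustrative array size */

static unsigned int trace_hits[TRACE_MAX];

static int record_trace(unsigned int pos)
{
        if (pos >= TRACE_MAX)           /* '>=': index TRACE_MAX is already out of range */
                return -EFAULT;
        trace_hits[pos]++;
        return 0;
}
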
index 92a12e3..ff3d396 100644
@@ -1560,6 +1560,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
+               if (chain->tmplt_ops && add)
+                       chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
+                                                         cb_priv);
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
@@ -1575,6 +1578,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                                goto err_playback_remove;
                        }
                }
+               if (chain->tmplt_ops && !add)
+                       chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
+                                                         cb_priv);
        }
 
        return 0;
@@ -3000,7 +3006,8 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
        ops = tcf_proto_lookup_ops(name, true, extack);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
-       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
+           !ops->tmplt_reoffload) {
                NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
                module_put(ops->owner);
                return -EOPNOTSUPP;
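Chain templates gain an offload-replay hook: when a block callback binds or unbinds, the template is offloaded before the filters and removed after them, and a classifier may only use templates if it provides the new ->tmplt_reoffload() alongside the existing template callbacks (cls_flower adds it in the next hunk). A generic sketch of gating a feature on a complete ops table; the struct and member names are invented for illustration:

#include <errno.h>
#include <stdbool.h>

struct tmplt_ops {
        int  (*create)(void *priv);
        void (*destroy)(void *priv);
        int  (*dump)(void *priv);
        void (*reoffload)(void *priv, bool add);  /* the newly required hook */
};

/* Templates are refused unless every callback they will ever need exists. */
static int tmplt_ops_usable(const struct tmplt_ops *ops)
{
        if (!ops->create || !ops->destroy || !ops->dump || !ops->reoffload)
                return -EOPNOTSUPP;
        return 0;
}
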
index e5314a3..efb9d28 100644
@@ -2721,6 +2721,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
        kfree(tmplt);
 }
 
+static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
+                              flow_setup_cb_t *cb, void *cb_priv)
+{
+       struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
+       struct flow_cls_offload cls_flower = {};
+
+       cls_flower.rule = flow_rule_alloc(0);
+       if (!cls_flower.rule)
+               return;
+
+       cls_flower.common.chain_index = chain->index;
+       cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
+                                  FLOW_CLS_TMPLT_DESTROY;
+       cls_flower.cookie = (unsigned long) tmplt;
+       cls_flower.rule->match.dissector = &tmplt->dissector;
+       cls_flower.rule->match.mask = &tmplt->mask;
+       cls_flower.rule->match.key = &tmplt->dummy_key;
+
+       cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+       kfree(cls_flower.rule);
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
@@ -3628,6 +3650,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .bind_class     = fl_bind_class,
        .tmplt_create   = fl_tmplt_create,
        .tmplt_destroy  = fl_tmplt_destroy,
+       .tmplt_reoffload = fl_tmplt_reoffload,
        .tmplt_dump     = fl_tmplt_dump,
        .get_exts       = fl_get_exts,
        .owner          = THIS_MODULE,
index 52f7c4f..5a33908 100644
@@ -164,7 +164,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
        }
        if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
            (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
-           !list_empty(&smc->conn.lgr->list)) {
+           !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
                struct smc_connection *conn = &smc->conn;
                struct smcd_diag_dmbinfo dinfo;
                struct smcd_dev *smcd = conn->lgr->smcd;
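An SMC-D connection can show up in the diagnostic dump before its RMB descriptor exists, so the DMB info block is now emitted only when conn->rmb_desc is non-NULL as well. A minimal sketch of guarding an optional sub-object before building extended dump output; the types are placeholders:

#include <stdbool.h>
#include <stddef.h>

struct rmb_desc { unsigned long long token; };  /* placeholder */
struct conn     { struct rmb_desc *rmb_desc; }; /* placeholder */

/* Extended info is emitted only if every pointer it dereferences exists. */
static bool want_dmb_info(const struct conn *c, bool ext_requested)
{
        return ext_requested && c->rmb_desc != NULL;
}
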
index a9ac85e..1034538 100644
@@ -206,7 +206,6 @@ config CFG80211_KUNIT_TEST
        depends on KUNIT
        depends on CFG80211
        default KUNIT_ALL_TESTS
-       depends on !KERNEL_6_2
        help
          Enable this option to test cfg80211 functions with kunit.
 
index 60877b5..b097004 100644
@@ -4020,6 +4020,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                }
                wiphy_unlock(&rdev->wiphy);
 
+               if_start = 0;
                wp_idx++;
        }
  out:
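The interface dump resumes across netlink messages using a wiphy index plus a per-wiphy interface offset; without resetting if_start when advancing to the next wiphy, every wiphy after a partial batch would skip its first interfaces. A small standalone sketch of the two-level resume pattern (all names are illustrative):

/* Resumable two-level iteration: outer index plus an inner start offset.
 * The inner offset only applies to the outer item we stopped in and must
 * be cleared once that item is finished.
 */
struct dump_state { int wp_start; int if_start; };

static void dump_all(struct dump_state *st, int n_wiphy, const int *n_if,
                     int (*emit)(int wiphy, int ifidx))
{
        for (int w = st->wp_start; w < n_wiphy; w++) {
                for (int i = st->if_start; i < n_if[w]; i++) {
                        if (emit(w, i)) {       /* message full: remember position */
                                st->wp_start = w;
                                st->if_start = i;
                                return;
                        }
                }
                st->if_start = 0;   /* next wiphy starts from its first interface */
        }
        st->wp_start = n_wiphy;     /* done */
}
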
index 9f13aa3..1eadfac 100644
@@ -167,8 +167,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                contd = XDP_PKT_CONTD;
 
        err = __xsk_rcv_zc(xs, xskb, len, contd);
-       if (err || likely(!frags))
-               goto out;
+       if (err)
+               goto err;
+       if (likely(!frags))
+               return 0;
 
        xskb_list = &xskb->pool->xskb_list;
        list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
@@ -177,11 +179,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                len = pos->xdp.data_end - pos->xdp.data;
                err = __xsk_rcv_zc(xs, pos, len, contd);
                if (err)
-                       return err;
+                       goto err;
                list_del(&pos->xskb_list_node);
        }
 
-out:
+       return 0;
+err:
+       xsk_buff_free(xdp);
        return err;
 }
 
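On any failure while receiving a multi-buffer frame in zero-copy mode, the buffer is now released through a single err label instead of being leaked on the early exits. A compact sketch of funnelling all failure paths through one cleanup label; `struct buf` and `consume()` are made up for illustration:

#include <stdlib.h>

struct buf { void *data; };

/* Toy consumer: fails when there is nothing to hand to the user. */
static int consume(struct buf *b)
{
        return (b && b->data) ? 0 : -1;
}

static void buf_free(struct buf *b)
{
        free(b);
}

/* All failure exits funnel through one label that releases the head
 * buffer, mirroring the new err: label in the hunk above.
 */
static int rcv(struct buf *head, struct buf **frags, int n)
{
        int err = consume(head);

        if (err)
                goto err;
        for (int i = 0; i < n; i++) {
                err = consume(frags[i]);
                if (err)
                        goto err;
        }
        return 0;
err:
        buf_free(head);
        return err;
}
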
index 28711cc..ce60ecd 100644
@@ -555,6 +555,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
+       xskb->xdp.flags = 0;
 
        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
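Buffers returned by xp_alloc() may be recycled, so transient per-use state such as xdp_buff::flags is cleared before the buffer is handed out again, alongside data and data_meta. A tiny sketch of resetting per-object state on every pool allocation; the structure is illustrative:

#include <stddef.h>

struct pooled_buf {
        unsigned int flags;     /* per-use state that must not leak between users */
        size_t len;
        struct pooled_buf *next_free;
};

static struct pooled_buf *free_list;

/* Recycled objects get their transient state cleared before reuse. */
static struct pooled_buf *pool_get(void)
{
        struct pooled_buf *b = free_list;

        if (!b)
                return NULL;
        free_list = b->next_free;
        b->flags = 0;
        b->len = 0;
        return b;
}
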
index c54d169..d508486 100755
@@ -162,7 +162,7 @@ prio_arp()
        local mode=$1
 
        for primary_reselect in 0 1 2; do
-               prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+               prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
                log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
        done
 }
@@ -178,7 +178,7 @@ prio_ns()
        fi
 
        for primary_reselect in 0 1 2; do
-               prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+               prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
                log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
        done
 }
@@ -194,9 +194,9 @@ prio()
 
        for mode in $modes; do
                prio_miimon $mode
-               prio_arp $mode
-               prio_ns $mode
        done
+       prio_arp "active-backup"
+       prio_ns "active-backup"
 }
 
 arp_validate_test()
index 4855ef5..f98435c 100755
@@ -270,6 +270,7 @@ for port in 0 1; do
        echo 1 > $NSIM_DEV_SYS/new_port
     fi
     NSIM_NETDEV=`get_netdev_name old_netdevs`
+    ifconfig $NSIM_NETDEV up
 
     msg="new NIC device created"
     exp0=( 0 0 0 0 )
@@ -431,6 +432,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "overflow NIC table"
@@ -488,6 +490,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "overflow NIC table"
@@ -544,6 +547,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "destroy NIC"
@@ -573,6 +577,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -633,6 +638,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
@@ -688,6 +694,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -747,6 +754,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -877,6 +885,7 @@ msg="re-add a port"
 
 echo 2 > $NSIM_DEV_SYS/del_port
 echo 2 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
 check_tables
 
 msg="replace VxLAN in overflow table"
index 8da562a..19ff750 100644
@@ -1,5 +1,6 @@
 CONFIG_USER_NS=y
 CONFIG_NET_NS=y
+CONFIG_BONDING=m
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
 CONFIG_NUMA=y
@@ -14,9 +15,13 @@ CONFIG_VETH=y
 CONFIG_NET_IPVTI=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
 CONFIG_BRIDGE=y
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_VLAN_8021Q=y
 CONFIG_IFB=y
+CONFIG_INET_DIAG=y
+CONFIG_IP_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_ADVANCED=y
 CONFIG_NF_CONNTRACK=m
@@ -25,15 +30,36 @@ CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP_NF_NAT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_L2TP_ETH=m
+CONFIG_L2TP_IP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_V3=y
+CONFIG_MACSEC=m
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MPLS=y
+CONFIG_MPTCP=y
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_IPV6=y
 CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_NAT=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_HTB=m
 CONFIG_NET_SCH_FQ=m
 CONFIG_NET_SCH_ETF=m
 CONFIG_NET_SCH_NETEM=y
+CONFIG_PSAMPLE=m
+CONFIG_TCP_MD5SIG=y
 CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_KALLSYMS=y
+CONFIG_TLS=m
 CONFIG_TRACEPOINTS=y
 CONFIG_NET_DROP_MONITOR=m
 CONFIG_NETDEVSIM=m
@@ -48,7 +74,9 @@ CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
 CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_AMT=m
+CONFIG_TUN=y
 CONFIG_VXLAN=m
 CONFIG_IP_SCTP=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_CRYPTO_ARIA=y
+CONFIG_XFRM_INTERFACE=m
index a26c562..4287a85 100755
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 readonly ksft_skip=4
@@ -33,6 +33,10 @@ chk_rps() {
 
        rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
        printf "%-60s" "$msg"
+
+       # In case there is more than 32 CPUs we need to remove commas from masks
+       rps_mask=${rps_mask//,}
+       expected_rps_mask=${expected_rps_mask//,}
        if [ $rps_mask -eq $expected_rps_mask ]; then
                echo "[ ok ]"
        else
index a148181..e9fa14e 100644
@@ -3,19 +3,16 @@
 #define _GNU_SOURCE
 #include <sched.h>
 
+#include <fcntl.h>
+
 #include <netinet/in.h>
 #include <sys/socket.h>
 #include <sys/sysinfo.h>
 
 #include "../kselftest_harness.h"
 
-#define CLIENT_PER_SERVER      32 /* More sockets, more reliable */
-#define NR_SERVER              self->nproc
-#define NR_CLIENT              (CLIENT_PER_SERVER * NR_SERVER)
-
 FIXTURE(so_incoming_cpu)
 {
-       int nproc;
        int *servers;
        union {
                struct sockaddr addr;
@@ -56,12 +53,47 @@ FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
        .when_to_set = AFTER_ALL_LISTEN,
 };
 
+static void write_sysctl(struct __test_metadata *_metadata,
+                        char *filename, char *string)
+{
+       int fd, len, ret;
+
+       fd = open(filename, O_WRONLY);
+       ASSERT_NE(fd, -1);
+
+       len = strlen(string);
+       ret = write(fd, string, len);
+       ASSERT_EQ(ret, len);
+}
+
+static void setup_netns(struct __test_metadata *_metadata)
+{
+       ASSERT_EQ(unshare(CLONE_NEWNET), 0);
+       ASSERT_EQ(system("ip link set lo up"), 0);
+
+       write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001");
+       write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0");
+}
+
+#define NR_PORT                                (60001 - 10000 - 1)
+#define NR_CLIENT_PER_SERVER_DEFAULT   32
+static int nr_client_per_server, nr_server, nr_client;
+
 FIXTURE_SETUP(so_incoming_cpu)
 {
-       self->nproc = get_nprocs();
-       ASSERT_LE(2, self->nproc);
+       setup_netns(_metadata);
+
+       nr_server = get_nprocs();
+       ASSERT_LE(2, nr_server);
+
+       if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
+               nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT;
+       else
+               nr_client_per_server = NR_PORT / nr_server;
+
+       nr_client = nr_client_per_server * nr_server;
 
-       self->servers = malloc(sizeof(int) * NR_SERVER);
+       self->servers = malloc(sizeof(int) * nr_server);
        ASSERT_NE(self->servers, NULL);
 
        self->in_addr.sin_family = AF_INET;
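The reworked fixture runs in a private netns with a roughly 50k-port local port range, so the per-server client count is capped to keep the total number of connections within that range. The arithmetic from the hunk as a standalone helper (the macros mirror the values used in the test):

#define NR_PORT                         (60001 - 10000 - 1)
#define NR_CLIENT_PER_SERVER_DEFAULT    32

/* Use the default fan-out unless nr_server * default would exhaust the
 * local port range; in that case spread the available ports evenly.
 */
static int clients_per_server(int nr_server)
{
        if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
                return NR_CLIENT_PER_SERVER_DEFAULT;
        return NR_PORT / nr_server;
}
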
@@ -74,7 +106,7 @@ FIXTURE_TEARDOWN(so_incoming_cpu)
 {
        int i;
 
-       for (i = 0; i < NR_SERVER; i++)
+       for (i = 0; i < nr_server; i++)
                close(self->servers[i]);
 
        free(self->servers);
@@ -110,10 +142,10 @@ int create_server(struct __test_metadata *_metadata,
        if (variant->when_to_set == BEFORE_LISTEN)
                set_so_incoming_cpu(_metadata, fd, cpu);
 
-       /* We don't use CLIENT_PER_SERVER here not to block
+       /* We don't use nr_client_per_server here not to block
         * this test at connect() if SO_INCOMING_CPU is broken.
         */
-       ret = listen(fd, NR_CLIENT);
+       ret = listen(fd, nr_client);
        ASSERT_EQ(ret, 0);
 
        if (variant->when_to_set == AFTER_LISTEN)
@@ -128,7 +160,7 @@ void create_servers(struct __test_metadata *_metadata,
 {
        int i, ret;
 
-       for (i = 0; i < NR_SERVER; i++) {
+       for (i = 0; i < nr_server; i++) {
                self->servers[i] = create_server(_metadata, self, variant, i);
 
                if (i == 0) {
@@ -138,7 +170,7 @@ void create_servers(struct __test_metadata *_metadata,
        }
 
        if (variant->when_to_set == AFTER_ALL_LISTEN) {
-               for (i = 0; i < NR_SERVER; i++)
+               for (i = 0; i < nr_server; i++)
                        set_so_incoming_cpu(_metadata, self->servers[i], i);
        }
 }
@@ -149,7 +181,7 @@ void create_clients(struct __test_metadata *_metadata,
        cpu_set_t cpu_set;
        int i, j, fd, ret;
 
-       for (i = 0; i < NR_SERVER; i++) {
+       for (i = 0; i < nr_server; i++) {
                CPU_ZERO(&cpu_set);
 
                CPU_SET(i, &cpu_set);
@@ -162,7 +194,7 @@ void create_clients(struct __test_metadata *_metadata,
                ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
                ASSERT_EQ(ret, 0);
 
-               for (j = 0; j < CLIENT_PER_SERVER; j++) {
+               for (j = 0; j < nr_client_per_server; j++) {
                        fd  = socket(AF_INET, SOCK_STREAM, 0);
                        ASSERT_NE(fd, -1);
 
@@ -180,8 +212,8 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
        int i, j, fd, cpu, ret, total = 0;
        socklen_t len = sizeof(int);
 
-       for (i = 0; i < NR_SERVER; i++) {
-               for (j = 0; j < CLIENT_PER_SERVER; j++) {
+       for (i = 0; i < nr_server; i++) {
+               for (j = 0; j < nr_client_per_server; j++) {
                        /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
                        fd = accept(self->servers[i], &self->addr, &self->addrlen);
                        ASSERT_NE(fd, -1);
@@ -195,7 +227,7 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
                }
        }
 
-       ASSERT_EQ(total, NR_CLIENT);
+       ASSERT_EQ(total, nr_client);
        TH_LOG("SO_INCOMING_CPU is very likely to be "
               "working correctly with %d sockets.", total);
 }