Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf
author    Jakub Kicinski <kuba@kernel.org>
          Thu, 28 Apr 2022 16:55:59 +0000 (09:55 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Thu, 28 Apr 2022 16:56:00 +0000 (09:56 -0700)
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

1) Fix incorrect TCP connection tracking window reset for non-syn
   packets, from Florian Westphal (see the sketch after the sign-off below).

2) Fix an incorrect CONFIG_NFT_FLOW_OFFLOAD dependency for the conntrack
   udp offload timeout sysctl, from Volodymyr Mytnyk.

3) Fix nft_socket to only do socket lookups when an input device is
   available, avoiding bogus lookups from the output path, from Florian Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: nft_socket: only do sk lookups when indev is available
  netfilter: conntrack: fix udp offload timeout sysctl
  netfilter: nf_conntrack_tcp: re-init for syn packets only
====================

Link: https://lore.kernel.org/r/20220428142109.38726-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
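
The netfilter files themselves do not appear in the file list below, so as
context for fix 1 above, here is a minimal, self-contained sketch of the
idea: conntrack's per-direction TCP window state should only be (re)seeded
by packets that actually carry SYN, so a stray non-syn packet can no longer
wipe out the learned window. The names below are hypothetical stand-ins,
not the actual nf_conntrack_proto_tcp.c internals.

/* Sketch only: hypothetical, simplified stand-in for conntrack's
 * per-direction TCP window tracking state.
 */
#include <stdbool.h>
#include <stdint.h>

struct tcp_win_state {
	uint32_t td_end;	/* highest sequence number seen so far */
	uint32_t td_maxwin;	/* largest window advertised so far */
	bool	 seen_init;
};

void maybe_reinit_window(struct tcp_win_state *st, bool syn,
			 uint32_t seq_end, uint32_t window)
{
	/* Only a packet that actually carries SYN may (re)seed the
	 * tracked window; before the fix, a non-syn packet that merely
	 * looked "newer" could reset it and break the live connection.
	 */
	if (!syn)
		return;

	st->td_end = seq_end;
	st->td_maxwin = window ? window : 1;
	st->seen_init = true;
}
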
19 files changed:
MAINTAINERS
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/freescale/enetc/enetc_qos.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/xsk_buff_pool.h
kernel/kprobes.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bpf/test_run.c
net/core/lwt_bpf.c
net/tls/tls_device.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c

diff --git a/MAINTAINERS b/MAINTAINERS
index d21963b..e86a8e2 100644
@@ -3913,7 +3913,9 @@ BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
+F:     drivers/firmware/broadcom/tee_bnxt_fw.c
 F:     drivers/net/ethernet/broadcom/bnxt/
+F:     include/linux/firmware/broadcom/tee_bnxt_fw.h
 
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 M:     Arend van Spriel <aspriel@gmail.com>
@@ -13624,6 +13626,7 @@ F:      net/core/drop_monitor.c
 
 NETWORKING DRIVERS
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Eric Dumazet <edumazet@google.com>
 M:     Jakub Kicinski <kuba@kernel.org>
 M:     Paolo Abeni <pabeni@redhat.com>
 L:     netdev@vger.kernel.org
@@ -13671,6 +13674,7 @@ F:      tools/testing/selftests/drivers/net/dsa/
 
 NETWORKING [GENERAL]
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Eric Dumazet <edumazet@google.com>
 M:     Jakub Kicinski <kuba@kernel.org>
 M:     Paolo Abeni <pabeni@redhat.com>
 L:     netdev@vger.kernel.org
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c19b072..962253d 100644
@@ -14153,10 +14153,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 
        /* Stop Tx */
        bnx2x_tx_disable(bp);
-       /* Delete all NAPI objects */
-       bnx2x_del_all_napi(bp);
-       if (CNIC_LOADED(bp))
-               bnx2x_del_all_napi_cnic(bp);
        netdev_reset_tc(bp->dev);
 
        del_timer_sync(&bp->timer);
@@ -14261,6 +14257,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
                bnx2x_drain_tx_queues(bp);
                bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
                bnx2x_netif_stop(bp, 1);
+               bnx2x_del_all_napi(bp);
+
+               if (CNIC_LOADED(bp))
+                       bnx2x_del_all_napi_cnic(bp);
+
                bnx2x_free_irq(bp);
 
                /* Report UNLOAD_DONE to MCP */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 79afb1d..9182631 100644
@@ -297,10 +297,6 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
        if (tc < 0 || tc >= priv->num_tx_rings)
                return -EINVAL;
 
-       /* Do not support TXSTART and TX CSUM offload simutaniously */
-       if (ndev->features & NETIF_F_CSUM_MASK)
-               return -EBUSY;
-
        /* TSD and Qbv are mutually exclusive in hardware */
        if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
                return -EBUSY;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 11227f5..9f33ec8 100644
@@ -3731,7 +3731,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
                                         ARRAY_SIZE(out_val));
        if (ret) {
                dev_dbg(&fep->pdev->dev, "no stop mode property\n");
-               return ret;
+               goto out;
        }
 
        fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7768390..5c5931d 100644
@@ -3210,13 +3210,8 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
-               ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
-               ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
-       } else {
-               ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
-               ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
-       }
+       ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+       ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -3231,23 +3226,21 @@ static int ibmvnic_set_ringparam(struct net_device *netdev,
                                 struct netlink_ext_ack *extack)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       int ret;
 
-       ret = 0;
+       if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq  ||
+           ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+               netdev_err(netdev, "Invalid request.\n");
+               netdev_err(netdev, "Max tx buffers = %llu\n",
+                          adapter->max_rx_add_entries_per_subcrq);
+               netdev_err(netdev, "Max rx buffers = %llu\n",
+                          adapter->max_tx_entries_per_subcrq);
+               return -EINVAL;
+       }
+
        adapter->desired.rx_entries = ring->rx_pending;
        adapter->desired.tx_entries = ring->tx_pending;
 
-       ret = wait_for_reset(adapter);
-
-       if (!ret &&
-           (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
-            adapter->req_tx_entries_per_subcrq != ring->tx_pending))
-               netdev_info(netdev,
-                           "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
-                           ring->rx_pending, ring->tx_pending,
-                           adapter->req_rx_add_entries_per_subcrq,
-                           adapter->req_tx_entries_per_subcrq);
-       return ret;
+       return wait_for_reset(adapter);
 }
 
 static void ibmvnic_get_channels(struct net_device *netdev,
@@ -3255,14 +3248,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
-       if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
-               channels->max_rx = adapter->max_rx_queues;
-               channels->max_tx = adapter->max_tx_queues;
-       } else {
-               channels->max_rx = IBMVNIC_MAX_QUEUES;
-               channels->max_tx = IBMVNIC_MAX_QUEUES;
-       }
-
+       channels->max_rx = adapter->max_rx_queues;
+       channels->max_tx = adapter->max_tx_queues;
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->req_rx_queues;
@@ -3275,22 +3262,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
                                struct ethtool_channels *channels)
 {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       int ret;
 
-       ret = 0;
        adapter->desired.rx_queues = channels->rx_count;
        adapter->desired.tx_queues = channels->tx_count;
 
-       ret = wait_for_reset(adapter);
-
-       if (!ret &&
-           (adapter->req_rx_queues != channels->rx_count ||
-            adapter->req_tx_queues != channels->tx_count))
-               netdev_info(netdev,
-                           "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
-                           channels->rx_count, channels->tx_count,
-                           adapter->req_rx_queues, adapter->req_tx_queues);
-       return ret;
+       return wait_for_reset(adapter);
 }
 
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3298,43 +3274,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
        int i;
 
-       switch (stringset) {
-       case ETH_SS_STATS:
-               for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
-                               i++, data += ETH_GSTRING_LEN)
-                       memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+       if (stringset != ETH_SS_STATS)
+               return;
 
-               for (i = 0; i < adapter->req_tx_queues; i++) {
-                       snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
-                       data += ETH_GSTRING_LEN;
+       for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+               memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
 
-                       snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
-                       data += ETH_GSTRING_LEN;
+       for (i = 0; i < adapter->req_tx_queues; i++) {
+               snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+               data += ETH_GSTRING_LEN;
 
-                       snprintf(data, ETH_GSTRING_LEN,
-                                "tx%d_dropped_packets", i);
-                       data += ETH_GSTRING_LEN;
-               }
+               snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+               data += ETH_GSTRING_LEN;
 
-               for (i = 0; i < adapter->req_rx_queues; i++) {
-                       snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
-                       data += ETH_GSTRING_LEN;
+               snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+               data += ETH_GSTRING_LEN;
+       }
 
-                       snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
-                       data += ETH_GSTRING_LEN;
+       for (i = 0; i < adapter->req_rx_queues; i++) {
+               snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+               data += ETH_GSTRING_LEN;
 
-                       snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
-                       data += ETH_GSTRING_LEN;
-               }
-               break;
+               snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+               data += ETH_GSTRING_LEN;
 
-       case ETH_SS_PRIV_FLAGS:
-               for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
-                       strcpy(data + i * ETH_GSTRING_LEN,
-                              ibmvnic_priv_flags[i]);
-               break;
-       default:
-               return;
+               snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+               data += ETH_GSTRING_LEN;
        }
 }
 
@@ -3347,8 +3312,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
                return ARRAY_SIZE(ibmvnic_stats) +
                       adapter->req_tx_queues * NUM_TX_STATS +
                       adapter->req_rx_queues * NUM_RX_STATS;
-       case ETH_SS_PRIV_FLAGS:
-               return ARRAY_SIZE(ibmvnic_priv_flags);
        default:
                return -EOPNOTSUPP;
        }
@@ -3401,26 +3364,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
        }
 }
 
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
-       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
-       return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
-       struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-       bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
-       if (which_maxes)
-               adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
-       else
-               adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
-       return 0;
-}
-
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_drvinfo            = ibmvnic_get_drvinfo,
        .get_msglevel           = ibmvnic_get_msglevel,
@@ -3434,8 +3377,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_sset_count         = ibmvnic_get_sset_count,
        .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
        .get_link_ksettings     = ibmvnic_get_link_ksettings,
-       .get_priv_flags         = ibmvnic_get_priv_flags,
-       .set_priv_flags         = ibmvnic_set_priv_flags,
 };
 
 /* Routines for managing CRQs/sCRQs  */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 8f5cefb..1310c86 100644
 
 #define IBMVNIC_RESET_DELAY 100
 
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
-       "use-server-maxes"
-};
-
 struct ibmvnic_login_buffer {
        __be32 len;
        __be32 version;
@@ -883,7 +878,6 @@ struct ibmvnic_adapter {
        struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
        dma_addr_t ip_offload_ctrl_tok;
        u32 msg_enable;
-       u32 priv_flags;
 
        /* Vital Product Data (VPD) */
        struct ibmvnic_vpd *vpd;
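
The ibmvnic changes above revert the driver-private "use-server-maxes"
ethtool flag: the reported ring and channel maximums now always come from
the adapter, and a request that exceeds them fails with -EINVAL instead of
being silently renegotiated. As a hedged illustration of how those limits
are visible from userspace (plain ethtool ioctl usage, not ibmvnic-specific
code; the interface name is a placeholder), the maximums can be read back
like this:

/* Query ring limits via the classic ethtool ioctl; after this revert,
 * rx_max_pending/tx_max_pending reflect the adapter maximums.
 * Illustrative only; error handling is minimal.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_ringparam rp = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&rp;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}

	printf("rx max %u, tx max %u (current rx %u, tx %u)\n",
	       rp.rx_max_pending, rp.tx_max_pending,
	       rp.rx_pending, rp.tx_pending);
	close(fd);
	return 0;
}

Equivalently, ethtool -g <dev> shows the same values, and ethtool -G
requests beyond them are now rejected.
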
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index e596e1a..69d11ff 100644
@@ -903,7 +903,8 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
        /* Tx IPsec offload doesn't seem to work on this
         * device, so block these requests for now.
         */
-       if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+       sam->flags = sam->flags & ~XFRM_OFFLOAD_IPV6;
+       if (sam->flags != XFRM_OFFLOAD_INBOUND) {
                err = -EOPNOTSUPP;
                goto err_out;
        }
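
The ixgbe_ipsec hunk above tightens the VF SA-offload check: rather than
merely requiring the inbound bit, it strips the (acceptable) IPv6 bit and
then insists that nothing but XFRM_OFFLOAD_INBOUND remains, so requests
carrying any other offload flag are refused. A small stand-alone
illustration of the predicate change; the numeric flag values here are
illustrative stand-ins, the real bits come from include/uapi/linux/xfrm.h.

#include <assert.h>

/* Illustrative stand-ins for the uapi flag bits (values assumed). */
#define XFRM_OFFLOAD_IPV6	1
#define XFRM_OFFLOAD_INBOUND	2

/* Old check: accept anything that has the inbound bit set. */
static int old_accepts(unsigned int flags)
{
	return (flags & XFRM_OFFLOAD_INBOUND) != 0;
}

/* New check: tolerate the IPv6 bit, then require exactly "inbound". */
static int new_accepts(unsigned int flags)
{
	flags &= ~XFRM_OFFLOAD_IPV6;
	return flags == XFRM_OFFLOAD_INBOUND;
}

int main(void)
{
	/* Plain inbound and inbound+IPv6 are accepted by both versions. */
	assert(old_accepts(XFRM_OFFLOAD_INBOUND) && new_accepts(XFRM_OFFLOAD_INBOUND));
	assert(new_accepts(XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_IPV6));

	/* An inbound request with an unknown extra bit used to slip
	 * through the old test but is now rejected.
	 */
	assert(old_accepts(XFRM_OFFLOAD_INBOUND | 0x8));
	assert(!new_accepts(XFRM_OFFLOAD_INBOUND | 0x8));
	return 0;
}
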
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5cb095b..69ef31c 100644
@@ -578,6 +578,7 @@ enum {
 #define HCI_ERROR_CONNECTION_TIMEOUT   0x08
 #define HCI_ERROR_REJ_LIMITED_RESOURCES        0x0d
 #define HCI_ERROR_REJ_BAD_ADDR         0x0f
+#define HCI_ERROR_INVALID_PARAMETERS   0x12
 #define HCI_ERROR_REMOTE_USER_TERM     0x13
 #define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14
 #define HCI_ERROR_REMOTE_POWER_OFF     0x15
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index d537774..8abd082 100644
@@ -1156,7 +1156,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
 
-void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+void hci_conn_failed(struct hci_conn *conn, u8 status);
 
 /*
  * hci_conn_get() and hci_conn_put() are used to control the life-time of an
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 5554ee7..647722e 100644
@@ -97,6 +97,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
                  u16 queue_id, u16 flags);
 int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
                         struct net_device *dev, u16 queue_id);
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_get_pool(struct xsk_buff_pool *pool);
 bool xp_put_pool(struct xsk_buff_pool *pool);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index dbe57df..dd58c0b 100644
@@ -2126,7 +2126,7 @@ static void kretprobe_rethook_handler(struct rethook_node *rh, void *data,
        struct kprobe_ctlblk *kcb;
 
        /* The data must NOT be null. This means rethook data structure is broken. */
-       if (WARN_ON_ONCE(!data))
+       if (WARN_ON_ONCE(!data) || !rp->handler)
                return;
 
        __this_cpu_write(current_kprobe, &rp->kp);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 84312c8..fe803be 100644
@@ -670,7 +670,7 @@ static void le_conn_timeout(struct work_struct *work)
                /* Disable LE Advertising */
                le_disable_advertising(hdev);
                hci_dev_lock(hdev);
-               hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+               hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
                hci_dev_unlock(hdev);
                return;
        }
@@ -873,7 +873,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
 EXPORT_SYMBOL(hci_get_route);
 
 /* This function requires the caller holds hdev->lock */
-void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 {
        struct hci_dev *hdev = conn->hdev;
        struct hci_conn_params *params;
@@ -886,8 +886,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
                params->conn = NULL;
        }
 
-       conn->state = BT_CLOSED;
-
        /* If the status indicates successful cancellation of
         * the attempt (i.e. Unknown Connection Id) there's no point of
         * notifying failure since we'll go back to keep trying to
@@ -899,10 +897,6 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
                mgmt_connect_failed(hdev, &conn->dst, conn->type,
                                    conn->dst_type, status);
 
-       hci_connect_cfm(conn, status);
-
-       hci_conn_del(conn);
-
        /* Since we may have temporarily stopped the background scanning in
         * favor of connection establishment, we should restart it.
         */
@@ -914,6 +908,28 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
        hci_enable_advertising(hdev);
 }
 
+/* This function requires the caller holds hdev->lock */
+void hci_conn_failed(struct hci_conn *conn, u8 status)
+{
+       struct hci_dev *hdev = conn->hdev;
+
+       bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+       switch (conn->type) {
+       case LE_LINK:
+               hci_le_conn_failed(conn, status);
+               break;
+       case ACL_LINK:
+               mgmt_connect_failed(hdev, &conn->dst, conn->type,
+                                   conn->dst_type, status);
+               break;
+       }
+
+       conn->state = BT_CLOSED;
+       hci_connect_cfm(conn, status);
+       hci_conn_del(conn);
+}
+
 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
 {
        struct hci_conn *conn = data;
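
The hunks above fold the LE-only failure handling into a new
hci_conn_failed() that works for both LE and ACL links and performs the
common BT_CLOSED / hci_connect_cfm() / hci_conn_del() cleanup. A minimal
caller sketch of the new entry point, modelled on the le_conn_timeout()
change above; the surrounding helper is hypothetical, and the hdev->lock
requirement is the one stated in the function's comment.

#include <net/bluetooth/hci_core.h>

/* Hypothetical error path in connection-setup code: any failed attempt,
 * LE or ACL, is now torn down through the same helper.  hdev->lock must
 * be held around hci_conn_failed().
 */
static void example_conn_setup_failed(struct hci_dev *hdev,
				      struct hci_conn *conn)
{
	hci_dev_lock(hdev);
	hci_conn_failed(conn, HCI_ERROR_CONNECTION_TIMEOUT);
	hci_dev_unlock(hdev);
}
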
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index abaabfa..6645166 100644
@@ -2834,7 +2834,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
        bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
        /* All connection failure handling is taken care of by the
-        * hci_le_conn_failed function which is triggered by the HCI
+        * hci_conn_failed function which is triggered by the HCI
         * request completion callbacks used for connecting.
         */
        if (status)
@@ -2859,7 +2859,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
        bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
        /* All connection failure handling is taken care of by the
-        * hci_le_conn_failed function which is triggered by the HCI
+        * hci_conn_failed function which is triggered by the HCI
         * request completion callbacks used for connecting.
         */
        if (status)
@@ -3067,18 +3067,20 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
        struct hci_ev_conn_complete *ev = data;
        struct hci_conn *conn;
+       u8 status = ev->status;
 
-       if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
-               bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for invalid handle");
-               return;
-       }
-
-       bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+       bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
        if (!conn) {
+               /* In case of an error status with no connection pending,
+                * just unlock as there is nothing to clean up.
+                */
+               if (ev->status)
+                       goto unlock;
+
                /* Connection may not exist if auto-connected. Check the bredr
                 * allowlist to see if this device is allowed to auto connect.
                 * If link is an ACL type, create a connection class
@@ -3122,8 +3124,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
-       if (!ev->status) {
+       if (!status) {
                conn->handle = __le16_to_cpu(ev->handle);
+               if (conn->handle > HCI_CONN_HANDLE_MAX) {
+                       bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+                                  conn->handle, HCI_CONN_HANDLE_MAX);
+                       status = HCI_ERROR_INVALID_PARAMETERS;
+                       goto done;
+               }
 
                if (conn->type == ACL_LINK) {
                        conn->state = BT_CONFIG;
@@ -3164,19 +3172,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
                        hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
                                     &cp);
                }
-       } else {
-               conn->state = BT_CLOSED;
-               if (conn->type == ACL_LINK)
-                       mgmt_connect_failed(hdev, &conn->dst, conn->type,
-                                           conn->dst_type, ev->status);
        }
 
        if (conn->type == ACL_LINK)
                hci_sco_setup(conn, ev->status);
 
-       if (ev->status) {
-               hci_connect_cfm(conn, ev->status);
-               hci_conn_del(conn);
+done:
+       if (status) {
+               hci_conn_failed(conn, status);
        } else if (ev->link_type == SCO_LINK) {
                switch (conn->setting & SCO_AIRMODE_MASK) {
                case SCO_AIRMODE_CVSD:
@@ -3185,7 +3188,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
                        break;
                }
 
-               hci_connect_cfm(conn, ev->status);
+               hci_connect_cfm(conn, status);
        }
 
 unlock:
@@ -4676,6 +4679,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
        struct hci_ev_sync_conn_complete *ev = data;
        struct hci_conn *conn;
+       u8 status = ev->status;
 
        switch (ev->link_type) {
        case SCO_LINK:
@@ -4690,12 +4694,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
                return;
        }
 
-       if (__le16_to_cpu(ev->handle) > HCI_CONN_HANDLE_MAX) {
-               bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete for invalid handle");
-               return;
-       }
-
-       bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+       bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
        hci_dev_lock(hdev);
 
@@ -4729,9 +4728,17 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
-       switch (ev->status) {
+       switch (status) {
        case 0x00:
                conn->handle = __le16_to_cpu(ev->handle);
+               if (conn->handle > HCI_CONN_HANDLE_MAX) {
+                       bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+                                  conn->handle, HCI_CONN_HANDLE_MAX);
+                       status = HCI_ERROR_INVALID_PARAMETERS;
+                       conn->state = BT_CLOSED;
+                       break;
+               }
+
                conn->state  = BT_CONNECTED;
                conn->type   = ev->link_type;
 
@@ -4775,8 +4782,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
                }
        }
 
-       hci_connect_cfm(conn, ev->status);
-       if (ev->status)
+       hci_connect_cfm(conn, status);
+       if (status)
                hci_conn_del(conn);
 
 unlock:
@@ -5527,11 +5534,6 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
        struct smp_irk *irk;
        u8 addr_type;
 
-       if (handle > HCI_CONN_HANDLE_MAX) {
-               bt_dev_err(hdev, "Ignoring HCI_LE_Connection_Complete for invalid handle");
-               return;
-       }
-
        hci_dev_lock(hdev);
 
        /* All controllers implicitly stop advertising in the event of a
@@ -5541,6 +5543,12 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 
        conn = hci_lookup_le_connect(hdev);
        if (!conn) {
+               /* In case of an error status with no connection pending,
+                * just unlock as there is nothing to clean up.
+                */
+               if (status)
+                       goto unlock;
+
                conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
                if (!conn) {
                        bt_dev_err(hdev, "no memory for new connection");
@@ -5603,8 +5611,14 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 
        conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
 
+       if (handle > HCI_CONN_HANDLE_MAX) {
+               bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+                          HCI_CONN_HANDLE_MAX);
+               status = HCI_ERROR_INVALID_PARAMETERS;
+       }
+
        if (status) {
-               hci_le_conn_failed(conn, status);
+               hci_conn_failed(conn, status);
                goto unlock;
        }
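
A pattern shared by the hci_event.c hunks above: an out-of-range handle is
no longer dropped at the top of the event handler (which left the hci_conn
object behind and the stack out of sync); the handle is checked only after
the connection object has been looked up or created, and a violation is
converted into HCI_ERROR_INVALID_PARAMETERS so it takes the normal failure
path. Condensed into one hypothetical helper built only from symbols
visible in this diff (not the actual handler code):

/* Hypothetical condensation of the handle check now used by the
 * connection-complete handlers above.
 */
static u8 example_check_handle(struct hci_dev *hdev, struct hci_conn *conn,
			       u16 handle, u8 status)
{
	if (status)
		return status;		/* failed anyway, nothing to check */

	conn->handle = handle;
	if (conn->handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   conn->handle, HCI_CONN_HANDLE_MAX);
		/* Feed the failure path instead of silently ignoring the
		 * event and leaking a half-initialized hci_conn.
		 */
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	return 0;
}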
 
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 8f4c569..13600bf 100644
@@ -4408,12 +4408,21 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 reason)
 {
+       int err;
+
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                return hci_disconnect_sync(hdev, conn, reason);
        case BT_CONNECT:
-               return hci_connect_cancel_sync(hdev, conn);
+               err = hci_connect_cancel_sync(hdev, conn);
+               /* Cleanup hci_conn object if it cannot be cancelled as it
+                * likely means the controller and host stack are out of sync.
+                */
+               if (err)
+                       hci_conn_failed(conn, err);
+
+               return err;
        case BT_CONNECT2:
                return hci_reject_conn_sync(hdev, conn, reason);
        default:
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index e7b9c26..af709c1 100644
@@ -108,6 +108,7 @@ struct xdp_test_data {
        struct page_pool *pp;
        struct xdp_frame **frames;
        struct sk_buff **skbs;
+       struct xdp_mem_info mem;
        u32 batch_size;
        u32 frame_cnt;
 };
@@ -147,7 +148,6 @@ static void xdp_test_run_init_page(struct page *page, void *arg)
 
 static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
 {
-       struct xdp_mem_info mem = {};
        struct page_pool *pp;
        int err = -ENOMEM;
        struct page_pool_params pp_params = {
@@ -174,7 +174,7 @@ static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_c
        }
 
        /* will copy 'mem.id' into pp->xdp_mem_id */
-       err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+       err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
        if (err)
                goto err_mmodel;
 
@@ -202,6 +202,7 @@ err_skbs:
 
 static void xdp_test_run_teardown(struct xdp_test_data *xdp)
 {
+       xdp_unreg_mem_model(&xdp->mem);
        page_pool_destroy(xdp->pp);
        kfree(xdp->frames);
        kfree(xdp->skbs);
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 349480e..8b6b5e7 100644
@@ -159,10 +159,8 @@ static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        return dst->lwtstate->orig_output(net, sk, skb);
 }
 
-static int xmit_check_hhlen(struct sk_buff *skb)
+static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
 {
-       int hh_len = skb_dst(skb)->dev->hard_header_len;
-
        if (skb_headroom(skb) < hh_len) {
                int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
 
@@ -274,6 +272,7 @@ static int bpf_xmit(struct sk_buff *skb)
 
        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->xmit.prog) {
+               int hh_len = dst->dev->hard_header_len;
                __be16 proto = skb->protocol;
                int ret;
 
@@ -291,7 +290,7 @@ static int bpf_xmit(struct sk_buff *skb)
                        /* If the header was expanded, headroom might be too
                         * small for L2 header to come, expand as needed.
                         */
-                       ret = xmit_check_hhlen(skb);
+                       ret = xmit_check_hhlen(skb, hh_len);
                        if (unlikely(ret))
                                return ret;
 
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 12f7b56..af875ad 100644
@@ -483,11 +483,13 @@ handle_error:
                copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
                copy = min_t(size_t, copy, (max_open_record_len - record->len));
 
-               rc = tls_device_copy_data(page_address(pfrag->page) +
-                                         pfrag->offset, copy, msg_iter);
-               if (rc)
-                       goto handle_error;
-               tls_append_frag(record, pfrag, copy);
+               if (copy) {
+                       rc = tls_device_copy_data(page_address(pfrag->page) +
+                                                 pfrag->offset, copy, msg_iter);
+                       if (rc)
+                               goto handle_error;
+                       tls_append_frag(record, pfrag, copy);
+               }
 
                size -= copy;
                if (!size) {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 2c34cae..3a93480 100644
@@ -639,7 +639,7 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
        if (sk_can_busy_loop(sk))
                sk_busy_loop(sk, 1); /* only support non-blocking sockets */
 
-       if (xsk_no_wakeup(sk))
+       if (xs->zc && xsk_no_wakeup(sk))
                return 0;
 
        pool = xs->pool;
@@ -967,6 +967,19 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 
                        xp_get_pool(umem_xs->pool);
                        xs->pool = umem_xs->pool;
+
+                       /* If the underlying shared umem was created without
+                        * a Tx ring, allocate the Tx descs array that the Tx
+                        * batching API utilizes.
+                        */
+                       if (xs->tx && !xs->pool->tx_descs) {
+                               err = xp_alloc_tx_descs(xs->pool, xs);
+                               if (err) {
+                                       xp_put_pool(xs->pool);
+                                       sockfd_put(sock);
+                                       goto out_unlock;
+                               }
+                       }
                }
 
                xdp_get_umem(umem_xs->umem);
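
The xsk.c hunk above covers the case where a socket binds with
XDP_SHARED_UMEM to a pool whose original owner had no Tx ring: the pool's
tx_descs array, needed by the Tx batching API, is allocated lazily at bind
time. A hedged userspace sketch of that scenario using the raw AF_XDP
setsockopt/bind interface; interface index, queue id and ring sizes are
placeholders, and umem registration plus ring mmap()s are omitted for
brevity.

#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_XDP
#define AF_XDP 44		/* fallback for older libc headers (assumed) */
#endif
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

/* Second AF_XDP socket sharing the first socket's umem, but adding a
 * Tx ring the first socket never had.
 */
static int bind_shared_with_tx(int first_fd, int ifindex, int queue_id)
{
	struct sockaddr_xdp sxdp = { 0 };
	int ring_size = 2048;
	int fd = socket(AF_XDP, SOCK_RAW, 0);

	if (fd < 0)
		return -1;

	/* This socket wants Rx and Tx rings of its own... */
	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_size, sizeof(ring_size));
	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_size, sizeof(ring_size));

	/* ...but shares the umem (and thus the buffer pool) of first_fd. */
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	sxdp.sxdp_flags = XDP_SHARED_UMEM;
	sxdp.sxdp_shared_umem_fd = first_fd;

	/* Before this fix, the kernel-side pool could be left without a
	 * tx_descs array here; now xp_alloc_tx_descs() fills it in.
	 */
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

With the fix, the Tx path on the second socket has a tx_descs array to work
with even though the pool was originally created without one.
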
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index af040ff..87bdd71 100644
@@ -42,6 +42,16 @@ void xp_destroy(struct xsk_buff_pool *pool)
        kvfree(pool);
 }
 
+int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
+{
+       pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
+                                 GFP_KERNEL);
+       if (!pool->tx_descs)
+               return -ENOMEM;
+
+       return 0;
+}
+
 struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem)
 {
@@ -59,11 +69,9 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
        if (!pool->heads)
                goto out;
 
-       if (xs->tx) {
-               pool->tx_descs = kcalloc(xs->tx->nentries, sizeof(*pool->tx_descs), GFP_KERNEL);
-               if (!pool->tx_descs)
+       if (xs->tx)
+               if (xp_alloc_tx_descs(pool, xs))
                        goto out;
-       }
 
        pool->chunk_mask = ~((u64)umem->chunk_size - 1);
        pool->addrs_cnt = umem->size;