qed: Add driver infrastructure for handling mfw requests.
author    Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
          Tue, 22 May 2018 07:28:41 +0000 (00:28 -0700)
committer David S. Miller <davem@davemloft.net>
          Wed, 23 May 2018 03:29:54 +0000 (23:29 -0400)
MFW requests the TLVs in interrupt context. Extracting the required
data from the upper layers and populating the TLVs requires process
context. This patch adds per-hwfn workqueues for processing the TLV
requests, and implements requesting of the TLV values from the
appropriate protocol driver.

Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
Signed-off-by: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
include/linux/qed/qed_if.h
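
For context, below is a minimal illustrative sketch (not part of this patch)
of how an upper-layer driver could implement the new get_generic_tlv_data()
callback added to qed_common_cb_ops. The qede_example_* name and the
qede_dev/net_device plumbing are assumptions for illustration only:

static void qede_example_get_generic_tlv_data(void *dev,
					      struct qed_generic_tlvs *data)
{
	/* 'dev' is the cookie registered as cdev->ops_cookie; it is assumed
	 * here to resolve to the upper-layer device (illustrative only).
	 */
	struct net_device *ndev = ((struct qede_dev *)dev)->ndev;

	/* Report the offloads currently enabled on the netdev. */
	if (ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	/* Report the primary MAC address; entries left zeroed fail the
	 * is_valid_ether_addr() check in qed_fill_generic_tlv_data() and
	 * are skipped.
	 */
	ether_addr_copy(data->mac[0], ndev->dev_addr);
}

Such a callback would be hooked up through the driver's qed_common_cb_ops
instance (for the L2 driver, the .common member of its qed_eth_cb_ops).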

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index dfdbe52..00db340 100644
@@ -515,6 +515,10 @@ struct qed_simd_fp_handler {
        void    (*func)(void *);
 };
 
+enum qed_slowpath_wq_flag {
+       QED_SLOWPATH_MFW_TLV_REQ,
+};
+
 struct qed_hwfn {
        struct qed_dev                  *cdev;
        u8                              my_id;          /* ID inside the PF */
@@ -644,6 +648,9 @@ struct qed_hwfn {
 #endif
 
        struct z_stream_s               *stream;
+       struct workqueue_struct *slowpath_wq;
+       struct delayed_work slowpath_task;
+       unsigned long slowpath_task_flags;
 };
 
 struct pci_params {
@@ -908,6 +915,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
                            union qed_mcp_protocol_stats *stats);
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
 
 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
                          enum qed_mfw_tlv_type type,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index cbf0ea9..68c4399 100644
@@ -946,6 +946,68 @@ static void qed_update_pf_params(struct qed_dev *cdev,
        }
 }
 
+static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+{
+       int i;
+
+       if (IS_VF(cdev))
+               return;
+
+       for_each_hwfn(cdev, i) {
+               if (!cdev->hwfns[i].slowpath_wq)
+                       continue;
+
+               flush_workqueue(cdev->hwfns[i].slowpath_wq);
+               destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+       }
+}
+
+static void qed_slowpath_task(struct work_struct *work)
+{
+       struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+                                            slowpath_task.work);
+       struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+       if (!ptt) {
+               queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+               return;
+       }
+
+       if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
+                              &hwfn->slowpath_task_flags))
+               qed_mfw_process_tlv_req(hwfn, ptt);
+
+       qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_slowpath_wq_start(struct qed_dev *cdev)
+{
+       struct qed_hwfn *hwfn;
+       char name[NAME_SIZE];
+       int i;
+
+       if (IS_VF(cdev))
+               return 0;
+
+       for_each_hwfn(cdev, i) {
+               hwfn = &cdev->hwfns[i];
+
+               snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+                        cdev->pdev->bus->number,
+                        PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+
+               hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+               if (!hwfn->slowpath_wq) {
+                       DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+                       return -ENOMEM;
+               }
+
+               INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+       }
+
+       return 0;
+}
+
 static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
 {
@@ -961,6 +1023,9 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        if (qed_iov_wq_start(cdev))
                goto err;
 
+       if (qed_slowpath_wq_start(cdev))
+               goto err;
+
        if (IS_PF(cdev)) {
                rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                      &cdev->pdev->dev);
@@ -1095,6 +1160,8 @@ err:
 
        qed_iov_wq_stop(cdev, false);
 
+       qed_slowpath_wq_stop(cdev);
+
        return rc;
 }
 
@@ -1103,6 +1170,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
        if (!cdev)
                return -ENODEV;
 
+       qed_slowpath_wq_stop(cdev);
+
        qed_ll2_dealloc_if(cdev);
 
        if (IS_PF(cdev)) {
@@ -2089,8 +2158,88 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
        }
 }
 
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
+{
+       DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+                  "Scheduling slowpath task [Flag: %d]\n",
+                  QED_SLOWPATH_MFW_TLV_REQ);
+       smp_mb__before_atomic();
+       set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+       smp_mb__after_atomic();
+       queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+
+       return 0;
+}
+
+static void
+qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
+{
+       struct qed_common_cb_ops *op = cdev->protocol_ops.common;
+       struct qed_eth_stats_common *p_common;
+       struct qed_generic_tlvs gen_tlvs;
+       struct qed_eth_stats stats;
+       int i;
+
+       memset(&gen_tlvs, 0, sizeof(gen_tlvs));
+       op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
+
+       if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
+               tlv->flags.ipv4_csum_offload = true;
+       if (gen_tlvs.feat_flags & QED_TLV_LSO)
+               tlv->flags.lso_supported = true;
+       tlv->flags.b_set = true;
+
+       for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
+               if (is_valid_ether_addr(gen_tlvs.mac[i])) {
+                       ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
+                       tlv->mac_set[i] = true;
+               }
+       }
+
+       qed_get_vport_stats(cdev, &stats);
+       p_common = &stats.common;
+       tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+                        p_common->rx_bcast_pkts;
+       tlv->rx_frames_set = true;
+       tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+                       p_common->rx_bcast_bytes;
+       tlv->rx_bytes_set = true;
+       tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+                        p_common->tx_bcast_pkts;
+       tlv->tx_frames_set = true;
+       tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+                       p_common->tx_bcast_bytes;
+       tlv->tx_bytes_set = true;
+}
+
 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
                          union qed_mfw_tlv_data *tlv_buf)
 {
-       return -EINVAL;
+       struct qed_dev *cdev = hwfn->cdev;
+       struct qed_common_cb_ops *ops;
+
+       ops = cdev->protocol_ops.common;
+       if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
+               DP_NOTICE(hwfn, "Can't collect TLV management info\n");
+               return -EINVAL;
+       }
+
+       switch (type) {
+       case QED_MFW_TLV_GENERIC:
+               qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
+               break;
+       case QED_MFW_TLV_ETH:
+               ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
+               break;
+       case QED_MFW_TLV_FCOE:
+               ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
+               break;
+       case QED_MFW_TLV_ISCSI:
+               ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index e80f5e7..2612e3e 100644
@@ -1622,6 +1622,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
                case MFW_DRV_MSG_S_TAG_UPDATE:
                        qed_mcp_update_stag(p_hwfn, p_ptt);
                        break;
+               case MFW_DRV_MSG_GET_TLV_REQ:
+                       qed_mfw_tlv_req(p_hwfn);
+                       break;
                default:
                        DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
                        rc = -EINVAL;
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 92b5352..44af652 100644
@@ -751,6 +751,14 @@ struct qed_int_info {
        u8                      used_cnt;
 };
 
+struct qed_generic_tlvs {
+#define QED_TLV_IP_CSUM         BIT(0)
+#define QED_TLV_LSO             BIT(1)
+       u16 feat_flags;
+#define QED_TLV_MAC_COUNT      3
+       u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
+};
+
 #define QED_NVM_SIGNATURE 0x12435687
 
 enum qed_nvm_flash_cmd {
@@ -765,6 +773,8 @@ struct qed_common_cb_ops {
        void    (*link_update)(void                     *dev,
                               struct qed_link_output   *link);
        void    (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
+       void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
+       void (*get_protocol_tlv_data)(void *dev, void *data);
 };
 
 struct qed_selftest_ops {