cxgb4: collect hardware queue descriptors
author     Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
           Mon, 3 Sep 2018 12:11:29 +0000 (17:41 +0530)
committer  David S. Miller <davem@davemloft.net>
           Tue, 4 Sep 2018 05:10:37 +0000 (22:10 -0700)
Collect descriptors of all ULD and LLD hardware queues managed by
the LLD: NIC TX/RX/FL queues, control queues, the firmware event and
interrupt queues, the PTP TXQ, and the ULD TX/RX/FL/CIQ queues.
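
For each queue, the dump carries one cudbg_qdesc_entry record (qtype,
qid, descriptor size, descriptor count, and the raw descriptor bytes),
preceded by a cudbg_ver_hdr and a cudbg_qdesc_info header.  As an
illustration only (not part of this patch), a minimal consumer-side
sketch of walking one CUDBG_QDESC payload could look like the
following; the struct mirrors assume the layouts added in
cudbg_entity.h below, and the caller is assumed to have already
skipped the leading cudbg_ver_hdr:

  #include <stdint.h>
  #include <stdio.h>

  /* Userspace mirror of struct cudbg_qdesc_info (cudbg_entity.h) */
  struct qdesc_info {
          uint32_t qdesc_entry_size;
          uint32_t num_queues;
          uint8_t data[];
  };

  /* Userspace mirror of struct cudbg_qdesc_entry (cudbg_entity.h) */
  struct qdesc_entry {
          uint32_t data_size;
          uint32_t qtype;
          uint32_t qid;
          uint32_t desc_size;
          uint32_t num_desc;
          uint8_t data[];
  };

  /* buf points at the cudbg_qdesc_info header, i.e. just past the
   * cudbg_ver_hdr that cudbg_collect_qdesc() writes first.
   */
  static void walk_qdesc(const uint8_t *buf)
  {
          const struct qdesc_info *info = (const void *)buf;
          const struct qdesc_entry *e = (const void *)info->data;
          uint32_t i;

          for (i = 0; i < info->num_queues; i++) {
                  printf("qtype %u qid %u: %u descriptors x %u bytes\n",
                         e->qtype, e->qid, e->num_desc, e->desc_size);
                  /* Same stride arithmetic as cudbg_next_qdesc() */
                  e = (const void *)((const uint8_t *)e +
                                     info->qdesc_entry_size + e->data_size);
          }
  }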

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 36d2588..b2d617a 100644
@@ -315,6 +315,48 @@ struct cudbg_pbt_tables {
        u32 pbt_data[CUDBG_PBT_DATA_ENTRIES];
 };
 
+enum cudbg_qdesc_qtype {
+       CUDBG_QTYPE_UNKNOWN = 0,
+       CUDBG_QTYPE_NIC_TXQ,
+       CUDBG_QTYPE_NIC_RXQ,
+       CUDBG_QTYPE_NIC_FLQ,
+       CUDBG_QTYPE_CTRLQ,
+       CUDBG_QTYPE_FWEVTQ,
+       CUDBG_QTYPE_INTRQ,
+       CUDBG_QTYPE_PTP_TXQ,
+       CUDBG_QTYPE_OFLD_TXQ,
+       CUDBG_QTYPE_RDMA_RXQ,
+       CUDBG_QTYPE_RDMA_FLQ,
+       CUDBG_QTYPE_RDMA_CIQ,
+       CUDBG_QTYPE_ISCSI_RXQ,
+       CUDBG_QTYPE_ISCSI_FLQ,
+       CUDBG_QTYPE_ISCSIT_RXQ,
+       CUDBG_QTYPE_ISCSIT_FLQ,
+       CUDBG_QTYPE_CRYPTO_TXQ,
+       CUDBG_QTYPE_CRYPTO_RXQ,
+       CUDBG_QTYPE_CRYPTO_FLQ,
+       CUDBG_QTYPE_TLS_RXQ,
+       CUDBG_QTYPE_TLS_FLQ,
+       CUDBG_QTYPE_MAX,
+};
+
+#define CUDBG_QDESC_REV 1
+
+struct cudbg_qdesc_entry {
+       u32 data_size;
+       u32 qtype;
+       u32 qid;
+       u32 desc_size;
+       u32 num_desc;
+       u8 data[0]; /* Must be last */
+};
+
+struct cudbg_qdesc_info {
+       u32 qdesc_entry_size;
+       u32 num_queues;
+       u8 data[0]; /* Must be last */
+};
+
 #define IREG_NUM_ELEM 4
 
 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index 215fe62..dec63c1 100644
@@ -81,7 +81,8 @@ enum cudbg_dbg_entity_type {
        CUDBG_MBOX_LOG = 66,
        CUDBG_HMA_INDIRECT = 67,
        CUDBG_HMA = 68,
-       CUDBG_MAX_ENTITY = 70,
+       CUDBG_QDESC = 70,
+       CUDBG_MAX_ENTITY = 71,
 };
 
 struct cudbg_init {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d97e0d7..7c49681 100644
@@ -19,6 +19,7 @@
 
 #include "t4_regs.h"
 #include "cxgb4.h"
+#include "cxgb4_cudbg.h"
 #include "cudbg_if.h"
 #include "cudbg_lib_common.h"
 #include "cudbg_entity.h"
@@ -2890,3 +2891,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
        }
        return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 }
+
+void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
+                                  u32 *num, u32 *size)
+{
+       u32 tot_entries = 0, tot_size = 0;
+
+       /* NIC TXQ, RXQ, FLQ, and CTRLQ */
+       tot_entries += MAX_ETH_QSETS * 3;
+       tot_entries += MAX_CTRL_QUEUES;
+
+       tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+       tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
+       tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
+       tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
+                   MAX_CTRL_TXQ_DESC_SIZE;
+
+       /* FW_EVTQ and INTRQ */
+       tot_entries += INGQ_EXTRAS;
+       tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
+
+       /* PTP_TXQ */
+       tot_entries += 1;
+       tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
+
+       /* ULD TXQ, RXQ, and FLQ */
+       tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
+       tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;
+
+       tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
+                   MAX_TXQ_DESC_SIZE;
+       tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
+                   MAX_RXQ_DESC_SIZE;
+       tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
+                   MAX_FL_DESC_SIZE;
+
+       /* ULD CIQ */
+       tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
+       tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
+                   MAX_RXQ_DESC_SIZE;
+
+       tot_size += sizeof(struct cudbg_ver_hdr) +
+                   sizeof(struct cudbg_qdesc_info) +
+                   sizeof(struct cudbg_qdesc_entry) * tot_entries;
+
+       if (num)
+               *num = tot_entries;
+
+       if (size)
+               *size = tot_size;
+}
+
+int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
+                       struct cudbg_buffer *dbg_buff,
+                       struct cudbg_error *cudbg_err)
+{
+       u32 num_queues = 0, tot_entries = 0, size = 0;
+       struct adapter *padap = pdbg_init->adap;
+       struct cudbg_buffer temp_buff = { 0 };
+       struct cudbg_qdesc_entry *qdesc_entry;
+       struct cudbg_qdesc_info *qdesc_info;
+       struct cudbg_ver_hdr *ver_hdr;
+       struct sge *s = &padap->sge;
+       u32 i, j, cur_off, tot_len;
+       u8 *data;
+       int rc;
+
+       cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
+       size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
+       tot_len = size;
+       data = kvzalloc(size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       ver_hdr = (struct cudbg_ver_hdr *)data;
+       ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
+       ver_hdr->revision = CUDBG_QDESC_REV;
+       ver_hdr->size = sizeof(struct cudbg_qdesc_info);
+       size -= sizeof(*ver_hdr);
+
+       qdesc_info = (struct cudbg_qdesc_info *)(data +
+                                                sizeof(*ver_hdr));
+       size -= sizeof(*qdesc_info);
+       qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;
+
+#define QDESC_GET(q, desc, type, label) do { \
+       if (size <= 0) { \
+               goto label; \
+       } \
+       if (desc) { \
+               cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
+               size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
+               num_queues++; \
+               qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
+       } \
+} while (0)
+
+#define QDESC_GET_TXQ(q, type, label) do { \
+       struct sge_txq *txq = (struct sge_txq *)q; \
+       QDESC_GET(txq, txq->desc, type, label); \
+} while (0)
+
+#define QDESC_GET_RXQ(q, type, label) do { \
+       struct sge_rspq *rxq = (struct sge_rspq *)q; \
+       QDESC_GET(rxq, rxq->desc, type, label); \
+} while (0)
+
+#define QDESC_GET_FLQ(q, type, label) do { \
+       struct sge_fl *flq = (struct sge_fl *)q; \
+       QDESC_GET(flq, flq->desc, type, label); \
+} while (0)
+
+       /* NIC TXQ */
+       for (i = 0; i < s->ethqsets; i++)
+               QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
+
+       /* NIC RXQ */
+       for (i = 0; i < s->ethqsets; i++)
+               QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);
+
+       /* NIC FLQ */
+       for (i = 0; i < s->ethqsets; i++)
+               QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
+
+       /* NIC CTRLQ */
+       for (i = 0; i < padap->params.nports; i++)
+               QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
+
+       /* FW_EVTQ */
+       QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);
+
+       /* INTRQ */
+       QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);
+
+       /* PTP_TXQ */
+       QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
+
+       /* ULD Queues */
+       mutex_lock(&uld_mutex);
+
+       if (s->uld_txq_info) {
+               struct sge_uld_txq_info *utxq;
+
+               /* ULD TXQ */
+               for (j = 0; j < CXGB4_TX_MAX; j++) {
+                       if (!s->uld_txq_info[j])
+                               continue;
+
+                       utxq = s->uld_txq_info[j];
+                       for (i = 0; i < utxq->ntxq; i++)
+                               QDESC_GET_TXQ(&utxq->uldtxq[i].q,
+                                             cudbg_uld_txq_to_qtype(j),
+                                             out_unlock);
+               }
+       }
+
+       if (s->uld_rxq_info) {
+               struct sge_uld_rxq_info *urxq;
+               u32 base;
+
+               /* ULD RXQ */
+               for (j = 0; j < CXGB4_ULD_MAX; j++) {
+                       if (!s->uld_rxq_info[j])
+                               continue;
+
+                       urxq = s->uld_rxq_info[j];
+                       for (i = 0; i < urxq->nrxq; i++)
+                               QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
+                                             cudbg_uld_rxq_to_qtype(j),
+                                             out_unlock);
+               }
+
+               /* ULD FLQ */
+               for (j = 0; j < CXGB4_ULD_MAX; j++) {
+                       if (!s->uld_rxq_info[j])
+                               continue;
+
+                       urxq = s->uld_rxq_info[j];
+                       for (i = 0; i < urxq->nrxq; i++)
+                               QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
+                                             cudbg_uld_flq_to_qtype(j),
+                                             out_unlock);
+               }
+
+               /* ULD CIQ */
+               for (j = 0; j < CXGB4_ULD_MAX; j++) {
+                       if (!s->uld_rxq_info[j])
+                               continue;
+
+                       urxq = s->uld_rxq_info[j];
+                       base = urxq->nrxq;
+                       for (i = 0; i < urxq->nciq; i++)
+                               QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
+                                             cudbg_uld_ciq_to_qtype(j),
+                                             out_unlock);
+               }
+       }
+
+out_unlock:
+       mutex_unlock(&uld_mutex);
+
+out:
+       qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
+       qdesc_info->num_queues = num_queues;
+       cur_off = 0;
+       while (tot_len) {
+               u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);
+
+               rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
+                                   &temp_buff);
+               if (rc) {
+                       cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+                       goto out_free;
+               }
+
+               memcpy(temp_buff.data, data + cur_off, chunk_size);
+               tot_len -= chunk_size;
+               cur_off += chunk_size;
+               rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+                                                 dbg_buff);
+               if (rc) {
+                       cudbg_put_buff(pdbg_init, &temp_buff);
+                       cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+                       goto out_free;
+               }
+       }
+
+out_free:
+       if (data)
+               kvfree(data);
+
+#undef QDESC_GET_FLQ
+#undef QDESC_GET_RXQ
+#undef QDESC_GET_TXQ
+#undef QDESC_GET
+
+       return rc;
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index eebefe7..f047a01 100644
@@ -171,6 +171,9 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
 int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
                              struct cudbg_buffer *dbg_buff,
                              struct cudbg_error *cudbg_err);
+int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
+                       struct cudbg_buffer *dbg_buff,
+                       struct cudbg_error *cudbg_err);
 
 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -182,4 +185,107 @@ int cudbg_fill_meminfo(struct adapter *padap,
                       struct cudbg_meminfo *meminfo_buff);
 void cudbg_fill_le_tcam_info(struct adapter *padap,
                             struct cudbg_tcam *tcam_region);
+void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
+                                  u32 *num, u32 *size);
+
+static inline u32 cudbg_uld_txq_to_qtype(u32 uld)
+{
+       switch (uld) {
+       case CXGB4_TX_OFLD:
+               return CUDBG_QTYPE_OFLD_TXQ;
+       case CXGB4_TX_CRYPTO:
+               return CUDBG_QTYPE_CRYPTO_TXQ;
+       }
+
+       return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline u32 cudbg_uld_rxq_to_qtype(u32 uld)
+{
+       switch (uld) {
+       case CXGB4_ULD_RDMA:
+               return CUDBG_QTYPE_RDMA_RXQ;
+       case CXGB4_ULD_ISCSI:
+               return CUDBG_QTYPE_ISCSI_RXQ;
+       case CXGB4_ULD_ISCSIT:
+               return CUDBG_QTYPE_ISCSIT_RXQ;
+       case CXGB4_ULD_CRYPTO:
+               return CUDBG_QTYPE_CRYPTO_RXQ;
+       case CXGB4_ULD_TLS:
+               return CUDBG_QTYPE_TLS_RXQ;
+       }
+
+       return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline u32 cudbg_uld_flq_to_qtype(u32 uld)
+{
+       switch (uld) {
+       case CXGB4_ULD_RDMA:
+               return CUDBG_QTYPE_RDMA_FLQ;
+       case CXGB4_ULD_ISCSI:
+               return CUDBG_QTYPE_ISCSI_FLQ;
+       case CXGB4_ULD_ISCSIT:
+               return CUDBG_QTYPE_ISCSIT_FLQ;
+       case CXGB4_ULD_CRYPTO:
+               return CUDBG_QTYPE_CRYPTO_FLQ;
+       case CXGB4_ULD_TLS:
+               return CUDBG_QTYPE_TLS_FLQ;
+       }
+
+       return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline u32 cudbg_uld_ciq_to_qtype(u32 uld)
+{
+       switch (uld) {
+       case CXGB4_ULD_RDMA:
+               return CUDBG_QTYPE_RDMA_CIQ;
+       }
+
+       return CUDBG_QTYPE_UNKNOWN;
+}
+
+static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq,
+                                       enum cudbg_qdesc_qtype type,
+                                       struct cudbg_qdesc_entry *entry)
+{
+       entry->qtype = type;
+       entry->qid = txq->cntxt_id;
+       entry->desc_size = sizeof(struct tx_desc);
+       entry->num_desc = txq->size;
+       entry->data_size = txq->size * sizeof(struct tx_desc);
+       memcpy(entry->data, txq->desc, entry->data_size);
+}
+
+static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq,
+                                       enum cudbg_qdesc_qtype type,
+                                       struct cudbg_qdesc_entry *entry)
+{
+       entry->qtype = type;
+       entry->qid = rxq->cntxt_id;
+       entry->desc_size = rxq->iqe_len;
+       entry->num_desc = rxq->size;
+       entry->data_size = rxq->size * rxq->iqe_len;
+       memcpy(entry->data, rxq->desc, entry->data_size);
+}
+
+static inline void cudbg_fill_qdesc_flq(const struct sge_fl *flq,
+                                       enum cudbg_qdesc_qtype type,
+                                       struct cudbg_qdesc_entry *entry)
+{
+       entry->qtype = type;
+       entry->qid = flq->cntxt_id;
+       entry->desc_size = sizeof(__be64);
+       entry->num_desc = flq->size;
+       entry->data_size = flq->size * sizeof(__be64);
+       memcpy(entry->data, flq->desc, entry->data_size);
+}
+
+static inline
+struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e)
+{
+       return (struct cudbg_qdesc_entry *)
+              ((u8 *)e + sizeof(*e) + e->data_size);
+}
 #endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 76d1674..298701e 100644
@@ -532,6 +532,13 @@ enum {
        MIN_FL_ENTRIES       = 16
 };
 
+enum {
+       MAX_TXQ_DESC_SIZE      = 64,
+       MAX_RXQ_DESC_SIZE      = 128,
+       MAX_FL_DESC_SIZE       = 8,
+       MAX_CTRL_TXQ_DESC_SIZE = 64,
+};
+
 enum {
        INGQ_EXTRAS = 2,        /* firmware event queue and */
                                /*   forwarded interrupts */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 5f01c0a..972f0a1 100644
@@ -30,6 +30,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
 
 static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
        { CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
+       { CUDBG_QDESC, cudbg_collect_qdesc },
        { CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
        { CUDBG_REG_DUMP, cudbg_collect_reg_dump },
        { CUDBG_CIM_LA, cudbg_collect_cim_la },
@@ -311,6 +312,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
                }
                len = cudbg_mbytes_to_bytes(len);
                break;
+       case CUDBG_QDESC:
+               cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
+               break;
        default:
                break;
        }