cxgb4: add stats for MQPRIO QoS offload Tx path
author		Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
		Fri, 22 Nov 2019 01:00:03 +0000 (06:30 +0530)
committer	Jakub Kicinski <jakub.kicinski@netronome.com>
		Sat, 23 Nov 2019 00:44:40 +0000 (16:44 -0800)
Export the necessary stats for traffic flowing through the MQPRIO QoS
offload Tx path.

v2:
- No change.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
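
These per-port totals are what collect_sge_port_stats() below feeds into the
driver's ethtool statistics, so the new counters end up in "ethtool -S"
output. As a rough illustration (not part of this patch), the stand-alone C
sketch below reads a device's ethtool stats through the standard
ETHTOOL_GSSET_INFO / ETHTOOL_GSTRINGS / ETHTOOL_GSTATS ioctls and prints any
counters whose labels mention "tso" or "uso"; the interface name and the
exact label text are assumptions, since the stats string table is not shown
in this hunk.

/*
 * Illustrative only: read a netdev's ethtool stats from user space and
 * print the counters whose labels mention "tso" or "uso".  The interface
 * name and label text are assumptions, not taken from this patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_ioctl(int fd, const char *dev, void *cmd)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = cmd;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "eth0";	/* assumed name */
	struct ethtool_sset_info *sset;
	struct ethtool_gstrings *strs;
	struct ethtool_stats *stats;
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	/* How many stats does the device export in ETH_SS_STATS? */
	sset = calloc(1, sizeof(*sset) + sizeof(__u32));
	sset->cmd = ETHTOOL_GSSET_INFO;
	sset->sset_mask = 1ULL << ETH_SS_STATS;
	if (ethtool_ioctl(fd, dev, sset) < 0)
		return 1;
	n = sset->data[0];

	/* Fetch the stat names and their current values. */
	strs = calloc(1, sizeof(*strs) + n * ETH_GSTRING_LEN);
	strs->cmd = ETHTOOL_GSTRINGS;
	strs->string_set = ETH_SS_STATS;
	strs->len = n;

	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;

	if (ethtool_ioctl(fd, dev, strs) < 0 ||
	    ethtool_ioctl(fd, dev, stats) < 0)
		return 1;

	for (i = 0; i < n; i++) {
		const char *name = (const char *)&strs->data[i * ETH_GSTRING_LEN];

		if (strstr(name, "tso") || strstr(name, "uso"))
			printf("%-32s %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}
	return 0;
}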

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 04cb890..a70ac20 100644
@@ -850,6 +850,7 @@ struct sge_eohw_txq {
        struct sge_txq q; /* HW Txq */
        struct adapter *adap; /* Backpointer to adapter */
        unsigned long tso; /* # of TSO requests */
+       unsigned long uso; /* # of USO requests */
        unsigned long tx_cso; /* # of Tx checksum offloads */
        unsigned long vlan_ins; /* # of Tx VLAN insertions */
        unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index fa229d0..93868dc 100644
@@ -2797,6 +2797,7 @@ do { \
                RL("RxAN", stats.an);
                RL("RxNoMem", stats.nomem);
                TL("TSO:", tso);
+               TL("USO:", uso);
                TL("TxCSO:", tx_cso);
                TL("VLANins:", vlan_ins);
                TL("TxQFull:", q.stops);
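
Each TL()/RL() line in the sge_qinfo debugfs dump emits one labeled row with
one column per queue in the group being printed, so the added USO row sits
alongside the existing TSO and TxCSO rows. The stand-alone sketch below is
only a rough model of that row/column layout; the struct, macro, and field
names are stand-ins, not the cxgb4 definitions (which live outside this hunk).

/*
 * Rough model of one sge_qinfo-style stats row: a label column followed
 * by one value per queue.  Purely illustrative; names are stand-ins.
 */
#include <stdio.h>

struct eohw_txq_model {			/* stand-in for struct sge_eohw_txq */
	unsigned long tso, uso, tx_cso, vlan_ins;
};

#define TL_ROW(label, field)						\
	do {								\
		printf("%-10s", label);					\
		for (int i = 0; i < nq; i++)				\
			printf(" %10lu", q[i].field);			\
		printf("\n");						\
	} while (0)

int main(void)
{
	struct eohw_txq_model q[] = {
		{ .tso = 12, .uso = 3, .tx_cso = 40, .vlan_ins = 5 },
		{ .tso = 7,  .uso = 9, .tx_cso = 22, .vlan_ins = 1 },
	};
	int nq = 2;

	TL_ROW("TSO:", tso);
	TL_ROW("USO:", uso);		/* the row added by this patch */
	TL_ROW("TxCSO:", tx_cso);
	TL_ROW("VLANins:", vlan_ins);
	return 0;
}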
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index f574574..20ab3b6 100644
@@ -242,9 +242,10 @@ static void collect_sge_port_stats(const struct adapter *adap,
                                   const struct port_info *p,
                                   struct queue_port_stats *s)
 {
-       int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+       struct sge_eohw_txq *eohw_tx;
+       unsigned int i;
 
        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
@@ -257,6 +258,16 @@ static void collect_sge_port_stats(const struct adapter *adap,
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
+
+       if (adap->sge.eohw_txq) {
+               eohw_tx = &adap->sge.eohw_txq[p->first_qset];
+               for (i = 0; i < p->nqsets; i++, eohw_tx++) {
+                       s->tso += eohw_tx->tso;
+                       s->uso += eohw_tx->uso;
+                       s->tx_csum += eohw_tx->tx_cso;
+                       s->vlan_ins += eohw_tx->vlan_ins;
+               }
+       }
 }
 
 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
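
collect_sge_port_stats() now also folds the ETHOFLD Tx queue counters into
the same per-port totals that the regular Ethernet Tx queues feed, and the
NULL check covers adapters where no MQPRIO offload Tx queues have been
allocated. A minimal stand-alone model of that guarded aggregation is
sketched below; the type and field names are illustrative stand-ins, not the
cxgb4 structures.

/*
 * Stand-alone model of the aggregation pattern above: sum an optional
 * array of offload Tx queue counters into the port totals.  All names
 * here are illustrative stand-ins.
 */
#include <stdio.h>

struct eohw_txq_model { unsigned long tso, uso, tx_cso, vlan_ins; };
struct port_stats_model { unsigned long tso, uso, tx_csum, vlan_ins; };

static void add_eohw_stats(struct port_stats_model *s,
			   const struct eohw_txq_model *q, /* may be NULL */
			   unsigned int nqsets)
{
	if (!q)		/* no MQPRIO offload queues allocated */
		return;

	for (unsigned int i = 0; i < nqsets; i++, q++) {
		s->tso += q->tso;
		s->uso += q->uso;
		s->tx_csum += q->tx_cso;
		s->vlan_ins += q->vlan_ins;
	}
}

int main(void)
{
	struct eohw_txq_model q[2] = { { 4, 1, 9, 2 }, { 6, 0, 3, 1 } };
	struct port_stats_model s = { 0 };

	add_eohw_stats(&s, q, 2);
	printf("tso=%lu uso=%lu tx_csum=%lu vlan_ins=%lu\n",
	       s.tso, s.uso, s.tx_csum, s.vlan_ins);
	return 0;
}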
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 53f9a82..97cda50 100644
@@ -2262,6 +2262,19 @@ write_wr_headers:
                                d->addr);
        }
 
+       if (skb_shinfo(skb)->gso_size) {
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+                       eohw_txq->uso++;
+               else
+                       eohw_txq->tso++;
+               eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               eohw_txq->tx_cso++;
+       }
+
+       if (skb_vlan_tag_present(skb))
+               eohw_txq->vlan_ins++;
+
        txq_advance(&eohw_txq->q, ndesc);
        cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
        eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
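
The accounting above works per skb: a GSO skb counts as one USO request when
SKB_GSO_UDP_L4 is set and one TSO request otherwise, tx_cso grows by
gso_segs because the hardware checksums every resulting segment, a non-GSO
skb with CHECKSUM_PARTIAL counts a single checksum offload, and a VLAN tag
counts one insertion. The stand-alone sketch below models that decision
tree; the skb_model struct and GSO_UDP_L4 flag are simplified stand-ins for
the kernel's skb_shinfo() fields and SKB_GSO_UDP_L4.

/*
 * Stand-alone model of the per-skb accounting added above.  The skb_model
 * struct and GSO_UDP_L4 flag are simplified stand-ins, not kernel types.
 */
#include <stdbool.h>
#include <stdio.h>

#define GSO_UDP_L4	0x1	/* stand-in for SKB_GSO_UDP_L4 */

struct skb_model {
	unsigned int gso_size;	/* non-zero => segmentation offload */
	unsigned int gso_segs;	/* segments the hardware will emit */
	unsigned int gso_type;
	bool csum_partial;	/* CHECKSUM_PARTIAL */
	bool vlan_tag;
};

struct txq_stats { unsigned long tso, uso, tx_cso, vlan_ins; };

static void account_tx(struct txq_stats *st, const struct skb_model *skb)
{
	if (skb->gso_size) {
		if (skb->gso_type & GSO_UDP_L4)
			st->uso++;		/* UDP segmentation offload */
		else
			st->tso++;		/* TCP segmentation offload */
		st->tx_cso += skb->gso_segs;	/* one csum per segment */
	} else if (skb->csum_partial) {
		st->tx_cso++;			/* single checksum offload */
	}

	if (skb->vlan_tag)
		st->vlan_ins++;			/* HW VLAN tag insertion */
}

int main(void)
{
	struct txq_stats st = { 0 };
	struct skb_model udp_gso = { .gso_size = 1400, .gso_segs = 8,
				     .gso_type = GSO_UDP_L4, .vlan_tag = true };

	account_tx(&st, &udp_gso);
	printf("uso=%lu tx_cso=%lu vlan_ins=%lu\n",
	       st.uso, st.tx_cso, st.vlan_ins);
	return 0;
}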
@@ -4546,6 +4559,7 @@ int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
        spin_lock_init(&txq->lock);
        txq->adap = adap;
        txq->tso = 0;
+       txq->uso = 0;
        txq->tx_cso = 0;
        txq->vlan_ins = 0;
        txq->mapping_err = 0;