{
struct enic *enic = vnic_dev_priv(wq->vdev);
- enic->wq_stats[wq->index].cq_work++;
- enic->wq_stats[wq->index].cq_bytes += buf->len;
+ enic->wq[wq->index].stats.cq_work++;
+ enic->wq[wq->index].stats.cq_bytes += buf->len;
enic_free_wq_buf(wq, buf);
}
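
For readers tracking the accessor changes: the patch presumes per-queue wrapper structs that bundle what previously lived in parallel arrays (wq_stats[], wq_lock[], and the raw vnic rings). A minimal sketch of that layout, inferred from the accessors in this diff rather than copied from the header hunk (which is not shown); the cacheline alignment is an assumption:

struct enic_rq {
	struct vnic_rq vrq;
	struct enic_rq_stats stats;
} ____cacheline_aligned;	/* alignment assumed, not shown in this diff */

struct enic_wq {
	spinlock_t lock;
	struct vnic_wq vwq;
	struct enic_wq_stats stats;
} ____cacheline_aligned;	/* alignment assumed, not shown in this diff */

With this layout, enic->wq_stats[i], enic->wq_lock[i], and the old enic->wq[i] collapse into fields of a single enic->wq[i], keeping each queue's ring, lock, and counters together.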
{
struct enic *enic = vnic_dev_priv(vdev);
- spin_lock(&enic->wq_lock[q_number]);
+ spin_lock(&enic->wq[q_number].lock);
- vnic_wq_service(&enic->wq[q_number], cq_desc,
+ vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
completed_index, enic_wq_free_buf,
opaque);
if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
- vnic_wq_desc_avail(&enic->wq[q_number]) >=
+ vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
netif_wake_subqueue(enic->netdev, q_number);
- enic->wq_stats[q_number].wake++;
+ enic->wq[q_number].stats.wake++;
}
- spin_unlock(&enic->wq_lock[q_number]);
+ spin_unlock(&enic->wq[q_number].lock);
return 0;
}
bool err = false;
for (i = 0; i < enic->wq_count; i++) {
- error_status = vnic_wq_error_status(&enic->wq[i]);
+ error_status = vnic_wq_error_status(&enic->wq[i].vwq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
}
for (i = 0; i < enic->rq_count; i++) {
- error_status = vnic_rq_error_status(&enic->rq[i]);
+ error_status = vnic_rq_error_status(&enic->rq[i].vrq);
err |= error_status;
if (error_status)
netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
/* The enic_queue_wq_desc() above does not do HW checksum */
- enic->wq_stats[wq->index].csum_none++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.csum_none++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
- enic->wq_stats[wq->index].csum_partial++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.csum_partial++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
if (skb->encapsulation) {
hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
- enic->wq_stats[wq->index].encap_tso++;
+ enic->wq[wq->index].stats.encap_tso++;
} else {
hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
- enic->wq_stats[wq->index].tso++;
+ enic->wq[wq->index].stats.tso++;
}
/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
* for the main skb fragment
*/
pkts = len / mss;
if ((len % mss) > 0)
pkts++;
- enic->wq_stats[wq->index].packets += pkts;
- enic->wq_stats[wq->index].bytes += (len + (pkts * hdr_len));
+ enic->wq[wq->index].stats.packets += pkts;
+ enic->wq[wq->index].stats.bytes += (len + (pkts * hdr_len));
return 0;
}
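
A note on the segment accounting above: pkts is len/mss rounded up, and each segment re-carries the hdr_len bytes of headers, so the byte counter grows by len + pkts * hdr_len. An equivalent formulation (illustrative sketch only, not part of the patch):

/* e.g. len = 4000, mss = 1448, hdr_len = 66:
 * pkts  = DIV_ROUND_UP(4000, 1448) = 3
 * bytes += 4000 + 3 * 66 = 4198
 */
unsigned int pkts = DIV_ROUND_UP(len, mss);

enic->wq[wq->index].stats.packets += pkts;
enic->wq[wq->index].stats.bytes += len + pkts * hdr_len;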
if (!eop)
err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
- enic->wq_stats[wq->index].encap_csum++;
- enic->wq_stats[wq->index].packets++;
- enic->wq_stats[wq->index].bytes += skb->len;
+ enic->wq[wq->index].stats.encap_csum++;
+ enic->wq[wq->index].stats.packets++;
+ enic->wq[wq->index].stats.bytes += skb->len;
return err;
}
/* VLAN tag from trunking driver */
vlan_tag_insert = 1;
vlan_tag = skb_vlan_tag_get(skb);
- enic->wq_stats[wq->index].add_vlan++;
+ enic->wq[wq->index].stats.add_vlan++;
} else if (enic->loop_enable) {
vlan_tag = enic->loop_tag;
loopback = 1;
struct netdev_queue *txq;
txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
- wq = &enic->wq[txq_map];
+ wq = &enic->wq[txq_map].vwq;
if (skb->len <= 0) {
dev_kfree_skb_any(skb);
- enic->wq_stats[wq->index].null_pkt++;
+ enic->wq[wq->index].stats.null_pkt++;
return NETDEV_TX_OK;
}
if (skb_shinfo(skb)->gso_size == 0 &&
skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
skb_linearize(skb)) {
dev_kfree_skb_any(skb);
- enic->wq_stats[wq->index].skb_linear_fail++;
+ enic->wq[wq->index].stats.skb_linear_fail++;
return NETDEV_TX_OK;
}
- spin_lock(&enic->wq_lock[txq_map]);
+ spin_lock(&enic->wq[txq_map].lock);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock(&enic->wq_lock[txq_map]);
- enic->wq_stats[wq->index].desc_full_awake++;
+ spin_unlock(&enic->wq[txq_map].lock);
+ enic->wq[wq->index].stats.desc_full_awake++;
return NETDEV_TX_BUSY;
}
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
netif_tx_stop_queue(txq);
- enic->wq_stats[wq->index].stopped++;
+ enic->wq[wq->index].stats.stopped++;
}
skb_tx_timestamp(skb);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
error:
- spin_unlock(&enic->wq_lock[txq_map]);
+ spin_unlock(&enic->wq[txq_map].lock);
return NETDEV_TX_OK;
}
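
The stop/wake pairing between this hunk and the completion-path hunk near the top is standard ring hysteresis: stop the queue once fewer than MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors remain (the worst case a single skb can consume), and wake it only when at least that many are free again. Condensed into one view, stitched together from the two hunks using the new field names:

/* xmit side: stop while the next worst-case skb might not fit */
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) {
	netif_tx_stop_queue(txq);
	enic->wq[wq->index].stats.stopped++;
}

/* completion side: wake only above the same threshold, so the queue
 * does not ping-pong between stopped and awake near the limit
 */
if (netif_tx_queue_stopped(txq) &&
    vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
	netif_wake_subqueue(enic->netdev, q_number);
	enic->wq[q_number].stats.wake++;
}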
net_stats->multicast = stats->rx.rx_multicast_frames_ok;
for (i = 0; i < ENIC_RQ_MAX; i++) {
- struct enic_rq_stats *rqs = &enic->rq_stats[i];
+ struct enic_rq_stats *rqs = &enic->rq[i].stats;
- if (!enic->rq->ctrl)
+ if (!enic->rq[i].vrq.ctrl)
break;
pkt_truncated += rqs->pkt_truncated;
bad_fcs += rqs->bad_fcs;
}
skb = netdev_alloc_skb_ip_align(netdev, len);
if (!skb) {
- enic->rq_stats[rq->index].no_skb++;
+ enic->rq[rq->index].stats.no_skb++;
return -ENOMEM;
}
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
- struct enic_rq_stats *rqstats = &enic->rq_stats[rq->index];
+ struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
{
struct enic *enic = vnic_dev_priv(vdev);
- vnic_rq_service(&enic->rq[q_number], cq_desc,
+ vnic_rq_service(&enic->rq[q_number].vrq, cq_desc,
completed_index, VNIC_RQ_RETURN_DESC,
enic_rq_indicate_buf, opaque);
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
*/
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[0]);
+ enic_calc_int_moderation(enic, &enic->rq[0].vrq);
if ((rq_work_done < budget) && napi_complete_done(napi, rq_work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[0]);
+ enic_set_int_moderation(enic, &enic->rq[0].vrq);
vnic_intr_unmask(&enic->intr[intr]);
- enic->rq_stats[0].napi_complete++;
+ enic->rq[0].stats.napi_complete++;
} else {
- enic->rq_stats[0].napi_repoll++;
+ enic->rq[0].stats.napi_repoll++;
}
return rq_work_done;
struct net_device *netdev = napi->dev;
struct enic *enic = netdev_priv(netdev);
unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
- struct vnic_wq *wq = &enic->wq[wq_index];
+ struct vnic_wq *wq = &enic->wq[wq_index].vwq;
unsigned int cq;
unsigned int intr;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
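
The wq_index arithmetic above works because the driver's napi array is laid out rx-first with the tx queues following, so subtracting rq_count from the napi offset recovers the wq index. A sketch of the assumed mapping (the napi allocation code is not in this diff):

/* Assumed napi layout (not shown in this patch):
 *   napi[0 .. rq_count-1]                  -> enic->rq[i].vrq
 *   napi[rq_count .. rq_count+wq_count-1]  -> enic->wq[i].vwq
 */
unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
struct vnic_wq *wq = &enic->wq[wq_index].vwq;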
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq].vrq, enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
*/
/* Call the function which refreshes the intr coalescing timer
* value based on the traffic.
*/
- enic_calc_int_moderation(enic, &enic->rq[rq]);
+ enic_calc_int_moderation(enic, &enic->rq[rq].vrq);
if ((work_done < budget) && napi_complete_done(napi, work_done)) {
/* Some work done, but not enough to stay in polling,
* exit polling
*/
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
- enic_set_int_moderation(enic, &enic->rq[rq]);
+ enic_set_int_moderation(enic, &enic->rq[rq].vrq);
vnic_intr_unmask(&enic->intr[intr]);
- enic->rq_stats[rq].napi_complete++;
+ enic->rq[rq].stats.napi_complete++;
} else {
- enic->rq_stats[rq].napi_repoll++;
+ enic->rq[rq].stats.napi_repoll++;
}
return work_done;
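
Both poll paths follow the standard NAPI contract: do at most budget units of work, and re-arm the device interrupt only after napi_complete_done() confirms polling may stop; otherwise stay scheduled. A generic skeleton of that contract (example_poll, do_rx_work, and example_unmask are hypothetical names, not driver functions):

static int example_poll(struct napi_struct *napi, int budget)
{
	/* hypothetical stand-in for the rq service + refill loop */
	int work_done = do_rx_work(napi, budget);

	/* unmask only when NAPI agrees we are done; if it fails, another
	 * poll is already pending and the interrupt must stay masked
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask(napi);	/* vnic_intr_unmask() in this driver */

	return work_done;
}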
for (i = 0; i < enic->rq_count; i++) {
/* enable rq before updating rq desc */
- vnic_rq_enable(&enic->rq[i]);
- vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
+ vnic_rq_enable(&enic->rq[i].vrq);
+ vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
+ if (vnic_rq_desc_used(&enic->rq[i].vrq) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
goto err_out_free_rq;
}
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_enable(&enic->wq[i]);
+ vnic_wq_enable(&enic->wq[i].vwq);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- ret = vnic_rq_disable(&enic->rq[i]);
+ ret = vnic_rq_disable(&enic->rq[i].vrq);
if (!ret)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
}
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
- err = vnic_wq_disable(&enic->wq[i]);
+ err = vnic_wq_disable(&enic->wq[i].vwq);
if (err)
return err;
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
+ err = vnic_rq_disable(&enic->rq[i].vrq);
if (err)
return err;
}
enic_free_intr(enic);
for (i = 0; i < enic->wq_count; i++)
- vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+ vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf);
for (i = 0; i < enic->rq_count; i++)
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
for (i = 0; i < enic->intr_count; i++)
vnic_intr_clean(&enic->intr[i]);
struct netdev_queue_stats_rx *rxs)
{
struct enic *enic = netdev_priv(dev);
- struct enic_rq_stats *rqstats = &enic->rq_stats[idx];
+ struct enic_rq_stats *rqstats = &enic->rq[idx].stats;
rxs->bytes = rqstats->bytes;
rxs->packets = rqstats->packets;
struct netdev_queue_stats_tx *txs)
{
struct enic *enic = netdev_priv(dev);
- struct enic_wq_stats *wqstats = &enic->wq_stats[idx];
+ struct enic_wq_stats *wqstats = &enic->wq[idx].stats;
txs->bytes = wqstats->bytes;
txs->packets = wqstats->packets;
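
These two handlers feed the kernel's per-queue stats interface; presumably they are registered through struct netdev_stat_ops (a real interface declared in include/net/netdev_queues.h), roughly as sketched below. The handler names are assumptions read off the signatures in this diff; the registration hunk itself is not shown:

/* sketch: handler names assumed, not taken from the patch */
static const struct netdev_stat_ops enic_netdev_stat_ops = {
	.get_queue_stats_rx	= enic_get_queue_stats_rx,
	.get_queue_stats_tx	= enic_get_queue_stats_tx,
	.get_base_stats		= enic_get_base_stats,
};

/* at probe time: netdev->stat_ops = &enic_netdev_stat_ops; */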
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
for (i = 0; i < enic->wq_count; i++)
- spin_lock_init(&enic->wq_lock[i]);
+ spin_lock_init(&enic->wq[i].lock);
/* Register net device
*/