// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */
#include "aq_vec.h"

#include <linux/netdevice.h>
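/* Each interrupt vector owns one Tx/Rx ring pair per traffic class. The
 * second dimension of the ring array selects the direction: index 0
 * (AQ_VEC_TX_ID) is the Tx ring, index 1 (AQ_VEC_RX_ID) is the Rx ring.
 */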
struct aq_vec_s {
	const struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};
#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1
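/* NAPI poll callback for one vector: refreshes the Tx head from hardware,
 * completes sent Tx descriptors, pulls received descriptors out of the Rx
 * ring via aq_ring_rx_clean(), and refills the Rx ring. Per the usual NAPI
 * contract, the vector's interrupt is re-enabled only when less than the
 * full budget was consumed.
 */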
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	unsigned int sw_tail_old = 0U;
	struct aq_ring_s *ring = NULL;
	bool was_tx_cleaned = true;
	unsigned int i = 0U;
	int work_done = 0;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
		ring[AQ_VEC_RX_ID].stats.rx.polls++;
		u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);

		if (self->aq_hw_ops->hw_ring_tx_head_update) {
			err = self->aq_hw_ops->hw_ring_tx_head_update(
						self->aq_hw,
						&ring[AQ_VEC_TX_ID]);
			if (err < 0)
				goto err_exit;
		}

		if (ring[AQ_VEC_TX_ID].sw_head !=
		    ring[AQ_VEC_TX_ID].hw_head) {
			was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
			aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
		}

		err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
							  &ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		if (ring[AQ_VEC_RX_ID].sw_head !=
		    ring[AQ_VEC_RX_ID].hw_head) {
			err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
					       napi, &work_done,
					       budget - work_done);
			if (err < 0)
				goto err_exit;

			sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

			err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			err = self->aq_hw_ops->hw_ring_rx_fill(
						self->aq_hw,
						&ring[AQ_VEC_RX_ID],
						sw_tail_old);
			if (err < 0)
				goto err_exit;
		}
	}

err_exit:
	/* If Tx cleanup stalled, claim the full budget so NAPI polls again. */
	if (!was_tx_cleaned)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
	}

	return work_done;
}
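/* Allocate a vector object, pin it to a CPU derived from the RSS base CPU
 * number, and register its NAPI context. The rings themselves are allocated
 * separately by aq_vec_ring_alloc().
 */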
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		goto err_exit;

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

err_exit:
	return self;
}
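/* Allocate one Tx and one Rx ring per traffic class configured for this
 * vector; tx_rings/rx_rings count the successfully allocated rings so the
 * error path frees exactly what was set up.
 */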
int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		const unsigned int idx_ring = AQ_NIC_CFG_TCVEC2RING(aq_nic_cfg,
								    i, idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0)
		aq_vec_ring_free(self);

	return err;
}
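/* Bind the vector to the hardware ops and initialize every ring pair, both
 * the software bookkeeping (aq_ring_init) and the hardware side
 * (hw_ring_*_init). Rx rings are pre-filled with buffers and the tail
 * pointer is published to hardware with hw_ring_rx_fill().
 */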
int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}
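/* Start the hardware rings of every pair and only then enable NAPI, so the
 * poll callback never runs against stopped rings.
 */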
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}
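/* Counterpart of aq_vec_start(): stop the hardware rings, then disable
 * NAPI; napi_disable() waits for a poll in flight to complete.
 */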
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}
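/* Release ring contents: complete outstanding Tx buffers and drop any
 * buffered Rx data. The ring memory itself is freed by aq_vec_ring_free().
 */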
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}
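/* Unregister the NAPI context and free the vector object itself. */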
void aq_vec_free(struct aq_vec_s *self)
{
	if (!self)
		goto err_exit;

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}
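/* Free ring descriptor memory. rx_rings can lag tx_rings when
 * aq_vec_ring_alloc() failed between the Tx and Rx allocation of a pair,
 * hence the extra bounds check on the Rx side.
 */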
void aq_vec_ring_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		if (i < self->rx_rings)
			aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	self->tx_rings = 0;
	self->rx_rings = 0;
err_exit:;
}
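/* MSI-X interrupt handler: each vector has a dedicated interrupt, so the
 * handler only needs to schedule NAPI; aq_vec_poll() re-enables the
 * vector's interrupt once the budget is no longer exhausted.
 */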
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}

	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
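/* Legacy (INTx) interrupt handler: the line may be shared, so first read
 * the interrupt status to check whether this vector actually fired. If it
 * did, mask it and schedule NAPI; otherwise re-enable and report IRQ_NONE
 * so the kernel can consult other devices sharing the line.
 */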
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err;

	if (!self)
		return IRQ_NONE;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		return IRQ_NONE;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
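/* Expose the vector's CPU affinity mask so callers (e.g. the IRQ setup
 * code) can apply it as an interrupt affinity hint.
 */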
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}
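/* A traffic class is valid for this vector only if both rings of its
 * Tx/Rx pair were allocated.
 */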
bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
{
	return tc < self->rx_rings && tc < self->tx_rings;
}
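/* Copy the software counters of one traffic class into @data, Rx counters
 * first with Tx appended right after; returns the number of u64 values
 * written.
 */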
unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
{
	unsigned int count;

	if (!aq_vec_is_valid_tc(self, tc))
		return 0;

	count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
	count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);

	return count;
}