// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			/* Ring is the sole owner: rewind and reuse in place */
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

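/* Illustrative note: with the usual 2 KiB AQ_CFG_RX_FRAME_MAX from aq_cfg.h
 * and 4 KiB order-0 pages (both config-dependent), the reuse logic above
 * advances pg_off from 0 to 2048 once (one pg_flip) before the page is
 * exhausted.  At that point a page still referenced by the stack
 * (page_ref_count > 1) is unmapped and counted as a pg_lost, while a page
 * the ring owns exclusively rewinds pg_off to 0 and counts as a pg_reuse.
 */
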
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

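/* Note: the descriptor area (dx_ring) is allocated DMA-coherent because both
 * the NIC and the CPU access it directly, while buff_ring is a host-only
 * shadow array of buffer metadata, so a plain kcalloc() suffices there.
 */
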
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

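/* Illustrative note: the fls() expression computes the smallest page order
 * able to hold one whole RX frame: with 4 KiB pages, a 2 KiB frame gives
 * fls(1) - 1 = 0 (order-0 pages), and a hypothetical 16 KiB frame would give
 * fls(4) - 1 = 2 (an order-2, four-page block).  A larger rxpageorder in the
 * NIC configuration only raises this floor.
 */
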
struct aq_ring_s *
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	self->ring_type = ring_type;

	return 0;
}

static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

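/* Illustrative note: "in range" means strictly between head h and tail t on
 * the circular ring.  With h = 5 and t = 2 on an 8-descriptor ring the live
 * span wraps through 6, 7, 0, 1: i = 7 passes (h < i), i = 1 passes (i < t),
 * and i = 3 passes neither, so it is out of range.
 */
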
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

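/* Note: the two thresholds form a hysteresis band.  The queue is stopped as
 * soon as a worst-case packet (AQ_CFG_SKB_FRAGS_MAX descriptors) might no
 * longer fit, but is only woken once noticeably more space is available,
 * which avoids rapid stop/wake flapping around a single threshold.
 */
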
void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev,
				     AQ_NIC_RING2QMAP(ring->aq_nic,
						      ring->idx))) {
		netif_wake_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev,
				      AQ_NIC_RING2QMAP(ring->aq_nic,
						       ring->idx)))
		netif_stop_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}

bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				/* Stop if the packet's EOP descriptor has not
				 * been written back by hardware yet.
				 */
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop)) {
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;

			dev_kfree_skb_any(buff->skb);
		}
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

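/* Note: a false return (budget exhausted) signals the caller that completed
 * descriptors may remain, so the NAPI poll loop should schedule another
 * cleaning pass rather than re-arm interrupts.
 */
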
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

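/* Note: __skb_incr_checksum_unnecessary() sets CHECKSUM_UNNECESSARY on its
 * first call and bumps skb->csum_level on later ones, so a frame whose IP
 * header checksum and TCP/UDP checksum were both verified by hardware ends
 * up advertising two validated checksum levels to the stack.
 */
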
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			/* RSC/LRO: walk the descriptor chain and make sure the
			 * hardware has written back every buffer up to EOP.
			 */
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				/* Drop the whole aggregated packet. */
				buff_ = buff;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
							aq_nic_get_dev(self->aq_nic),
							buff_->rxdata.daddr,
							buff_->rxdata.pg_off,
							buff_->len,
							DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;
				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(self->aq_nic,
								   self->idx));

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

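/* Note: two receive paths are used above.  A small single-fragment frame is
 * wrapped in place with build_skb(), avoiding a copy but requiring tailroom
 * for struct skb_shared_info (the AQ_SKB_ALIGN reservation).  Everything
 * else goes through napi_alloc_skb(): only the header is copied, and the
 * remaining payload, including any RSC fragments, is attached as page
 * fragments.
 */
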
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
#endif
}

int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
		self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

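/* Note: refill is deliberately batched.  The early return above skips the
 * refill entirely until at least min(AQ_CFG_RX_REFILL_THRES, size / 2)
 * descriptors are free, amortizing page allocation and DMA mapping over
 * many ring slots per call.
 */
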
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}

unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
	unsigned int count = 0U;

	if (self->ring_type == ATL_RING_RX) {
		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
		data[count] = self->stats.rx.packets;
		data[++count] = self->stats.rx.jumbo_packets;
		data[++count] = self->stats.rx.lro_packets;
		data[++count] = self->stats.rx.errors;
	} else {
		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
		data[count] = self->stats.tx.packets;
		data[++count] = self->stats.tx.queue_restarts;
	}

	return ++count;
}