1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
7 #include <linux/vmalloc.h>
9 /* ENETC overhead: optional extension BD + 1 BD gap */
10 #define ENETC_TXBDS_NEEDED(val) ((val) + 2)
11 /* max # of chained Tx BDs is 15, including head and extension BD */
12 #define ENETC_MAX_SKB_FRAGS 13
13 #define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
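/* Worked example of the BD budget above: a worst-case skb carries
 * ENETC_MAX_SKB_FRAGS (13) fragments plus the linear head, i.e. 14 data BDs.
 * ENETC_TXBDS_NEEDED() adds the optional extension BD and the mandatory 1-BD
 * gap, so ENETC_TXBDS_MAX_NEEDED evaluates to 16 BDs.
 */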
15 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
18 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
20 struct enetc_ndev_priv *priv = netdev_priv(ndev);
21 struct enetc_bdr *tx_ring;
24 tx_ring = priv->tx_ring[skb->queue_mapping];
26 if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
27 if (unlikely(skb_linearize(skb)))
30 count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
31 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
32 netif_stop_subqueue(ndev, tx_ring->index);
33 return NETDEV_TX_BUSY;
36 count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
40 if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
41 netif_stop_subqueue(ndev, tx_ring->index);
46 dev_kfree_skb_any(skb);
50 static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
52 int l3_start, l3_hsize;
53 u16 l3_flags, l4_flags;
55 if (skb->ip_summed != CHECKSUM_PARTIAL)
58 switch (skb->csum_offset) {
59 case offsetof(struct tcphdr, check):
60 l4_flags = ENETC_TXBD_L4_TCP;
62 case offsetof(struct udphdr, check):
63 l4_flags = ENETC_TXBD_L4_UDP;
66 skb_checksum_help(skb);
70 l3_start = skb_network_offset(skb);
71 l3_hsize = skb_network_header_len(skb);
74 if (skb->protocol == htons(ETH_P_IPV6))
75 l3_flags = ENETC_TXBD_L3_IPV6;
78 txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
79 txbd->l4_csoff = l4_flags;
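/* Note on the checksum fields set above: l3_csoff packs the L3 header start
 * offset, header length and the IPv4/IPv6 flag via enetc_txbd_l3_csoff(),
 * while l4_csoff selects TCP vs. UDP based on skb->csum_offset. Any other
 * csum_offset falls back to skb_checksum_help(), i.e. a software checksum.
 */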
84 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
85 struct enetc_tx_swbd *tx_swbd)
87 if (tx_swbd->is_dma_page)
88 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
89 tx_swbd->len, DMA_TO_DEVICE);
91 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
92 tx_swbd->len, DMA_TO_DEVICE);
96 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
97 struct enetc_tx_swbd *tx_swbd)
100 enetc_unmap_tx_buff(tx_ring, tx_swbd);
103 dev_kfree_skb_any(tx_swbd->skb);
108 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
111 struct enetc_tx_swbd *tx_swbd;
113 int len = skb_headlen(skb);
114 union enetc_tx_bd temp_bd;
115 union enetc_tx_bd *txbd;
116 bool do_vlan, do_tstamp;
122 i = tx_ring->next_to_use;
123 txbd = ENETC_TXBD(*tx_ring, i);
126 dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
127 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
130 temp_bd.addr = cpu_to_le64(dma);
131 temp_bd.buf_len = cpu_to_le16(len);
134 tx_swbd = &tx_ring->tx_swbd[i];
137 tx_swbd->is_dma_page = 0;
140 do_vlan = skb_vlan_tag_present(skb);
141 do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
142 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
143 tx_swbd->do_tstamp = do_tstamp;
144 tx_swbd->check_wb = tx_swbd->do_tstamp;
146 if (do_vlan || do_tstamp)
147 flags |= ENETC_TXBD_FLAGS_EX;
149 if (enetc_tx_csum(skb, &temp_bd))
150 flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
151 else if (tx_ring->tsd_enable)
152 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
154 /* first BD needs frm_len and offload flags set */
155 temp_bd.frm_len = cpu_to_le16(skb->len);
156 temp_bd.flags = flags;
158 if (flags & ENETC_TXBD_FLAGS_TSE) {
161 temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
162 | (flags << ENETC_TXBD_FLAGS_OFFSET);
163 temp_bd.txstart = cpu_to_le32(temp);
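/* The launch-time (TSE) path above packs two values into one 32-bit word:
 * skb->skb_mstamp_ns >> 5, i.e. the transmit time with the low 5 bits
 * dropped (roughly 32 ns granularity), masked to ENETC_TXBD_TXSTART_MASK,
 * plus the BD flags shifted up to ENETC_TXBD_FLAGS_OFFSET.
 */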
166 if (flags & ENETC_TXBD_FLAGS_EX) {
169 enetc_clear_tx_bd(&temp_bd);
171 /* add extension BD for VLAN and/or timestamping */
176 if (unlikely(i == tx_ring->bd_count)) {
178 tx_swbd = tx_ring->tx_swbd;
179 txbd = ENETC_TXBD(*tx_ring, 0);
184 temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
185 temp_bd.ext.tpid = 0; /* < C-TAG */
186 e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
190 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
191 e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
194 temp_bd.ext.e_flags = e_flags;
198 frag = &skb_shinfo(skb)->frags[0];
199 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
200 len = skb_frag_size(frag);
201 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
203 if (dma_mapping_error(tx_ring->dev, dma))
207 enetc_clear_tx_bd(&temp_bd);
213 if (unlikely(i == tx_ring->bd_count)) {
215 tx_swbd = tx_ring->tx_swbd;
216 txbd = ENETC_TXBD(*tx_ring, 0);
220 temp_bd.addr = cpu_to_le64(dma);
221 temp_bd.buf_len = cpu_to_le16(len);
225 tx_swbd->is_dma_page = 1;
229 /* last BD needs 'F' bit set */
230 flags |= ENETC_TXBD_FLAGS_F;
231 temp_bd.flags = flags;
234 tx_ring->tx_swbd[i].skb = skb;
236 enetc_bdr_idx_inc(tx_ring, &i);
237 tx_ring->next_to_use = i;
239 skb_tx_timestamp(skb);
241 /* let H/W know BD ring has been updated */
242 enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
247 dev_err(tx_ring->dev, "DMA map error");
250 tx_swbd = &tx_ring->tx_swbd[i];
251 enetc_free_tx_skb(tx_ring, tx_swbd);
253 i = tx_ring->bd_count;
260 static irqreturn_t enetc_msix(int irq, void *data)
262 struct enetc_int_vector *v = data;
265 /* disable interrupts */
266 enetc_wr_reg(v->rbier, 0);
267 enetc_wr_reg(v->ricr1, v->rx_ictt);
269 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
270 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
272 napi_schedule(&v->napi);
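/* Interrupt scheme: the hard IRQ handler only masks the per-ring Rx/Tx
 * interrupt sources and schedules NAPI; enetc_poll() below does the actual
 * ring cleanup and re-enables the sources once it finishes under budget.
 */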
277 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
278 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
279 struct napi_struct *napi, int work_limit);
281 static void enetc_rx_dim_work(struct work_struct *w)
283 struct dim *dim = container_of(w, struct dim, work);
284 struct dim_cq_moder moder =
285 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
286 struct enetc_int_vector *v =
287 container_of(dim, struct enetc_int_vector, rx_dim);
289 v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
290 dim->state = DIM_START_MEASURE;
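/* Adaptive coalescing sketch: net_dim() (fed from enetc_rx_net_dim() below)
 * picks a moderation profile from the observed packet/byte rates, and this
 * work handler converts the suggested usec value into ENETC timer cycles.
 * The new rx_ictt takes effect when the ISR rewrites RBICR1 via v->ricr1.
 */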
293 static void enetc_rx_net_dim(struct enetc_int_vector *v)
295 struct dim_sample dim_sample;
299 if (!v->rx_napi_work)
302 dim_update_sample(v->comp_cnt,
303 v->rx_ring.stats.packets,
304 v->rx_ring.stats.bytes,
306 net_dim(&v->rx_dim, dim_sample);
309 static int enetc_poll(struct napi_struct *napi, int budget)
311 struct enetc_int_vector
312 *v = container_of(napi, struct enetc_int_vector, napi);
313 bool complete = true;
317 for (i = 0; i < v->count_tx_rings; i++)
318 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
321 work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
322 if (work_done == budget)
325 v->rx_napi_work = true;
330 napi_complete_done(napi, work_done);
332 if (likely(v->rx_dim_en))
335 v->rx_napi_work = false;
337 /* enable interrupts */
338 enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
340 for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
341 enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
347 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
349 int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
351 return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
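/* Example of the wrap-around math: with bd_count = 256, ci = 250 and a HW
 * consumer index pi = 5, the ring has 256 - 250 + 5 = 11 completed BDs.
 */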
354 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
357 u32 lo, hi, tstamp_lo;
359 lo = enetc_rd(hw, ENETC_SICTR0);
360 hi = enetc_rd(hw, ENETC_SICTR1);
361 tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
364 *tstamp = (u64)hi << 32 | tstamp_lo;
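/* The write-back BD only carries the low 32 bits of the timestamp; the high
 * word is taken from the free-running SICTR1 counter read at cleanup time.
 * The reconstruction has to account for SICTR0 having wrapped past tstamp_lo
 * between stamping and read-back, in which case hi would be one too large.
 */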
367 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
369 struct skb_shared_hwtstamps shhwtstamps;
371 if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
372 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
373 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
374 skb_tstamp_tx(skb, &shhwtstamps);
378 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
380 struct net_device *ndev = tx_ring->ndev;
381 int tx_frm_cnt = 0, tx_byte_cnt = 0;
382 struct enetc_tx_swbd *tx_swbd;
387 i = tx_ring->next_to_clean;
388 tx_swbd = &tx_ring->tx_swbd[i];
389 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
393 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
394 bool is_eof = !!tx_swbd->skb;
396 if (unlikely(tx_swbd->check_wb)) {
397 struct enetc_ndev_priv *priv = netdev_priv(ndev);
398 union enetc_tx_bd *txbd;
400 txbd = ENETC_TXBD(*tx_ring, i);
402 if (txbd->flags & ENETC_TXBD_FLAGS_W &&
403 tx_swbd->do_tstamp) {
404 enetc_get_tx_tstamp(&priv->si->hw, txbd,
410 if (likely(tx_swbd->dma))
411 enetc_unmap_tx_buff(tx_ring, tx_swbd);
414 if (unlikely(do_tstamp)) {
415 enetc_tstamp_tx(tx_swbd->skb, tstamp);
418 napi_consume_skb(tx_swbd->skb, napi_budget);
422 tx_byte_cnt += tx_swbd->len;
427 if (unlikely(i == tx_ring->bd_count)) {
429 tx_swbd = tx_ring->tx_swbd;
432 /* BD iteration loop end */
435 /* re-arm interrupt source */
436 enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
437 BIT(16 + tx_ring->index));
440 if (unlikely(!bds_to_clean))
441 bds_to_clean = enetc_bd_ready_count(tx_ring, i);
444 tx_ring->next_to_clean = i;
445 tx_ring->stats.packets += tx_frm_cnt;
446 tx_ring->stats.bytes += tx_byte_cnt;
448 if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
449 __netif_subqueue_stopped(ndev, tx_ring->index) &&
450 (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
451 netif_wake_subqueue(ndev, tx_ring->index);
454 return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
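/* Returning false here means the Tx work budget (ENETC_DEFAULT_TX_WORK) was
 * exhausted; enetc_poll() treats that as incomplete work and stays in
 * polling mode instead of re-enabling the ring interrupts.
 */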
457 static bool enetc_new_page(struct enetc_bdr *rx_ring,
458 struct enetc_rx_swbd *rx_swbd)
463 page = dev_alloc_page();
467 addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
468 if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
475 rx_swbd->page = page;
476 rx_swbd->page_offset = ENETC_RXB_PAD;
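/* Rx buffer layout: each page is DMA-mapped once and handed to HW in halves
 * (the page_offset XOR with ENETC_RXB_TRUESIZE in enetc_put_rx_buff()
 * suggests ENETC_RXB_TRUESIZE is half a page). The initial ENETC_RXB_PAD
 * offset leaves headroom in front of the DMA data for build_skb().
 */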
481 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
483 struct enetc_rx_swbd *rx_swbd;
484 union enetc_rx_bd *rxbd;
487 i = rx_ring->next_to_use;
488 rx_swbd = &rx_ring->rx_swbd[i];
489 rxbd = enetc_rxbd(rx_ring, i);
491 for (j = 0; j < buff_cnt; j++) {
493 if (unlikely(!rx_swbd->page)) {
494 if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
495 rx_ring->stats.rx_alloc_errs++;
501 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
502 rx_swbd->page_offset);
503 /* clear 'R' as well */
506 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
509 if (unlikely(i == rx_ring->bd_count)) {
511 rx_swbd = rx_ring->rx_swbd;
516 rx_ring->next_to_alloc = i; /* keep track for page reuse */
517 rx_ring->next_to_use = i;
518 /* update ENETC's consumer index */
519 enetc_wr_reg(rx_ring->rcir, i);
525 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
526 static void enetc_get_rx_tstamp(struct net_device *ndev,
527 union enetc_rx_bd *rxbd,
530 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
531 struct enetc_ndev_priv *priv = netdev_priv(ndev);
532 struct enetc_hw *hw = &priv->si->hw;
533 u32 lo, hi, tstamp_lo;
536 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
537 lo = enetc_rd(hw, ENETC_SICTR0);
538 hi = enetc_rd(hw, ENETC_SICTR1);
539 rxbd = enetc_rxbd_ext(rxbd);
540 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
544 tstamp = (u64)hi << 32 | tstamp_lo;
545 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
546 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
551 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
552 union enetc_rx_bd *rxbd, struct sk_buff *skb)
554 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
555 struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
558 if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
559 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
561 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
562 skb->ip_summed = CHECKSUM_COMPLETE;
565 /* If a VLAN tag was extracted, copy it to the skb. For now we assume a
566 * standard TPID, but the HW also supports custom values
568 if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
569 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
570 le16_to_cpu(rxbd->r.vlan_opt));
571 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
572 if (priv->active_offloads & ENETC_F_RX_TSTAMP)
573 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
577 static void enetc_process_skb(struct enetc_bdr *rx_ring,
580 skb_record_rx_queue(skb, rx_ring->index);
581 skb->protocol = eth_type_trans(skb, rx_ring->ndev);
584 static bool enetc_page_reusable(struct page *page)
586 return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
589 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
590 struct enetc_rx_swbd *old)
592 struct enetc_rx_swbd *new;
594 new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
596 /* next buf that may reuse a page */
597 enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
599 /* copy page reference */
603 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
606 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
608 dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
609 rx_swbd->page_offset,
610 size, DMA_FROM_DEVICE);
614 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
615 struct enetc_rx_swbd *rx_swbd)
617 if (likely(enetc_page_reusable(rx_swbd->page))) {
618 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
619 page_ref_inc(rx_swbd->page);
621 enetc_reuse_page(rx_ring, rx_swbd);
623 /* sync for use by the device */
624 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
625 rx_swbd->page_offset,
629 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
630 PAGE_SIZE, DMA_FROM_DEVICE);
633 rx_swbd->page = NULL;
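/* Page recycling above: if the driver holds the only reference and the page
 * is not pfmemalloc, flip page_offset to the other half of the page, take an
 * extra reference and queue the buffer for reuse at next_to_alloc; otherwise
 * the page is unmapped and left for the stack to free along with the skb.
 */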
636 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
639 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
643 ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
644 skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
645 if (unlikely(!skb)) {
646 rx_ring->stats.rx_alloc_errs++;
650 skb_reserve(skb, ENETC_RXB_PAD);
651 __skb_put(skb, size);
653 enetc_put_rx_buff(rx_ring, rx_swbd);
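/* build_skb() wraps the skb directly around the DMA buffer (head at
 * ba - ENETC_RXB_PAD, buffer size ENETC_RXB_TRUESIZE), so the frame head is
 * received without copying; skb_reserve() re-establishes the ENETC_RXB_PAD
 * headroom and __skb_put() exposes the received bytes.
 */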
658 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
659 u16 size, struct sk_buff *skb)
661 struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
663 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
664 rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
666 enetc_put_rx_buff(rx_ring, rx_swbd);
669 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
671 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
672 struct napi_struct *napi, int work_limit)
674 int rx_frm_cnt = 0, rx_byte_cnt = 0;
677 cleaned_cnt = enetc_bd_unused(rx_ring);
678 /* next descriptor to process */
679 i = rx_ring->next_to_clean;
681 while (likely(rx_frm_cnt < work_limit)) {
682 union enetc_rx_bd *rxbd;
687 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
688 int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
690 cleaned_cnt -= count;
693 rxbd = enetc_rxbd(rx_ring, i);
694 bd_status = le32_to_cpu(rxbd->r.lstatus);
698 enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
699 dma_rmb(); /* for reading other rxbd fields */
700 size = le16_to_cpu(rxbd->r.buf_len);
701 skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
705 enetc_get_offloads(rx_ring, rxbd, skb);
709 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
710 if (unlikely(++i == rx_ring->bd_count))
713 if (unlikely(bd_status &
714 ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
716 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
718 bd_status = le32_to_cpu(rxbd->r.lstatus);
720 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
721 if (unlikely(++i == rx_ring->bd_count))
725 rx_ring->ndev->stats.rx_dropped++;
726 rx_ring->ndev->stats.rx_errors++;
731 /* not last BD in frame? */
732 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
733 bd_status = le32_to_cpu(rxbd->r.lstatus);
734 size = ENETC_RXB_DMA_SIZE;
736 if (bd_status & ENETC_RXBD_LSTATUS_F) {
738 size = le16_to_cpu(rxbd->r.buf_len);
741 enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
745 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
746 if (unlikely(++i == rx_ring->bd_count))
750 rx_byte_cnt += skb->len;
752 enetc_process_skb(rx_ring, skb);
754 napi_gro_receive(napi, skb);
759 rx_ring->next_to_clean = i;
761 rx_ring->stats.packets += rx_frm_cnt;
762 rx_ring->stats.bytes += rx_byte_cnt;
767 /* Probing and Init */
768 #define ENETC_MAX_RFS_SIZE 64
769 void enetc_get_si_caps(struct enetc_si *si)
771 struct enetc_hw *hw = &si->hw;
774 /* find out how many of various resources we have to work with */
775 val = enetc_rd(hw, ENETC_SICAPR0);
776 si->num_rx_rings = (val >> 16) & 0xff;
777 si->num_tx_rings = val & 0xff;
779 val = enetc_rd(hw, ENETC_SIRFSCAPR);
780 si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
781 si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
784 val = enetc_rd(hw, ENETC_SIPCAPR0);
785 if (val & ENETC_SIPCAPR0_RSS) {
788 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
789 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
792 if (val & ENETC_SIPCAPR0_QBV)
793 si->hw_features |= ENETC_SI_F_QBV;
795 if (val & ENETC_SIPCAPR0_PSFP)
796 si->hw_features |= ENETC_SI_F_PSFP;
799 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
801 r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
802 &r->bd_dma_base, GFP_KERNEL);
806 /* h/w requires 128B alignment */
807 if (!IS_ALIGNED(r->bd_dma_base, 128)) {
808 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
816 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
820 txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
824 err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
830 txr->next_to_clean = 0;
831 txr->next_to_use = 0;
836 static void enetc_free_txbdr(struct enetc_bdr *txr)
840 for (i = 0; i < txr->bd_count; i++)
841 enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
843 size = txr->bd_count * sizeof(union enetc_tx_bd);
845 dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
852 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
856 for (i = 0; i < priv->num_tx_rings; i++) {
857 err = enetc_alloc_txbdr(priv->tx_ring[i]);
867 enetc_free_txbdr(priv->tx_ring[i]);
872 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
876 for (i = 0; i < priv->num_tx_rings; i++)
877 enetc_free_txbdr(priv->tx_ring[i]);
880 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
882 size_t size = sizeof(union enetc_rx_bd);
885 rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
892 err = enetc_dma_alloc_bdr(rxr, size);
898 rxr->next_to_clean = 0;
899 rxr->next_to_use = 0;
900 rxr->next_to_alloc = 0;
901 rxr->ext_en = extended;
906 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
910 size = rxr->bd_count * sizeof(union enetc_rx_bd);
912 dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
919 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
921 bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
924 for (i = 0; i < priv->num_rx_rings; i++) {
925 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
935 enetc_free_rxbdr(priv->rx_ring[i]);
940 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
944 for (i = 0; i < priv->num_rx_rings; i++)
945 enetc_free_rxbdr(priv->rx_ring[i]);
948 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
952 if (!tx_ring->tx_swbd)
955 for (i = 0; i < tx_ring->bd_count; i++) {
956 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
958 enetc_free_tx_skb(tx_ring, tx_swbd);
961 tx_ring->next_to_clean = 0;
962 tx_ring->next_to_use = 0;
965 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
969 if (!rx_ring->rx_swbd)
972 for (i = 0; i < rx_ring->bd_count; i++) {
973 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
978 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
979 PAGE_SIZE, DMA_FROM_DEVICE);
980 __free_page(rx_swbd->page);
981 rx_swbd->page = NULL;
984 rx_ring->next_to_clean = 0;
985 rx_ring->next_to_use = 0;
986 rx_ring->next_to_alloc = 0;
989 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
993 for (i = 0; i < priv->num_rx_rings; i++)
994 enetc_free_rx_ring(priv->rx_ring[i]);
996 for (i = 0; i < priv->num_tx_rings; i++)
997 enetc_free_tx_ring(priv->tx_ring[i]);
1000 static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1002 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1004 cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
1009 /* h/w requires 128B alignment */
1010 if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
1011 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
1015 cbdr->next_to_clean = 0;
1016 cbdr->next_to_use = 0;
1021 static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1023 int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1025 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
1026 cbdr->bd_base = NULL;
1029 static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
1031 /* set CBDR cache attributes */
1032 enetc_wr(hw, ENETC_SICAR2,
1033 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1035 enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
1036 enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
1037 enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
1039 enetc_wr(hw, ENETC_SICBDRPIR, 0);
1040 enetc_wr(hw, ENETC_SICBDRCIR, 0);
1043 enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
1045 cbdr->pir = hw->reg + ENETC_SICBDRPIR;
1046 cbdr->cir = hw->reg + ENETC_SICBDRCIR;
1049 static void enetc_clear_cbdr(struct enetc_hw *hw)
1051 enetc_wr(hw, ENETC_SICBDRMR, 0);
1054 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1059 rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1063 /* Set up RSS table defaults */
1064 for (i = 0; i < si->num_rss; i++)
1065 rss_table[i] = i % num_groups;
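/* Example: with a 64-entry indirection table and 4 Rx rings the defaults
 * come out as 0,1,2,3,0,1,2,3,... so hashed flows spread evenly across the
 * enabled rings.
 */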
1067 enetc_set_rss_table(si, rss_table, si->num_rss);
1074 static int enetc_configure_si(struct enetc_ndev_priv *priv)
1076 struct enetc_si *si = priv->si;
1077 struct enetc_hw *hw = &si->hw;
1080 enetc_setup_cbdr(hw, &si->cbd_ring);
1081 /* set SI cache attributes */
1082 enetc_wr(hw, ENETC_SICAR0,
1083 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1084 enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1086 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1089 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1097 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1099 struct enetc_si *si = priv->si;
1100 int cpus = num_online_cpus();
1102 priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
1103 priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
1105 /* Enable all available TX rings in order to configure as many
1106 * priorities as possible, when needed.
1107 * TODO: Make # of TX rings run-time configurable
1109 priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1110 priv->num_tx_rings = si->num_tx_rings;
1111 priv->bdr_int_num = cpus;
1112 priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
1113 priv->tx_ictt = ENETC_TXIC_TIMETHR;
1116 si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
1119 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1121 struct enetc_si *si = priv->si;
1124 err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
1128 priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1130 if (!priv->cls_rules) {
1135 err = enetc_configure_si(priv);
1142 kfree(priv->cls_rules);
1144 enetc_clear_cbdr(&si->hw);
1145 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1150 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1152 struct enetc_si *si = priv->si;
1154 enetc_clear_cbdr(&si->hw);
1155 enetc_free_cbdr(priv->dev, &si->cbd_ring);
1157 kfree(priv->cls_rules);
1160 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1162 int idx = tx_ring->index;
1165 enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1166 lower_32_bits(tx_ring->bd_dma_base));
1168 enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1169 upper_32_bits(tx_ring->bd_dma_base));
1171 WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1172 enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1173 ENETC_RTBLENR_LEN(tx_ring->bd_count));
1175 /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1176 tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1177 tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1179 /* enable Tx ints by setting pkt thr to 1 */
1180 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
1182 tbmr = ENETC_TBMR_EN;
1183 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1184 tbmr |= ENETC_TBMR_VIH;
1187 enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1189 tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1190 tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1191 tx_ring->idr = hw->reg + ENETC_SITXIDR;
1194 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1196 int idx = rx_ring->index;
1199 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1200 lower_32_bits(rx_ring->bd_dma_base));
1202 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1203 upper_32_bits(rx_ring->bd_dma_base));
1205 WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1206 enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1207 ENETC_RTBLENR_LEN(rx_ring->bd_count));
1209 enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1211 enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1213 /* enable Rx ints by setting pkt thr to 1 */
1214 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
1216 rbmr = ENETC_RBMR_EN;
1218 if (rx_ring->ext_en)
1219 rbmr |= ENETC_RBMR_BDS;
1221 if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1222 rbmr |= ENETC_RBMR_VTE;
1224 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1225 rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1227 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1230 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1233 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1237 for (i = 0; i < priv->num_tx_rings; i++)
1238 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1240 for (i = 0; i < priv->num_rx_rings; i++)
1241 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1244 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1246 int idx = rx_ring->index;
1248 /* disable EN bit on ring */
1249 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1252 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1254 int delay = 8, timeout = 100;
1255 int idx = tx_ring->index;
1257 /* disable EN bit on ring */
1258 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1260 /* wait for busy to clear */
1261 while (delay < timeout &&
1262 enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1267 if (delay >= timeout)
1268 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1272 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1276 for (i = 0; i < priv->num_tx_rings; i++)
1277 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1279 for (i = 0; i < priv->num_rx_rings; i++)
1280 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1285 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1287 struct pci_dev *pdev = priv->si->pdev;
1291 for (i = 0; i < priv->bdr_int_num; i++) {
1292 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1293 struct enetc_int_vector *v = priv->int_vector[i];
1294 int entry = ENETC_BDR_INT_BASE_IDX + i;
1295 struct enetc_hw *hw = &priv->si->hw;
1297 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1298 priv->ndev->name, i);
1299 err = request_irq(irq, enetc_msix, 0, v->name, v);
1301 dev_err(priv->dev, "request_irq() failed!\n");
1306 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1307 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1308 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
1310 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1312 for (j = 0; j < v->count_tx_rings; j++) {
1313 int idx = v->tx_ring[j].index;
1315 enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1317 cpumask_clear(&cpu_mask);
1318 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1319 irq_set_affinity_hint(irq, &cpu_mask);
1326 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1328 irq_set_affinity_hint(irq, NULL);
1329 free_irq(irq, priv->int_vector[i]);
1335 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1337 struct pci_dev *pdev = priv->si->pdev;
1340 for (i = 0; i < priv->bdr_int_num; i++) {
1341 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1343 irq_set_affinity_hint(irq, NULL);
1344 free_irq(irq, priv->int_vector[i]);
1348 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
1350 struct enetc_hw *hw = &priv->si->hw;
1354 /* enable Tx & Rx event indication */
1356 (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
1357 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
1358 /* init to non-0 minimum, will be adjusted later */
1361 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
1365 for (i = 0; i < priv->num_rx_rings; i++) {
1366 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
1367 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
1368 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1371 if (priv->ic_mode & ENETC_IC_TX_MANUAL)
1372 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
1374 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
1376 for (i = 0; i < priv->num_tx_rings; i++) {
1377 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
1378 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
1379 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
1383 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
1387 for (i = 0; i < priv->num_tx_rings; i++)
1388 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1390 for (i = 0; i < priv->num_rx_rings; i++)
1391 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1394 static int enetc_phylink_connect(struct net_device *ndev)
1396 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1397 struct ethtool_eee edata;
1401 return 0; /* phy-less mode */
1403 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
1405 dev_err(&ndev->dev, "could not attach to PHY\n");
1409 /* disable EEE autoneg, until ENETC driver supports it */
1410 memset(&edata, 0, sizeof(struct ethtool_eee));
1411 phylink_ethtool_set_eee(priv->phylink, &edata);
1416 void enetc_start(struct net_device *ndev)
1418 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1421 enetc_setup_interrupts(priv);
1423 for (i = 0; i < priv->bdr_int_num; i++) {
1424 int irq = pci_irq_vector(priv->si->pdev,
1425 ENETC_BDR_INT_BASE_IDX + i);
1427 napi_enable(&priv->int_vector[i]->napi);
1432 phylink_start(priv->phylink);
1434 netif_carrier_on(ndev);
1436 netif_tx_start_all_queues(ndev);
1439 int enetc_open(struct net_device *ndev)
1441 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1444 err = enetc_setup_irqs(priv);
1448 err = enetc_phylink_connect(ndev);
1450 goto err_phy_connect;
1452 err = enetc_alloc_tx_resources(priv);
1456 err = enetc_alloc_rx_resources(priv);
1460 err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1462 goto err_set_queues;
1464 err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1466 goto err_set_queues;
1468 enetc_setup_bdrs(priv);
1474 enetc_free_rx_resources(priv);
1476 enetc_free_tx_resources(priv);
1479 phylink_disconnect_phy(priv->phylink);
1481 enetc_free_irqs(priv);
1486 void enetc_stop(struct net_device *ndev)
1488 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1491 netif_tx_stop_all_queues(ndev);
1493 for (i = 0; i < priv->bdr_int_num; i++) {
1494 int irq = pci_irq_vector(priv->si->pdev,
1495 ENETC_BDR_INT_BASE_IDX + i);
1498 napi_synchronize(&priv->int_vector[i]->napi);
1499 napi_disable(&priv->int_vector[i]->napi);
1503 phylink_stop(priv->phylink);
1505 netif_carrier_off(ndev);
1507 enetc_clear_interrupts(priv);
1510 int enetc_close(struct net_device *ndev)
1512 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1515 enetc_clear_bdrs(priv);
1518 phylink_disconnect_phy(priv->phylink);
1519 enetc_free_rxtx_rings(priv);
1520 enetc_free_rx_resources(priv);
1521 enetc_free_tx_resources(priv);
1522 enetc_free_irqs(priv);
1527 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
1529 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1530 struct tc_mqprio_qopt *mqprio = type_data;
1531 struct enetc_bdr *tx_ring;
1535 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1536 num_tc = mqprio->num_tc;
1539 netdev_reset_tc(ndev);
1540 netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1542 /* Reset all ring priorities to 0 */
1543 for (i = 0; i < priv->num_tx_rings; i++) {
1544 tx_ring = priv->tx_ring[i];
1545 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
1551 /* Check if we have enough BD rings available to accommodate all TCs */
1552 if (num_tc > priv->num_tx_rings) {
1553 netdev_err(ndev, "Max %d traffic classes supported\n",
1554 priv->num_tx_rings);
1558 /* For the moment, we use only one BD ring per TC.
1560 * Configure num_tc BD rings with increasing priorities.
1562 for (i = 0; i < num_tc; i++) {
1563 tx_ring = priv->tx_ring[i];
1564 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
1567 /* Reset the number of netdev queues based on the TC count */
1568 netif_set_real_num_tx_queues(ndev, num_tc);
1570 netdev_set_num_tc(ndev, num_tc);
1572 /* Each TC is associated with one netdev queue */
1573 for (i = 0; i < num_tc; i++)
1574 netdev_set_tc_queue(ndev, i, 1, i);
1579 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1583 case TC_SETUP_QDISC_MQPRIO:
1584 return enetc_setup_tc_mqprio(ndev, type_data);
1585 case TC_SETUP_QDISC_TAPRIO:
1586 return enetc_setup_tc_taprio(ndev, type_data);
1587 case TC_SETUP_QDISC_CBS:
1588 return enetc_setup_tc_cbs(ndev, type_data);
1589 case TC_SETUP_QDISC_ETF:
1590 return enetc_setup_tc_txtime(ndev, type_data);
1591 case TC_SETUP_BLOCK:
1592 return enetc_setup_tc_psfp(ndev, type_data);
1598 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1600 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1601 struct net_device_stats *stats = &ndev->stats;
1602 unsigned long packets = 0, bytes = 0;
1605 for (i = 0; i < priv->num_rx_rings; i++) {
1606 packets += priv->rx_ring[i]->stats.packets;
1607 bytes += priv->rx_ring[i]->stats.bytes;
1610 stats->rx_packets = packets;
1611 stats->rx_bytes = bytes;
1615 for (i = 0; i < priv->num_tx_rings; i++) {
1616 packets += priv->tx_ring[i]->stats.packets;
1617 bytes += priv->tx_ring[i]->stats.bytes;
1620 stats->tx_packets = packets;
1621 stats->tx_bytes = bytes;
1626 static int enetc_set_rss(struct net_device *ndev, int en)
1628 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1629 struct enetc_hw *hw = &priv->si->hw;
1632 enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1634 reg = enetc_rd(hw, ENETC_SIMR);
1635 reg &= ~ENETC_SIMR_RSSE;
1636 reg |= (en) ? ENETC_SIMR_RSSE : 0;
1637 enetc_wr(hw, ENETC_SIMR, reg);
1642 static int enetc_set_psfp(struct net_device *ndev, int en)
1644 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1648 err = enetc_psfp_enable(priv);
1652 priv->active_offloads |= ENETC_F_QCI;
1656 err = enetc_psfp_disable(priv);
1660 priv->active_offloads &= ~ENETC_F_QCI;
1665 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
1667 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1670 for (i = 0; i < priv->num_rx_rings; i++)
1671 enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
1674 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
1676 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1679 for (i = 0; i < priv->num_tx_rings; i++)
1680 enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
1683 int enetc_set_features(struct net_device *ndev,
1684 netdev_features_t features)
1686 netdev_features_t changed = ndev->features ^ features;
1689 if (changed & NETIF_F_RXHASH)
1690 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1692 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1693 enetc_enable_rxvlan(ndev,
1694 !!(features & NETIF_F_HW_VLAN_CTAG_RX));
1696 if (changed & NETIF_F_HW_VLAN_CTAG_TX)
1697 enetc_enable_txvlan(ndev,
1698 !!(features & NETIF_F_HW_VLAN_CTAG_TX));
1700 if (changed & NETIF_F_HW_TC)
1701 err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
1706 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1707 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
1709 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1710 struct hwtstamp_config config;
1713 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1716 switch (config.tx_type) {
1717 case HWTSTAMP_TX_OFF:
1718 priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
1720 case HWTSTAMP_TX_ON:
1721 priv->active_offloads |= ENETC_F_TX_TSTAMP;
1727 ao = priv->active_offloads;
1728 switch (config.rx_filter) {
1729 case HWTSTAMP_FILTER_NONE:
1730 priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
1733 priv->active_offloads |= ENETC_F_RX_TSTAMP;
1734 config.rx_filter = HWTSTAMP_FILTER_ALL;
1737 if (netif_running(ndev) && ao != priv->active_offloads) {
1742 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1746 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
1748 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1749 struct hwtstamp_config config;
1753 if (priv->active_offloads & ENETC_F_TX_TSTAMP)
1754 config.tx_type = HWTSTAMP_TX_ON;
1756 config.tx_type = HWTSTAMP_TX_OFF;
1758 config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
1759 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1761 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1766 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1768 struct enetc_ndev_priv *priv = netdev_priv(ndev);
1769 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1770 if (cmd == SIOCSHWTSTAMP)
1771 return enetc_hwtstamp_set(ndev, rq);
1772 if (cmd == SIOCGHWTSTAMP)
1773 return enetc_hwtstamp_get(ndev, rq);
1779 return phylink_mii_ioctl(priv->phylink, rq, cmd);
1782 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1784 struct pci_dev *pdev = priv->si->pdev;
1786 int i, n, err, nvec;
1788 nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1789 /* allocate MSIX for both messaging and Rx/Tx interrupts */
1790 n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1798 /* # of tx rings per int vector */
1799 v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1801 for (i = 0; i < priv->bdr_int_num; i++) {
1802 struct enetc_int_vector *v;
1803 struct enetc_bdr *bdr;
1806 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
1812 priv->int_vector[i] = v;
1814 /* init defaults for adaptive IC */
1815 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
1817 v->rx_dim_en = true;
1819 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
1820 netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1822 v->count_tx_rings = v_tx_rings;
1824 for (j = 0; j < v_tx_rings; j++) {
1827 /* default tx ring mapping policy */
1828 if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1829 idx = 2 * j + i; /* 2 CPUs */
1831 idx = j + i * v_tx_rings; /* default */
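/* Mapping example: the "2 CPUs" comment above suggests ENETC_MAX_BDR_INT is
 * 2. With 2 vectors and 8 Tx rings the interleaved policy gives vector 0
 * rings 0,2,4,6 and vector 1 rings 1,3,5,7, while the default policy assigns
 * each vector a contiguous block of v_tx_rings rings.
 */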
1833 __set_bit(idx, &v->tx_rings_map);
1834 bdr = &v->tx_ring[j];
1836 bdr->ndev = priv->ndev;
1837 bdr->dev = priv->dev;
1838 bdr->bd_count = priv->tx_bd_count;
1839 priv->tx_ring[idx] = bdr;
1844 bdr->ndev = priv->ndev;
1845 bdr->dev = priv->dev;
1846 bdr->bd_count = priv->rx_bd_count;
1847 priv->rx_ring[i] = bdr;
1854 netif_napi_del(&priv->int_vector[i]->napi);
1855 cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
1856 kfree(priv->int_vector[i]);
1859 pci_free_irq_vectors(pdev);
1864 void enetc_free_msix(struct enetc_ndev_priv *priv)
1868 for (i = 0; i < priv->bdr_int_num; i++) {
1869 struct enetc_int_vector *v = priv->int_vector[i];
1871 netif_napi_del(&v->napi);
1872 cancel_work_sync(&v->rx_dim.work);
1875 for (i = 0; i < priv->num_rx_rings; i++)
1876 priv->rx_ring[i] = NULL;
1878 for (i = 0; i < priv->num_tx_rings; i++)
1879 priv->tx_ring[i] = NULL;
1881 for (i = 0; i < priv->bdr_int_num; i++) {
1882 kfree(priv->int_vector[i]);
1883 priv->int_vector[i] = NULL;
1886 /* disable all MSIX for this device */
1887 pci_free_irq_vectors(priv->si->pdev);
1890 static void enetc_kfree_si(struct enetc_si *si)
1892 char *p = (char *)si - si->pad;
1897 static void enetc_detect_errata(struct enetc_si *si)
1899 if (si->pdev->revision == ENETC_REV1)
1900 si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
1904 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1906 struct enetc_si *si, *p;
1907 struct enetc_hw *hw;
1912 err = pci_enable_device_mem(pdev);
1914 dev_err(&pdev->dev, "device enable failed\n");
1918 /* set up for high or low DMA */
1919 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1921 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1924 "DMA configuration failed: 0x%x\n", err);
1929 err = pci_request_mem_regions(pdev, name);
1931 dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
1932 goto err_pci_mem_reg;
1935 pci_set_master(pdev);
1937 alloc_size = sizeof(struct enetc_si);
1939 /* align priv to 32B */
1940 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1941 alloc_size += sizeof_priv;
1943 /* force 32B alignment for enetc_si */
1944 alloc_size += ENETC_SI_ALIGN - 1;
1946 p = kzalloc(alloc_size, GFP_KERNEL);
1952 si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1953 si->pad = (char *)si - (char *)p;
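/* Alignment trick: the allocation is padded by ENETC_SI_ALIGN - 1 bytes so
 * that si can be aligned with PTR_ALIGN(); si->pad records how far si was
 * advanced from the kzalloc() pointer so that enetc_kfree_si() can recover
 * the original address to kfree().
 */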
1955 pci_set_drvdata(pdev, si);
1959 len = pci_resource_len(pdev, ENETC_BAR_REGS);
1960 hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1963 dev_err(&pdev->dev, "ioremap() failed\n");
1966 if (len > ENETC_PORT_BASE)
1967 hw->port = hw->reg + ENETC_PORT_BASE;
1968 if (len > ENETC_GLOBAL_BASE)
1969 hw->global = hw->reg + ENETC_GLOBAL_BASE;
1971 enetc_detect_errata(si);
1978 pci_release_mem_regions(pdev);
1981 pci_disable_device(pdev);
1986 void enetc_pci_remove(struct pci_dev *pdev)
1988 struct enetc_si *si = pci_get_drvdata(pdev);
1989 struct enetc_hw *hw = &si->hw;
1993 pci_release_mem_regions(pdev);
1994 pci_disable_device(pdev);