1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3 * Copyright 2016-2020 NXP
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
18 #include <net/pkt_cls.h>
21 #include "dpaa2-eth.h"
23 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
24 * using trace events only need to #include <trace/events/sched.h>
26 #define CREATE_TRACE_POINTS
27 #include "dpaa2-eth-trace.h"
29 MODULE_LICENSE("Dual BSD/GPL");
30 MODULE_AUTHOR("Freescale Semiconductor, Inc");
31 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
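/* Translate a hardware-provided IOVA into a kernel virtual address.
 * When the device sits behind an IOMMU, the IOVA is first resolved to a
 * physical address through the IOMMU domain; otherwise the two are
 * identical.
 */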
33 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
36 phys_addr_t phys_addr;
38 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
40 return phys_to_virt(phys_addr);
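/* Propagate the hardware Rx checksum validation result to the skb.
 * The frame annotation status bits are only trusted when NETIF_F_RXCSUM
 * is enabled and both the L3 and L4 validation bits are set.
 */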
43 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
47 skb_checksum_none_assert(skb);
49 /* HW checksum validation is disabled, nothing to do here */
50 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
53 /* Read checksum validation bits */
54 if (!((fd_status & DPAA2_FAS_L3CV) &&
55 (fd_status & DPAA2_FAS_L4CV)))
58 /* Inform the stack there's no need to compute L3/L4 csum anymore */
59 skb->ip_summed = CHECKSUM_UNNECESSARY;
62 /* Free a received FD.
63 * Not to be used for Tx conf FDs or on any other paths.
65 static void free_rx_fd(struct dpaa2_eth_priv *priv,
66 const struct dpaa2_fd *fd,
69 struct device *dev = priv->net_dev->dev.parent;
70 dma_addr_t addr = dpaa2_fd_get_addr(fd);
71 u8 fd_format = dpaa2_fd_get_format(fd);
72 struct dpaa2_sg_entry *sgt;
76 /* If single buffer frame, just free the data buffer */
77 if (fd_format == dpaa2_fd_single)
79 else if (fd_format != dpaa2_fd_sg)
80 /* We don't support any other format */
83 /* For S/G frames, we first need to free all SG entries
84 * except the first one, which was taken care of already
86 sgt = vaddr + dpaa2_fd_get_offset(fd);
87 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
88 addr = dpaa2_sg_get_addr(&sgt[i]);
89 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
90 dma_unmap_page(dev, addr, priv->rx_buf_size,
93 free_pages((unsigned long)sg_vaddr, 0);
94 if (dpaa2_sg_is_final(&sgt[i]))
99 free_pages((unsigned long)vaddr, 0);
102 /* Build a linear skb based on a single-buffer frame descriptor */
103 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
104 const struct dpaa2_fd *fd,
107 struct sk_buff *skb = NULL;
108 u16 fd_offset = dpaa2_fd_get_offset(fd);
109 u32 fd_length = dpaa2_fd_get_len(fd);
113 skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
117 skb_reserve(skb, fd_offset);
118 skb_put(skb, fd_length);
123 /* Build a non linear (fragmented) skb based on a S/G table */
124 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
125 struct dpaa2_eth_channel *ch,
126 struct dpaa2_sg_entry *sgt)
128 struct sk_buff *skb = NULL;
129 struct device *dev = priv->net_dev->dev.parent;
134 struct page *page, *head_page;
138 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
139 struct dpaa2_sg_entry *sge = &sgt[i];
141 /* NOTE: We only support SG entries in dpaa2_sg_single format,
142 * but this is the only format we may receive from HW anyway
145 /* Get the address and length from the S/G entry */
146 sg_addr = dpaa2_sg_get_addr(sge);
147 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
148 dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
151 sg_length = dpaa2_sg_get_len(sge);
154 /* We build the skb around the first data buffer */
155 skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
156 if (unlikely(!skb)) {
157 /* Free the first SG entry now, since we already
158 * unmapped it and obtained the virtual address
160 free_pages((unsigned long)sg_vaddr, 0);
162 /* We still need to subtract the buffers used
163 * by this FD from our software counter
165 while (!dpaa2_sg_is_final(&sgt[i]) &&
166 i < DPAA2_ETH_MAX_SG_ENTRIES)
171 sg_offset = dpaa2_sg_get_offset(sge);
172 skb_reserve(skb, sg_offset);
173 skb_put(skb, sg_length);
175 /* Rest of the data buffers are stored as skb frags */
176 page = virt_to_page(sg_vaddr);
177 head_page = virt_to_head_page(sg_vaddr);
179 /* Offset in page (which may be compound).
180 * Data in subsequent SG entries is stored from the
181 * beginning of the buffer, so we don't need to add the
184 page_offset = ((unsigned long)sg_vaddr &
185 (PAGE_SIZE - 1)) +
186 (page_address(page) - page_address(head_page));
188 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
189 sg_length, priv->rx_buf_size);
192 if (dpaa2_sg_is_final(sge))
196 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
198 /* Count all data buffers + SG table buffer */
199 ch->buf_count -= i + 2;
204 /* Free buffers acquired from the buffer pool or which were meant to
205 * be released in the pool
207 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
209 struct device *dev = priv->net_dev->dev.parent;
213 for (i = 0; i < count; i++) {
214 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
215 dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
217 free_pages((unsigned long)vaddr, 0);
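/* Recycle a buffer consumed on the XDP path back into the buffer pool.
 * Addresses are batched and released to QBMan once DPAA2_ETH_BUFS_PER_CMD
 * of them have accumulated; if the release keeps failing, the buffers are
 * freed back to the kernel instead.
 */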
221 static void xdp_release_buf(struct dpaa2_eth_priv *priv,
222 struct dpaa2_eth_channel *ch,
228 ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
229 if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
232 while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
234 ch->xdp.drop_cnt)) == -EBUSY) {
235 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
241 free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
242 ch->buf_count -= ch->xdp.drop_cnt;
245 ch->xdp.drop_cnt = 0;
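/* Enqueue a batch of FDs to a Tx FQ, retrying on portal busy up to
 * DPAA2_ETH_ENQUEUE_RETRIES times per frame. Returns the number of frames
 * actually enqueued; the caller handles any leftovers.
 */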
248 static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
249 struct dpaa2_eth_fq *fq,
250 struct dpaa2_eth_xdp_fds *xdp_fds)
252 int total_enqueued = 0, retries = 0, enqueued;
253 struct dpaa2_eth_drv_stats *percpu_extras;
254 int num_fds, err, max_retries;
255 struct dpaa2_fd *fds;
257 percpu_extras = this_cpu_ptr(priv->percpu_extras);
259 /* try to enqueue all the FDs until the max number of retries is hit */
261 num_fds = xdp_fds->num;
262 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
263 while (total_enqueued < num_fds && retries < max_retries) {
264 err = priv->enqueue(priv, fq, &fds[total_enqueued],
265 0, num_fds - total_enqueued, &enqueued);
267 percpu_extras->tx_portal_busy += ++retries;
270 total_enqueued += enqueued;
274 return total_enqueued;
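/* Flush the frames batched for XDP_TX on this FQ: enqueue them in one go,
 * account the transmitted packets/bytes and recycle the buffers of any
 * frames that could not be enqueued.
 */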
277 static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
278 struct dpaa2_eth_channel *ch,
279 struct dpaa2_eth_fq *fq)
281 struct rtnl_link_stats64 *percpu_stats;
282 struct dpaa2_fd *fds;
285 percpu_stats = this_cpu_ptr(priv->percpu_stats);
287 /* enqueue the array of XDP_TX frames */
288 enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
290 /* update statistics */
291 percpu_stats->tx_packets += enqueued;
292 fds = fq->xdp_tx_fds.fds;
293 for (i = 0; i < enqueued; i++) {
294 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
297 for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
298 xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
299 percpu_stats->tx_errors++;
300 ch->stats.xdp_tx_err++;
302 fq->xdp_tx_fds.num = 0;
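/* Queue an XDP_TX frame for transmission. The frame annotation is set up
 * so hardware returns the buffer straight to the buffer pool after
 * transmission (no Tx confirmation), and the FD is only flushed to
 * hardware once a full batch has been gathered.
 */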
305 static void xdp_enqueue(struct dpaa2_eth_priv *priv,
306 struct dpaa2_eth_channel *ch,
308 void *buf_start, u16 queue_id)
310 struct dpaa2_faead *faead;
311 struct dpaa2_fd *dest_fd;
312 struct dpaa2_eth_fq *fq;
315 /* Mark the egress frame hardware annotation area as valid */
316 frc = dpaa2_fd_get_frc(fd);
317 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
318 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
320 /* Instruct hardware to release the FD buffer directly into
321 * the buffer pool once transmission is completed, instead of
322 * sending a Tx confirmation frame to us
324 ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
325 faead = dpaa2_get_faead(buf_start, false);
326 faead->ctrl = cpu_to_le32(ctrl);
327 faead->conf_fqid = 0;
329 fq = &priv->fq[queue_id];
330 dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
331 memcpy(dest_fd, fd, sizeof(*dest_fd));
333 if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
336 xdp_tx_flush(priv, ch, fq);
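/* Run the channel's XDP program, if any, on a single-buffer Rx frame and
 * act on the verdict: XDP_PASS falls through to the normal Rx path, XDP_TX
 * re-enqueues the frame, XDP_REDIRECT hands it to xdp_do_redirect() and
 * anything else drops and recycles the buffer.
 */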
339 static u32 run_xdp(struct dpaa2_eth_priv *priv,
340 struct dpaa2_eth_channel *ch,
341 struct dpaa2_eth_fq *rx_fq,
342 struct dpaa2_fd *fd, void *vaddr)
344 dma_addr_t addr = dpaa2_fd_get_addr(fd);
345 struct bpf_prog *xdp_prog;
347 u32 xdp_act = XDP_PASS;
352 xdp_prog = READ_ONCE(ch->xdp.prog);
356 xdp.data = vaddr + dpaa2_fd_get_offset(fd);
357 xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
358 xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
359 xdp_set_data_meta_invalid(&xdp);
360 xdp.rxq = &ch->xdp_rxq;
362 xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
363 (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
365 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
367 /* xdp.data pointer may have changed */
368 dpaa2_fd_set_offset(fd, xdp.data - vaddr);
369 dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
375 xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
378 bpf_warn_invalid_xdp_action(xdp_act);
381 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
384 xdp_release_buf(priv, ch, addr);
385 ch->stats.xdp_drop++;
388 dma_unmap_page(priv->net_dev->dev.parent, addr,
389 priv->rx_buf_size, DMA_BIDIRECTIONAL);
392 /* Allow redirect use of full headroom */
393 xdp.data_hard_start = vaddr;
394 xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
396 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
398 ch->stats.xdp_drop++;
400 ch->stats.xdp_redirect++;
404 ch->xdp.res |= xdp_act;
410 /* Main Rx frame processing routine */
411 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
412 struct dpaa2_eth_channel *ch,
413 const struct dpaa2_fd *fd,
414 struct dpaa2_eth_fq *fq)
416 dma_addr_t addr = dpaa2_fd_get_addr(fd);
417 u8 fd_format = dpaa2_fd_get_format(fd);
420 struct rtnl_link_stats64 *percpu_stats;
421 struct dpaa2_eth_drv_stats *percpu_extras;
422 struct device *dev = priv->net_dev->dev.parent;
423 struct dpaa2_fas *fas;
429 trace_dpaa2_rx_fd(priv->net_dev, fd);
431 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
432 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
435 fas = dpaa2_get_fas(vaddr, false);
437 buf_data = vaddr + dpaa2_fd_get_offset(fd);
440 percpu_stats = this_cpu_ptr(priv->percpu_stats);
441 percpu_extras = this_cpu_ptr(priv->percpu_extras);
443 if (fd_format == dpaa2_fd_single) {
444 xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
445 if (xdp_act != XDP_PASS) {
446 percpu_stats->rx_packets++;
447 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
451 dma_unmap_page(dev, addr, priv->rx_buf_size,
453 skb = build_linear_skb(ch, fd, vaddr);
454 } else if (fd_format == dpaa2_fd_sg) {
455 WARN_ON(priv->xdp_prog);
457 dma_unmap_page(dev, addr, priv->rx_buf_size,
459 skb = build_frag_skb(priv, ch, buf_data);
460 free_pages((unsigned long)vaddr, 0);
461 percpu_extras->rx_sg_frames++;
462 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
464 /* We don't support any other format */
465 goto err_frame_format;
473 /* Get the timestamp value */
474 if (priv->rx_tstamp) {
475 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
476 __le64 *ts = dpaa2_get_ts(vaddr, false);
479 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
481 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
482 shhwtstamps->hwtstamp = ns_to_ktime(ns);
485 /* Check if we need to validate the L4 csum */
486 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
487 status = le32_to_cpu(fas->status);
488 validate_rx_csum(priv, status, skb);
491 skb->protocol = eth_type_trans(skb, priv->net_dev);
492 skb_record_rx_queue(skb, fq->flowid);
494 percpu_stats->rx_packets++;
495 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
497 list_add_tail(&skb->list, ch->rx_list);
502 free_rx_fd(priv, fd, vaddr);
504 percpu_stats->rx_dropped++;
507 /* Consume all frames pull-dequeued into the store. This is the simplest way to
508 * make sure we don't accidentally issue another volatile dequeue which would
509 * overwrite (leak) frames already in the store.
511 * Observance of NAPI budget is not our concern, leaving that to the caller.
513 static int consume_frames(struct dpaa2_eth_channel *ch,
514 struct dpaa2_eth_fq **src)
516 struct dpaa2_eth_priv *priv = ch->priv;
517 struct dpaa2_eth_fq *fq = NULL;
519 const struct dpaa2_fd *fd;
520 int cleaned = 0, retries = 0;
524 dq = dpaa2_io_store_next(ch->store, &is_last);
526 /* If we're here, we *must* have placed a
527 * volatile dequeue command, so keep reading through
528 * the store until we get some sort of valid response
529 * token (either a valid frame or an "empty dequeue")
531 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
532 netdev_err_once(priv->net_dev,
533 "Unable to read a valid dequeue response\n");
539 fd = dpaa2_dq_fd(dq);
540 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
542 fq->consume(priv, ch, fd, fq);
550 fq->stats.frames += cleaned;
551 ch->stats.frames += cleaned;
553 /* A dequeue operation only pulls frames from a single queue
554 * into the store. Return the frame queue as an out param.
562 /* Configure the egress frame annotation for timestamp update */
563 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
565 struct dpaa2_faead *faead;
568 /* Mark the egress frame annotation area as valid */
569 frc = dpaa2_fd_get_frc(fd);
570 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
572 /* Set hardware annotation size */
573 ctrl = dpaa2_fd_get_ctrl(fd);
574 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
576 /* enable UPD (update prepended data) bit in FAEAD field of
577 * hardware frame annotation area
579 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
580 faead = dpaa2_get_faead(buf_start, true);
581 faead->ctrl = cpu_to_le32(ctrl);
584 /* Create a frame descriptor based on a fragmented skb */
585 static int build_sg_fd(struct dpaa2_eth_priv *priv,
589 struct device *dev = priv->net_dev->dev.parent;
590 void *sgt_buf = NULL;
592 int nr_frags = skb_shinfo(skb)->nr_frags;
593 struct dpaa2_sg_entry *sgt;
596 struct scatterlist *scl, *crt_scl;
599 struct dpaa2_eth_swa *swa;
601 /* Create and map scatterlist.
602 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
603 * to go beyond nr_frags+1.
604 * Note: We don't support chained scatterlists
606 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
609 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
613 sg_init_table(scl, nr_frags + 1);
614 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
615 if (unlikely(num_sg < 0)) {
617 goto dma_map_sg_failed;
619 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
620 if (unlikely(!num_dma_bufs)) {
622 goto dma_map_sg_failed;
625 /* Prepare the HW SGT structure */
626 sgt_buf_size = priv->tx_data_offset +
627 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
628 sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
629 if (unlikely(!sgt_buf)) {
631 goto sgt_buf_alloc_failed;
633 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
634 memset(sgt_buf, 0, sgt_buf_size);
636 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
638 /* Fill in the HW SGT structure.
640 * sgt_buf is zeroed out, so the following fields are implicit
641 * in all sgt entries:
643 * - format is 'dpaa2_sg_single'
645 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
646 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
647 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
649 dpaa2_sg_set_final(&sgt[i - 1], true);
651 /* Store the skb backpointer in the SGT buffer.
652 * Fit the scatterlist and the number of buffers alongside the
653 * skb backpointer in the software annotation area. We'll need
654 * all of them on Tx Conf.
656 swa = (struct dpaa2_eth_swa *)sgt_buf;
657 swa->type = DPAA2_ETH_SWA_SG;
660 swa->sg.num_sg = num_sg;
661 swa->sg.sgt_size = sgt_buf_size;
663 /* Separately map the SGT buffer */
664 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
665 if (unlikely(dma_mapping_error(dev, addr))) {
667 goto dma_map_single_failed;
669 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
670 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
671 dpaa2_fd_set_addr(fd, addr);
672 dpaa2_fd_set_len(fd, skb->len);
673 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
675 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
676 enable_tx_tstamp(fd, sgt_buf);
680 dma_map_single_failed:
681 skb_free_frag(sgt_buf);
682 sgt_buf_alloc_failed:
683 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
689 /* Create a SG frame descriptor based on a linear skb.
691 * This function is used on the Tx path when the skb headroom is not large
692 * enough for the HW requirements, thus instead of realloc-ing the skb we
693 * create a SG frame descriptor with only one entry.
695 static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
699 struct device *dev = priv->net_dev->dev.parent;
700 struct dpaa2_eth_sgt_cache *sgt_cache;
701 struct dpaa2_sg_entry *sgt;
702 struct dpaa2_eth_swa *swa;
703 dma_addr_t addr, sgt_addr;
704 void *sgt_buf = NULL;
708 /* Prepare the HW SGT structure */
709 sgt_cache = this_cpu_ptr(priv->sgt_cache);
710 sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
712 if (sgt_cache->count == 0)
713 sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
716 sgt_buf = sgt_cache->buf[--sgt_cache->count];
717 if (unlikely(!sgt_buf))
720 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
721 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
723 addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
724 if (unlikely(dma_mapping_error(dev, addr))) {
726 goto data_map_failed;
729 /* Fill in the HW SGT structure */
730 dpaa2_sg_set_addr(sgt, addr);
731 dpaa2_sg_set_len(sgt, skb->len);
732 dpaa2_sg_set_final(sgt, true);
734 /* Store the skb backpointer in the SGT buffer */
735 swa = (struct dpaa2_eth_swa *)sgt_buf;
736 swa->type = DPAA2_ETH_SWA_SINGLE;
737 swa->single.skb = skb;
738 swa->sg.sgt_size = sgt_buf_size;
740 /* Separately map the SGT buffer */
741 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
742 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
747 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
748 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
749 dpaa2_fd_set_addr(fd, sgt_addr);
750 dpaa2_fd_set_len(fd, skb->len);
751 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
753 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
754 enable_tx_tstamp(fd, sgt_buf);
759 dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
761 if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
764 sgt_cache->buf[sgt_cache->count++] = sgt_buf;
769 /* Create a frame descriptor based on a linear skb */
770 static int build_single_fd(struct dpaa2_eth_priv *priv,
774 struct device *dev = priv->net_dev->dev.parent;
775 u8 *buffer_start, *aligned_start;
776 struct dpaa2_eth_swa *swa;
779 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
781 /* If there's enough room to align the FD address, do it.
782 * It will help hardware optimize accesses.
784 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
785 DPAA2_ETH_TX_BUF_ALIGN);
786 if (aligned_start >= skb->head)
787 buffer_start = aligned_start;
789 /* Store a backpointer to the skb at the beginning of the buffer
790 * (in the private data area) such that we can release it
793 swa = (struct dpaa2_eth_swa *)buffer_start;
794 swa->type = DPAA2_ETH_SWA_SINGLE;
795 swa->single.skb = skb;
797 addr = dma_map_single(dev, buffer_start,
798 skb_tail_pointer(skb) - buffer_start,
800 if (unlikely(dma_mapping_error(dev, addr)))
803 dpaa2_fd_set_addr(fd, addr);
804 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
805 dpaa2_fd_set_len(fd, skb->len);
806 dpaa2_fd_set_format(fd, dpaa2_fd_single);
807 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
809 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
810 enable_tx_tstamp(fd, buffer_start);
815 /* FD freeing routine on the Tx path
817 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
818 * back-pointed to is also freed.
819 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
822 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
823 struct dpaa2_eth_fq *fq,
824 const struct dpaa2_fd *fd, bool in_napi)
826 struct device *dev = priv->net_dev->dev.parent;
827 dma_addr_t fd_addr, sg_addr;
828 struct sk_buff *skb = NULL;
829 unsigned char *buffer_start;
830 struct dpaa2_eth_swa *swa;
831 u8 fd_format = dpaa2_fd_get_format(fd);
832 u32 fd_len = dpaa2_fd_get_len(fd);
834 struct dpaa2_eth_sgt_cache *sgt_cache;
835 struct dpaa2_sg_entry *sgt;
837 fd_addr = dpaa2_fd_get_addr(fd);
838 buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
839 swa = (struct dpaa2_eth_swa *)buffer_start;
841 if (fd_format == dpaa2_fd_single) {
842 if (swa->type == DPAA2_ETH_SWA_SINGLE) {
843 skb = swa->single.skb;
844 /* Accessing the skb buffer is safe before dma unmap,
845 * because we didn't map the actual skb shell.
847 dma_unmap_single(dev, fd_addr,
848 skb_tail_pointer(skb) - buffer_start,
851 WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
852 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
855 } else if (fd_format == dpaa2_fd_sg) {
856 if (swa->type == DPAA2_ETH_SWA_SG) {
859 /* Unmap the scatterlist */
860 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
864 /* Unmap the SGT buffer */
865 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
868 skb = swa->single.skb;
870 /* Unmap the SGT Buffer */
871 dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
874 sgt = (struct dpaa2_sg_entry *)(buffer_start +
875 priv->tx_data_offset);
876 sg_addr = dpaa2_sg_get_addr(sgt);
877 dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
880 netdev_dbg(priv->net_dev, "Invalid FD format\n");
884 if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
886 fq->dq_bytes += fd_len;
889 if (swa->type == DPAA2_ETH_SWA_XDP) {
890 xdp_return_frame(swa->xdp.xdpf);
894 /* Get the timestamp value */
895 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
896 struct skb_shared_hwtstamps shhwtstamps;
897 __le64 *ts = dpaa2_get_ts(buffer_start, true);
900 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
902 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
903 shhwtstamps.hwtstamp = ns_to_ktime(ns);
904 skb_tstamp_tx(skb, &shhwtstamps);
907 /* Free SGT buffer allocated on tx */
908 if (fd_format != dpaa2_fd_single) {
909 sgt_cache = this_cpu_ptr(priv->sgt_cache);
910 if (swa->type == DPAA2_ETH_SWA_SG) {
911 skb_free_frag(buffer_start);
913 if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
916 sgt_cache->buf[sgt_cache->count++] = buffer_start;
920 /* Move on with skb release */
921 napi_consume_skb(skb, in_napi);
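/* .ndo_start_xmit callback: build a frame descriptor from the skb
 * (scatter-gather, single buffer, or single buffer converted to S/G when
 * headroom is insufficient), pick a Tx FQ based on the skb queue mapping
 * and enqueue it, retrying while the portal is busy.
 */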
924 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
926 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
928 struct rtnl_link_stats64 *percpu_stats;
929 struct dpaa2_eth_drv_stats *percpu_extras;
930 struct dpaa2_eth_fq *fq;
931 struct netdev_queue *nq;
933 unsigned int needed_headroom;
938 percpu_stats = this_cpu_ptr(priv->percpu_stats);
939 percpu_extras = this_cpu_ptr(priv->percpu_extras);
941 needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
943 /* We'll be holding a back-reference to the skb until Tx Confirmation;
944 * we don't want that overwritten by a concurrent Tx with a cloned skb.
946 skb = skb_unshare(skb, GFP_ATOMIC);
947 if (unlikely(!skb)) {
948 /* skb_unshare() has already freed the skb */
949 percpu_stats->tx_dropped++;
953 /* Setup the FD fields */
954 memset(&fd, 0, sizeof(fd));
956 if (skb_is_nonlinear(skb)) {
957 err = build_sg_fd(priv, skb, &fd);
958 percpu_extras->tx_sg_frames++;
959 percpu_extras->tx_sg_bytes += skb->len;
960 } else if (skb_headroom(skb) < needed_headroom) {
961 err = build_sg_fd_single_buf(priv, skb, &fd);
962 percpu_extras->tx_sg_frames++;
963 percpu_extras->tx_sg_bytes += skb->len;
964 percpu_extras->tx_converted_sg_frames++;
965 percpu_extras->tx_converted_sg_bytes += skb->len;
967 err = build_single_fd(priv, skb, &fd);
971 percpu_stats->tx_dropped++;
976 trace_dpaa2_tx_fd(net_dev, &fd);
978 /* TxConf FQ selection relies on queue id from the stack.
979 * In case of a forwarded frame from another DPNI interface, we choose
980 * a queue affined to the same core that processed the Rx frame
982 queue_mapping = skb_get_queue_mapping(skb);
984 if (net_dev->num_tc) {
985 prio = netdev_txq_to_tc(net_dev, queue_mapping);
986 /* Hardware interprets priority level 0 as being the highest,
987 * so we need to do a reverse mapping to the netdev tc index
989 prio = net_dev->num_tc - prio - 1;
990 /* We have only one FQ array entry for all Tx hardware queues
991 * with the same flow id (but different priority levels)
993 queue_mapping %= dpaa2_eth_queue_count(priv);
995 fq = &priv->fq[queue_mapping];
997 fd_len = dpaa2_fd_get_len(&fd);
998 nq = netdev_get_tx_queue(net_dev, queue_mapping);
999 netdev_tx_sent_queue(nq, fd_len);
1001 /* Everything that happens after this enqueue might race with
1002 * the Tx confirmation callback for this frame
1004 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1005 err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
1009 percpu_extras->tx_portal_busy += i;
1010 if (unlikely(err < 0)) {
1011 percpu_stats->tx_errors++;
1012 /* Clean up everything, including freeing the skb */
1013 free_tx_fd(priv, fq, &fd, false);
1014 netdev_tx_completed_queue(nq, 1, fd_len);
1016 percpu_stats->tx_packets++;
1017 percpu_stats->tx_bytes += fd_len;
1020 return NETDEV_TX_OK;
1025 return NETDEV_TX_OK;
1028 /* Tx confirmation frame processing routine */
1029 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1030 struct dpaa2_eth_channel *ch __always_unused,
1031 const struct dpaa2_fd *fd,
1032 struct dpaa2_eth_fq *fq)
1034 struct rtnl_link_stats64 *percpu_stats;
1035 struct dpaa2_eth_drv_stats *percpu_extras;
1036 u32 fd_len = dpaa2_fd_get_len(fd);
1040 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1042 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1043 percpu_extras->tx_conf_frames++;
1044 percpu_extras->tx_conf_bytes += fd_len;
1046 /* Check frame errors in the FD field */
1047 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1048 free_tx_fd(priv, fq, fd, true);
1050 if (likely(!fd_errors))
1053 if (net_ratelimit())
1054 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1057 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1058 /* Tx-conf logically pertains to the egress path. */
1059 percpu_stats->tx_errors++;
1062 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1066 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1067 DPNI_OFF_RX_L3_CSUM, enable);
1069 netdev_err(priv->net_dev,
1070 "dpni_set_offload(RX_L3_CSUM) failed\n");
1074 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1075 DPNI_OFF_RX_L4_CSUM, enable);
1077 netdev_err(priv->net_dev,
1078 "dpni_set_offload(RX_L4_CSUM) failed\n");
1085 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1089 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1090 DPNI_OFF_TX_L3_CSUM, enable);
1092 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1096 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1097 DPNI_OFF_TX_L4_CSUM, enable);
1099 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1106 /* Perform a single release command to add buffers
1107 * to the specified buffer pool
1109 static int add_bufs(struct dpaa2_eth_priv *priv,
1110 struct dpaa2_eth_channel *ch, u16 bpid)
1112 struct device *dev = priv->net_dev->dev.parent;
1113 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1119 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
1120 /* Allocate buffer visible to WRIOP + skb shared info +
1123 /* allocate one page for each Rx buffer. WRIOP sees
1124 * the entire page except for a tailroom reserved for
1127 page = dev_alloc_pages(0);
1131 addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1133 if (unlikely(dma_mapping_error(dev, addr)))
1136 buf_array[i] = addr;
1139 trace_dpaa2_eth_buf_seed(priv->net_dev,
1140 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
1141 addr, priv->rx_buf_size,
1146 /* In case the portal is busy, retry until successful */
1147 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
1148 buf_array, i)) == -EBUSY) {
1149 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1154 /* If release command failed, clean up and bail out;
1155 * not much else we can do about it
1158 free_bufs(priv, buf_array, i);
1165 __free_pages(page, 0);
1167 /* If we managed to allocate at least some buffers,
1168 * release them to hardware
1176 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1181 for (j = 0; j < priv->num_channels; j++) {
1182 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1183 i += DPAA2_ETH_BUFS_PER_CMD) {
1184 new_count = add_bufs(priv, priv->channel[j], bpid);
1185 priv->channel[j]->buf_count += new_count;
1187 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1197 * Drain the specified number of buffers from the DPNI's private buffer pool.
1198 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1200 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
1202 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1207 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1210 if (ret == -EBUSY &&
1211 retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1213 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1216 free_bufs(priv, buf_array, ret);
1221 static void drain_pool(struct dpaa2_eth_priv *priv)
1225 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1226 drain_bufs(priv, 1);
1228 for (i = 0; i < priv->num_channels; i++)
1229 priv->channel[i]->buf_count = 0;
1232 /* Function is called from softirq context only, so we don't need to guard
1233 * the access to percpu count
1235 static int refill_pool(struct dpaa2_eth_priv *priv,
1236 struct dpaa2_eth_channel *ch,
1241 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1245 new_count = add_bufs(priv, ch, bpid);
1246 if (unlikely(!new_count)) {
1247 /* Out of memory; abort for now, we'll try later on */
1250 ch->buf_count += new_count;
1251 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1253 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1259 static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1261 struct dpaa2_eth_sgt_cache *sgt_cache;
1265 for_each_possible_cpu(k) {
1266 sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1267 count = sgt_cache->count;
1269 for (i = 0; i < count; i++)
1270 kfree(sgt_cache->buf[i]);
1271 sgt_cache->count = 0;
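/* Issue a volatile dequeue command for this channel, pulling frames into
 * the channel's store. Retried while the software portal is busy.
 */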
1275 static int pull_channel(struct dpaa2_eth_channel *ch)
1280 /* Retry while portal is busy */
1282 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1286 } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1288 ch->stats.dequeue_portal_busy += dequeues;
1290 ch->stats.pull_err++;
1295 /* NAPI poll routine
1297 * Frames are dequeued from the QMan channel associated with this NAPI context.
1298 * Rx, Tx confirmation and (if configured) Rx error frames all count
1299 * towards the NAPI budget.
1301 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1303 struct dpaa2_eth_channel *ch;
1304 struct dpaa2_eth_priv *priv;
1305 int rx_cleaned = 0, txconf_cleaned = 0;
1306 struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1307 struct netdev_queue *nq;
1308 int store_cleaned, work_done;
1309 struct list_head rx_list;
1314 ch = container_of(napi, struct dpaa2_eth_channel, napi);
1318 INIT_LIST_HEAD(&rx_list);
1319 ch->rx_list = &rx_list;
1322 err = pull_channel(ch);
1326 /* Refill pool if appropriate */
1327 refill_pool(priv, ch, priv->bpid);
1329 store_cleaned = consume_frames(ch, &fq);
1330 if (store_cleaned <= 0)
1332 if (fq->type == DPAA2_RX_FQ) {
1333 rx_cleaned += store_cleaned;
1334 flowid = fq->flowid;
1336 txconf_cleaned += store_cleaned;
1337 /* We have a single Tx conf FQ on this channel */
1341 /* If we either consumed the whole NAPI budget with Rx frames
1342 * or we reached the Tx confirmations threshold, we're done.
1344 if (rx_cleaned >= budget ||
1345 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1349 } while (store_cleaned);
1351 /* We didn't consume the entire budget, so finish napi and
1352 * re-enable data availability notifications
1354 napi_complete_done(napi, rx_cleaned);
1356 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1358 } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
1359 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1360 ch->nctx.desired_cpu);
1362 work_done = max(rx_cleaned, 1);
1365 netif_receive_skb_list(ch->rx_list);
1367 if (txc_fq && txc_fq->dq_frames) {
1368 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1369 netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1371 txc_fq->dq_frames = 0;
1372 txc_fq->dq_bytes = 0;
1375 if (ch->xdp.res & XDP_REDIRECT)
1377 else if (rx_cleaned && ch->xdp.res & XDP_TX)
1378 xdp_tx_flush(priv, ch, &priv->fq[flowid]);
1383 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
1385 struct dpaa2_eth_channel *ch;
1388 for (i = 0; i < priv->num_channels; i++) {
1389 ch = priv->channel[i];
1390 napi_enable(&ch->napi);
1394 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
1396 struct dpaa2_eth_channel *ch;
1399 for (i = 0; i < priv->num_channels; i++) {
1400 ch = priv->channel[i];
1401 napi_disable(&ch->napi);
1405 void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
1406 bool tx_pause, bool pfc)
1408 struct dpni_taildrop td = {0};
1409 struct dpaa2_eth_fq *fq;
1412 /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
1413 * flow control is disabled (as it might interfere with either the
1414 * buffer pool depletion trigger for pause frames or with the group
1415 * congestion trigger for PFC frames)
1417 td.enable = !tx_pause;
1418 if (priv->rx_fqtd_enabled == td.enable)
1421 td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
1422 td.units = DPNI_CONGESTION_UNIT_BYTES;
1424 for (i = 0; i < priv->num_fqs; i++) {
1426 if (fq->type != DPAA2_RX_FQ)
1428 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1429 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
1430 fq->tc, fq->flowid, &td);
1432 netdev_err(priv->net_dev,
1433 "dpni_set_taildrop(FQ) failed\n");
1438 priv->rx_fqtd_enabled = td.enable;
1441 /* Congestion group taildrop: threshold is in frames, per group
1442 * of FQs belonging to the same traffic class
1443 * Enabled if general Tx pause disabled or if PFCs are enabled
1444 * (congestion group threshold for PFC generation is lower than the
1445 * CG taildrop threshold, so it won't interfere with it; we also
1446 * want frames in non-PFC enabled traffic classes to be kept in check)
1448 td.enable = !tx_pause || (tx_pause && pfc);
1449 if (priv->rx_cgtd_enabled == td.enable)
1452 td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
1453 td.units = DPNI_CONGESTION_UNIT_FRAMES;
1454 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
1455 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1456 DPNI_CP_GROUP, DPNI_QUEUE_RX,
1459 netdev_err(priv->net_dev,
1460 "dpni_set_taildrop(CG) failed\n");
1465 priv->rx_cgtd_enabled = td.enable;
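/* Read the current link state from firmware, reconfigure Rx taildrop if
 * the Tx pause setting changed and, unless the MAC is managed through
 * phylink, update the carrier and Tx queue state accordingly.
 */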
1468 static int link_state_update(struct dpaa2_eth_priv *priv)
1470 struct dpni_link_state state = {0};
1474 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1475 if (unlikely(err)) {
1476 netdev_err(priv->net_dev,
1477 "dpni_get_link_state() failed\n");
1481 /* If Tx pause frame settings have changed, we need to update
1482 * Rx FQ taildrop configuration as well. We configure taildrop
1483 * only when pause frame generation is disabled.
1485 tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
1486 dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
1488 /* When we manage the MAC/PHY using phylink there is no need
1489 * to manually update the netif_carrier.
1494 /* Check link state; speed / duplex changes are not treated yet */
1495 if (priv->link_state.up == state.up)
1499 netif_carrier_on(priv->net_dev);
1500 netif_tx_start_all_queues(priv->net_dev);
1502 netif_tx_stop_all_queues(priv->net_dev);
1503 netif_carrier_off(priv->net_dev);
1506 netdev_info(priv->net_dev, "Link Event: state %s\n",
1507 state.up ? "up" : "down");
1510 priv->link_state = state;
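/* .ndo_open callback: seed the buffer pool, enable NAPI and the DPNI,
 * then learn the link state in case the link up interrupt was already
 * handled before we got here.
 */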
1515 static int dpaa2_eth_open(struct net_device *net_dev)
1517 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1520 err = seed_pool(priv, priv->bpid);
1522 /* Not much to do; the buffer pool, though not filled up,
1523 * may still contain some buffers which would enable us
1526 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1527 priv->dpbp_dev->obj_desc.id, priv->bpid);
1531 /* We'll only start the txqs when the link is actually ready;
1532 * make sure we don't race against the link up notification,
1533 * which may come immediately after dpni_enable();
1535 netif_tx_stop_all_queues(net_dev);
1537 /* Also, explicitly set carrier off, otherwise
1538 * netif_carrier_ok() will return true and cause 'ip link show'
1539 * to report the LOWER_UP flag, even though the link
1540 * notification wasn't even received.
1542 netif_carrier_off(net_dev);
1544 enable_ch_napi(priv);
1546 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1548 netdev_err(net_dev, "dpni_enable() failed\n");
1553 /* If the DPMAC object has already processed the link up
1554 * interrupt, we have to learn the link state ourselves.
1556 err = link_state_update(priv);
1558 netdev_err(net_dev, "Can't update link state\n");
1559 goto link_state_err;
1562 phylink_start(priv->mac->phylink);
1569 disable_ch_napi(priv);
1574 /* Total number of in-flight frames on ingress queues */
1575 static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
1577 struct dpaa2_eth_fq *fq;
1578 u32 fcnt = 0, bcnt = 0, total = 0;
1581 for (i = 0; i < priv->num_fqs; i++) {
1583 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1585 netdev_warn(priv->net_dev, "query_fq_count failed");
1594 static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
1600 pending = ingress_fq_count(priv);
1603 } while (pending && --retries);
1606 #define DPNI_TX_PENDING_VER_MAJOR 7
1607 #define DPNI_TX_PENDING_VER_MINOR 13
1608 static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
1610 union dpni_statistics stats;
1614 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
1615 DPNI_TX_PENDING_VER_MINOR) < 0)
1619 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
1623 if (stats.page_6.tx_pending_frames == 0)
1625 } while (--retries);
1631 static int dpaa2_eth_stop(struct net_device *net_dev)
1633 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1634 int dpni_enabled = 0;
1638 netif_tx_stop_all_queues(net_dev);
1639 netif_carrier_off(net_dev);
1641 phylink_stop(priv->mac->phylink);
1644 /* On dpni_disable(), the MC firmware will:
1645 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1646 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1647 * of all in flight Tx frames is finished (and corresponding Tx conf
1648 * frames are enqueued back to software)
1650 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1651 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1652 * and Tx conf queues are consumed on NAPI poll.
1654 wait_for_egress_fq_empty(priv);
1657 dpni_disable(priv->mc_io, 0, priv->mc_token);
1658 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1660 /* Allow the hardware some slack */
1662 } while (dpni_enabled && --retries);
1664 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1665 /* Must go on and disable NAPI nonetheless, so we don't crash at
1666 * the next "ifconfig up"
1670 wait_for_ingress_fq_empty(priv);
1671 disable_ch_napi(priv);
1673 /* Empty the buffer pool */
1676 /* Empty the Scatter-Gather Buffer cache */
1677 dpaa2_eth_sgt_cache_drain(priv);
1682 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1684 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1685 struct device *dev = net_dev->dev.parent;
1688 err = eth_mac_addr(net_dev, addr);
1690 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1694 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1697 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1704 /** Fill in counters maintained by the GPP driver. These may be different from
1705 * the hardware counters obtained by ethtool.
1707 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1708 struct rtnl_link_stats64 *stats)
1710 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1711 struct rtnl_link_stats64 *percpu_stats;
1713 u64 *netstats = (u64 *)stats;
1715 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1717 for_each_possible_cpu(i) {
1718 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1719 cpustats = (u64 *)percpu_stats;
1720 for (j = 0; j < num; j++)
1721 netstats[j] += cpustats[j];
1725 /* Copy mac unicast addresses from @net_dev to @priv.
1726 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1728 static void add_uc_hw_addr(const struct net_device *net_dev,
1729 struct dpaa2_eth_priv *priv)
1731 struct netdev_hw_addr *ha;
1734 netdev_for_each_uc_addr(ha, net_dev) {
1735 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1738 netdev_warn(priv->net_dev,
1739 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1744 /* Copy mac multicast addresses from @net_dev to @priv
1745 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1747 static void add_mc_hw_addr(const struct net_device *net_dev,
1748 struct dpaa2_eth_priv *priv)
1750 struct netdev_hw_addr *ha;
1753 netdev_for_each_mc_addr(ha, net_dev) {
1754 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1757 netdev_warn(priv->net_dev,
1758 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1763 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1765 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1766 int uc_count = netdev_uc_count(net_dev);
1767 int mc_count = netdev_mc_count(net_dev);
1768 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1769 u32 options = priv->dpni_attrs.options;
1770 u16 mc_token = priv->mc_token;
1771 struct fsl_mc_io *mc_io = priv->mc_io;
1774 /* Basic sanity checks; these probably indicate a misconfiguration */
1775 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1776 netdev_info(net_dev,
1777 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1780 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1781 if (uc_count > max_mac) {
1782 netdev_info(net_dev,
1783 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1787 if (mc_count + uc_count > max_mac) {
1788 netdev_info(net_dev,
1789 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1790 uc_count + mc_count, max_mac);
1791 goto force_mc_promisc;
1794 /* Adjust promisc settings due to flag combinations */
1795 if (net_dev->flags & IFF_PROMISC)
1797 if (net_dev->flags & IFF_ALLMULTI) {
1798 /* First, rebuild unicast filtering table. This should be done
1799 * in promisc mode, in order to avoid frame loss while we
1800 * progressively add entries to the table.
1801 * We don't know whether we had been in promisc already, and
1802 * making an MC call to find out is expensive; so set uc promisc
1805 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1807 netdev_warn(net_dev, "Can't set uc promisc\n");
1809 /* Actual uc table reconstruction. */
1810 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1812 netdev_warn(net_dev, "Can't clear uc filters\n");
1813 add_uc_hw_addr(net_dev, priv);
1815 /* Finally, clear uc promisc and set mc promisc as requested. */
1816 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1818 netdev_warn(net_dev, "Can't clear uc promisc\n");
1819 goto force_mc_promisc;
1822 /* Neither unicast, nor multicast promisc will be on... eventually.
1823 * For now, rebuild mac filtering tables while forcing both of them on.
1825 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1827 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1828 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1830 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1832 /* Actual mac filtering tables reconstruction */
1833 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1835 netdev_warn(net_dev, "Can't clear mac filters\n");
1836 add_mc_hw_addr(net_dev, priv);
1837 add_uc_hw_addr(net_dev, priv);
1839 /* Now we can clear both ucast and mcast promisc, without risking
1840 * to drop legitimate frames anymore.
1842 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1844 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1845 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1847 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1852 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1854 netdev_warn(net_dev, "Can't set ucast promisc\n");
1856 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1858 netdev_warn(net_dev, "Can't set mcast promisc\n");
1861 static int dpaa2_eth_set_features(struct net_device *net_dev,
1862 netdev_features_t features)
1864 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1865 netdev_features_t changed = features ^ net_dev->features;
1869 if (changed & NETIF_F_RXCSUM) {
1870 enable = !!(features & NETIF_F_RXCSUM);
1871 err = set_rx_csum(priv, enable);
1876 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1877 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1878 err = set_tx_csum(priv, enable);
1886 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1888 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1889 struct hwtstamp_config config;
1891 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1894 switch (config.tx_type) {
1895 case HWTSTAMP_TX_OFF:
1896 priv->tx_tstamp = false;
1898 case HWTSTAMP_TX_ON:
1899 priv->tx_tstamp = true;
1905 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1906 priv->rx_tstamp = false;
1908 priv->rx_tstamp = true;
1909 /* TS is set for all frame types, not only those requested */
1910 config.rx_filter = HWTSTAMP_FILTER_ALL;
1913 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1917 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1919 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1921 if (cmd == SIOCSHWTSTAMP)
1922 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1925 return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
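/* XDP programs only work on single-buffer frames, so the MTU is limited
 * to what fits in one Rx buffer after subtracting the hardware annotation,
 * Rx headroom and XDP headroom.
 */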
1930 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1932 int mfl, linear_mfl;
1934 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1935 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
1936 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1938 if (mfl > linear_mfl) {
1939 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
1940 linear_mfl - VLAN_ETH_HLEN);
1947 static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
1951 /* We enforce a maximum Rx frame length based on MTU only if we have
1952 * an XDP program attached (in order to avoid Rx S/G frames).
1953 * Otherwise, we accept all incoming frames as long as they are not
1954 * larger than maximum size supported in hardware
1957 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1959 mfl = DPAA2_ETH_MFL;
1961 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
1963 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
1970 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
1972 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1975 if (!priv->xdp_prog)
1978 if (!xdp_mtu_valid(priv, new_mtu))
1981 err = set_rx_mfl(priv, new_mtu, true);
1990 static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
1992 struct dpni_buffer_layout buf_layout = {0};
1995 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
1996 DPNI_QUEUE_RX, &buf_layout);
1998 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2002 /* Reserve extra headroom for XDP header size changes */
2003 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2004 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2005 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2006 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2007 DPNI_QUEUE_RX, &buf_layout);
2009 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
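/* Attach or detach an XDP program. When switching between XDP and non-XDP
 * modes the interface is temporarily brought down so the maximum frame
 * length and Rx buffer headroom can be reconfigured and the buffer pool
 * drained of old-layout buffers.
 */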
2016 static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2018 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2019 struct dpaa2_eth_channel *ch;
2020 struct bpf_prog *old;
2021 bool up, need_update;
2024 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2028 bpf_prog_add(prog, priv->num_channels);
2030 up = netif_running(dev);
2031 need_update = (!!priv->xdp_prog != !!prog);
2034 dpaa2_eth_stop(dev);
2036 /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2037 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2038 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2039 * so we are sure no old format buffers will be used from now on.
2042 err = set_rx_mfl(priv, dev->mtu, !!prog);
2045 err = update_rx_buffer_headroom(priv, !!prog);
2050 old = xchg(&priv->xdp_prog, prog);
2054 for (i = 0; i < priv->num_channels; i++) {
2055 ch = priv->channel[i];
2056 old = xchg(&ch->xdp.prog, prog);
2062 err = dpaa2_eth_open(dev);
2071 bpf_prog_sub(prog, priv->num_channels);
2073 dpaa2_eth_open(dev);
2078 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2080 switch (xdp->command) {
2081 case XDP_SETUP_PROG:
2082 return setup_xdp(dev, xdp->prog);
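/* Build a Tx frame descriptor from an xdp_frame for .ndo_xdp_xmit.
 * The xdp_frame pointer is stashed in the software annotation area so it
 * can be returned on Tx confirmation.
 */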
2090 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2091 struct xdp_frame *xdpf,
2092 struct dpaa2_fd *fd)
2094 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2095 struct device *dev = net_dev->dev.parent;
2096 unsigned int needed_headroom;
2097 struct dpaa2_eth_swa *swa;
2098 void *buffer_start, *aligned_start;
2101 /* We require a minimum headroom to be able to transmit the frame.
2102 * Otherwise return an error and let the original net_device handle it
2104 needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
2105 if (xdpf->headroom < needed_headroom)
2108 /* Setup the FD fields */
2109 memset(fd, 0, sizeof(*fd));
2111 /* Align FD address, if possible */
2112 buffer_start = xdpf->data - needed_headroom;
2113 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2114 DPAA2_ETH_TX_BUF_ALIGN);
2115 if (aligned_start >= xdpf->data - xdpf->headroom)
2116 buffer_start = aligned_start;
2118 swa = (struct dpaa2_eth_swa *)buffer_start;
2119 /* fill in necessary fields here */
2120 swa->type = DPAA2_ETH_SWA_XDP;
2121 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2122 swa->xdp.xdpf = xdpf;
2124 addr = dma_map_single(dev, buffer_start,
2127 if (unlikely(dma_mapping_error(dev, addr)))
2130 dpaa2_fd_set_addr(fd, addr);
2131 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2132 dpaa2_fd_set_len(fd, xdpf->len);
2133 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2134 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2139 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2140 struct xdp_frame **frames, u32 flags)
2142 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2143 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2144 struct rtnl_link_stats64 *percpu_stats;
2145 struct dpaa2_eth_fq *fq;
2146 struct dpaa2_fd *fds;
2147 int enqueued, i, err;
2149 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2152 if (!netif_running(net_dev))
2155 fq = &priv->fq[smp_processor_id()];
2156 xdp_redirect_fds = &fq->xdp_redirect_fds;
2157 fds = xdp_redirect_fds->fds;
2159 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2161 /* create a FD for each xdp_frame in the list received */
2162 for (i = 0; i < n; i++) {
2163 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2167 xdp_redirect_fds->num = i;
2169 /* enqueue all the frame descriptors */
2170 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2172 /* update statistics */
2173 percpu_stats->tx_packets += enqueued;
2174 for (i = 0; i < enqueued; i++)
2175 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2176 for (i = enqueued; i < n; i++)
2177 xdp_return_frame_rx_napi(frames[i]);
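/* Configure XPS so each Tx queue is preferentially used by the CPU that
 * the corresponding frame queue is affine to.
 */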
2182 static int update_xps(struct dpaa2_eth_priv *priv)
2184 struct net_device *net_dev = priv->net_dev;
2185 struct cpumask xps_mask;
2186 struct dpaa2_eth_fq *fq;
2187 int i, num_queues, netdev_queues;
2190 num_queues = dpaa2_eth_queue_count(priv);
2191 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2193 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2194 * queues, so only process those
2196 for (i = 0; i < netdev_queues; i++) {
2197 fq = &priv->fq[i % num_queues];
2199 cpumask_clear(&xps_mask);
2200 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2202 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2204 netdev_warn_once(net_dev, "Error setting XPS queue\n");
2212 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2213 struct tc_mqprio_qopt *mqprio)
2215 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2216 u8 num_tc, num_queues;
2219 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2220 num_queues = dpaa2_eth_queue_count(priv);
2221 num_tc = mqprio->num_tc;
2223 if (num_tc == net_dev->num_tc)
2226 if (num_tc > dpaa2_eth_tc_count(priv)) {
2227 netdev_err(net_dev, "Max %d traffic classes supported\n",
2228 dpaa2_eth_tc_count(priv));
2233 netdev_reset_tc(net_dev);
2234 netif_set_real_num_tx_queues(net_dev, num_queues);
2238 netdev_set_num_tc(net_dev, num_tc);
2239 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2241 for (i = 0; i < num_tc; i++)
2242 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
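/* Convert a rate given in bytes/s by the TBF offload into the Mbit/s
 * value expected by the DPNI Tx shaper.
 */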
2250 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2252 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2254 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2255 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2256 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2257 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2260 if (p->command == TC_TBF_STATS)
2263 /* Only per port Tx shaping */
2264 if (p->parent != TC_H_ROOT)
2267 if (p->command == TC_TBF_REPLACE) {
2268 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2269 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2270 DPAA2_ETH_MAX_BURST_SIZE);
2274 tx_cr_shaper.max_burst_size = cfg->max_size;
2275 /* The TBF interface is in bytes/s, whereas DPAA2 expects the
2278 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2281 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2284 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2291 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2292 enum tc_setup_type type, void *type_data)
2295 case TC_SETUP_QDISC_MQPRIO:
2296 return dpaa2_eth_setup_mqprio(net_dev, type_data);
2297 case TC_SETUP_QDISC_TBF:
2298 return dpaa2_eth_setup_tbf(net_dev, type_data);
2304 static const struct net_device_ops dpaa2_eth_ops = {
2305 .ndo_open = dpaa2_eth_open,
2306 .ndo_start_xmit = dpaa2_eth_tx,
2307 .ndo_stop = dpaa2_eth_stop,
2308 .ndo_set_mac_address = dpaa2_eth_set_addr,
2309 .ndo_get_stats64 = dpaa2_eth_get_stats,
2310 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2311 .ndo_set_features = dpaa2_eth_set_features,
2312 .ndo_do_ioctl = dpaa2_eth_ioctl,
2313 .ndo_change_mtu = dpaa2_eth_change_mtu,
2314 .ndo_bpf = dpaa2_eth_xdp,
2315 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2316 .ndo_setup_tc = dpaa2_eth_setup_tc,
2319 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2321 struct dpaa2_eth_channel *ch;
2323 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2325 /* Update NAPI statistics */
2328 napi_schedule(&ch->napi);
2331 /* Allocate and configure a DPCON object */
2332 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2334 struct fsl_mc_device *dpcon;
2335 struct device *dev = priv->net_dev->dev.parent;
2338 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2339 FSL_MC_POOL_DPCON, &dpcon);
2342 err = -EPROBE_DEFER;
2344 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2345 return ERR_PTR(err);
2348 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2350 dev_err(dev, "dpcon_open() failed\n");
2354 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2356 dev_err(dev, "dpcon_reset() failed\n");
2360 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2362 dev_err(dev, "dpcon_enable() failed\n");
2369 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2371 fsl_mc_object_free(dpcon);
2373 return ERR_PTR(err);
2376 static void free_dpcon(struct dpaa2_eth_priv *priv,
2377 struct fsl_mc_device *dpcon)
2379 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2380 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2381 fsl_mc_object_free(dpcon);
2384 static struct dpaa2_eth_channel *
2385 alloc_channel(struct dpaa2_eth_priv *priv)
2387 struct dpaa2_eth_channel *channel;
2388 struct dpcon_attr attr;
2389 struct device *dev = priv->net_dev->dev.parent;
2392 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2396 channel->dpcon = setup_dpcon(priv);
2397 if (IS_ERR(channel->dpcon)) {
2398 err = PTR_ERR(channel->dpcon);
2402 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2405 dev_err(dev, "dpcon_get_attributes() failed\n");
2409 channel->dpcon_id = attr.id;
2410 channel->ch_id = attr.qbman_ch_id;
2411 channel->priv = priv;
2416 free_dpcon(priv, channel->dpcon);
2419 return ERR_PTR(err);
2422 static void free_channel(struct dpaa2_eth_priv *priv,
2423 struct dpaa2_eth_channel *channel)
2425 free_dpcon(priv, channel->dpcon);
2429 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
2430 * and register data availability notifications
2432 static int setup_dpio(struct dpaa2_eth_priv *priv)
2434 struct dpaa2_io_notification_ctx *nctx;
2435 struct dpaa2_eth_channel *channel;
2436 struct dpcon_notification_cfg dpcon_notif_cfg;
2437 struct device *dev = priv->net_dev->dev.parent;
2440 /* We want the ability to spread ingress traffic (RX, TX conf) to as
2441 * many cores as possible, so we need one channel for each core
2442 * (unless there are fewer queues than cores, in which case the extra
2443 * channels would be wasted).
2444 * Allocate one channel per core and register it to the core's
2445 * affine DPIO. If not enough channels are available for all cores
2446 * or if some cores don't have an affine DPIO, there will be no
2447 * ingress frame processing on those cores.
2448 */
2449 cpumask_clear(&priv->dpio_cpumask);
2450 for_each_online_cpu(i) {
2451 /* Try to allocate a channel */
2452 channel = alloc_channel(priv);
2453 if (IS_ERR_OR_NULL(channel)) {
2454 err = PTR_ERR_OR_ZERO(channel);
2455 if (err != -EPROBE_DEFER)
2457 "No affine channel for cpu %d and above\n", i);
2461 priv->channel[priv->num_channels] = channel;
2463 nctx = &channel->nctx;
2466 nctx->id = channel->ch_id;
2467 nctx->desired_cpu = i;
2469 /* Register the new context */
2470 channel->dpio = dpaa2_io_service_select(i);
2471 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2473 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2474 /* If no affine DPIO for this core, there's probably
2475 * none available for next cores either. Signal we want
2476 * to retry later, in case the DPIO devices weren't
2477 * probed yet.
2478 */
2479 err = -EPROBE_DEFER;
2480 goto err_service_reg;
2483 /* Register DPCON notification with MC */
2484 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2485 dpcon_notif_cfg.priority = 0;
2486 dpcon_notif_cfg.user_ctx = nctx->qman64;
2487 err = dpcon_set_notification(priv->mc_io, 0,
2488 channel->dpcon->mc_handle,
2491 dev_err(dev, "dpcon_set_notification failed()\n");
2495 /* If we managed to allocate a channel and also found an affine
2496 * DPIO for this core, add it to the final mask
2498 cpumask_set_cpu(i, &priv->dpio_cpumask);
2499 priv->num_channels++;
2501 /* Stop if we already have enough channels to accommodate all
2502 * RX and TX conf queues
2504 if (priv->num_channels == priv->dpni_attrs.num_queues)
2511 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2513 free_channel(priv, channel);
2515 if (err == -EPROBE_DEFER) {
2516 for (i = 0; i < priv->num_channels; i++) {
2517 channel = priv->channel[i];
2518 nctx = &channel->nctx;
2519 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2520 free_channel(priv, channel);
2522 priv->num_channels = 0;
2526 if (cpumask_empty(&priv->dpio_cpumask)) {
2527 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2531 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2532 cpumask_pr_args(&priv->dpio_cpumask));
2537 static void free_dpio(struct dpaa2_eth_priv *priv)
2539 struct device *dev = priv->net_dev->dev.parent;
2540 struct dpaa2_eth_channel *ch;
2543 /* deregister CDAN notifications and free channels */
2544 for (i = 0; i < priv->num_channels; i++) {
2545 ch = priv->channel[i];
2546 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2547 free_channel(priv, ch);
2551 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2554 struct device *dev = priv->net_dev->dev.parent;
2557 for (i = 0; i < priv->num_channels; i++)
2558 if (priv->channel[i]->nctx.desired_cpu == cpu)
2559 return priv->channel[i];
2561 /* We should never get here. Issue a warning and return
2562 * the first channel, because it's still better than nothing
2564 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2566 return priv->channel[0];
2569 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2571 struct device *dev = priv->net_dev->dev.parent;
2572 struct dpaa2_eth_fq *fq;
2573 int rx_cpu, txc_cpu;
2576 /* For each FQ, pick one channel/CPU to deliver frames to.
2577 * This may well change at runtime, either through irqbalance or
2578 * through direct user intervention.
2580 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2582 for (i = 0; i < priv->num_fqs; i++) {
2583 fq = &priv->fq[i];
2584 switch (fq->type) {
2585 case DPAA2_RX_FQ:
2586 fq->target_cpu = rx_cpu;
2587 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2588 if (rx_cpu >= nr_cpu_ids)
2589 rx_cpu = cpumask_first(&priv->dpio_cpumask);
2590 break;
2591 case DPAA2_TX_CONF_FQ:
2592 fq->target_cpu = txc_cpu;
2593 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2594 if (txc_cpu >= nr_cpu_ids)
2595 txc_cpu = cpumask_first(&priv->dpio_cpumask);
2596 break;
2597 default:
2598 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2599 }
2600 fq->channel = get_affine_channel(priv, fq->target_cpu);
2606 static void setup_fqs(struct dpaa2_eth_priv *priv)
2610 /* We have one TxConf FQ per Tx flow.
2611 * The number of Tx and Rx queues is the same.
2612 * Tx queues come first in the fq array.
2614 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2615 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2616 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2617 priv->fq[priv->num_fqs++].flowid = (u16)i;
2620 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2621 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2622 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2623 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2624 priv->fq[priv->num_fqs].tc = (u8)j;
2625 priv->fq[priv->num_fqs++].flowid = (u16)i;
2629 /* For each FQ, decide on which core to process incoming frames */
2630 set_fq_affinity(priv);
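/* Illustrative sketch, not part of the driver: the loops above lay out
 * priv->fq[] as all Tx-confirmation queues first, then the Rx queues
 * grouped by traffic class. For example, with 8 queues and 2 traffic
 * classes:
 *   fq[0..7]   Tx conf, flowid 0..7
 *   fq[8..15]  Rx, tc 0, flowid 0..7
 *   fq[16..23] Rx, tc 1, flowid 0..7
 * The helper below is hypothetical and only shows the index arithmetic
 * for the Rx portion of that layout.
 */
static inline unsigned int example_rx_fq_index(unsigned int num_queues,
					       unsigned int tc,
					       unsigned int flowid)
{
	return num_queues + tc * num_queues + flowid;
}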
2633 /* Allocate and configure one buffer pool for each interface */
2634 static int setup_dpbp(struct dpaa2_eth_priv *priv)
2637 struct fsl_mc_device *dpbp_dev;
2638 struct device *dev = priv->net_dev->dev.parent;
2639 struct dpbp_attr dpbp_attrs;
2641 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2645 err = -EPROBE_DEFER;
2647 dev_err(dev, "DPBP device allocation failed\n");
2651 priv->dpbp_dev = dpbp_dev;
2653 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2654 &dpbp_dev->mc_handle);
2656 dev_err(dev, "dpbp_open() failed\n");
2660 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2662 dev_err(dev, "dpbp_reset() failed\n");
2666 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2668 dev_err(dev, "dpbp_enable() failed\n");
2672 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2675 dev_err(dev, "dpbp_get_attributes() failed\n");
2678 priv->bpid = dpbp_attrs.bpid;
2683 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2686 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2688 fsl_mc_object_free(dpbp_dev);
2693 static void free_dpbp(struct dpaa2_eth_priv *priv)
2696 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2697 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2698 fsl_mc_object_free(priv->dpbp_dev);
2701 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
2703 struct device *dev = priv->net_dev->dev.parent;
2704 struct dpni_buffer_layout buf_layout = {0};
2708 /* We need to check for WRIOP version 1.0.0, but depending on the MC
2709 * version, this number is not always provided correctly on rev1.
2710 * We need to check for both alternatives in this situation.
2712 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2713 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
2714 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
2716 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
2718 /* We need to ensure that the buffer size seen by WRIOP is a multiple
2719 * of 64 or 256 bytes depending on the WRIOP version.
2721 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
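/* Illustrative sketch, not part of the driver: ALIGN_DOWN() above rounds
 * the Rx buffer size down to a multiple of rx_buf_align (64 B or 256 B
 * depending on the WRIOP revision), so the size programmed into hardware
 * always honours that constraint. A plain-C equivalent with a hypothetical
 * name, valid for any non-zero alignment:
 */
static inline unsigned int example_align_down(unsigned int size,
					      unsigned int align)
{
	/* e.g. example_align_down(2000, 256) == 1792,
	 *      example_align_down(2048, 256) == 2048
	 */
	return size - (size % align);
}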
2724 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
2725 buf_layout.pass_timestamp = true;
2726 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
2727 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2728 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2729 DPNI_QUEUE_TX, &buf_layout);
2731 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
2735 /* tx-confirm buffer */
2736 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2737 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2738 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
2740 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
2744 /* Now that we've set our tx buffer layout, retrieve the minimum
2745 * required tx data offset.
2747 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2748 &priv->tx_data_offset);
2750 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2754 if ((priv->tx_data_offset % 64) != 0)
2755 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2756 priv->tx_data_offset);
2759 buf_layout.pass_frame_status = true;
2760 buf_layout.pass_parser_result = true;
2761 buf_layout.data_align = rx_buf_align;
2762 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2763 buf_layout.private_data_size = 0;
2764 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2765 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2766 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
2767 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2768 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2769 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2770 DPNI_QUEUE_RX, &buf_layout);
2772 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2779 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
2780 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
2782 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2783 struct dpaa2_eth_fq *fq,
2784 struct dpaa2_fd *fd, u8 prio,
2785 u32 num_frames __always_unused,
2786 int *frames_enqueued)
2790 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2791 priv->tx_qdid, prio,
2793 if (!err && frames_enqueued)
2794 *frames_enqueued = 1;
2798 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
2799 struct dpaa2_eth_fq *fq,
2800 struct dpaa2_fd *fd,
2801 u8 prio, u32 num_frames,
2802 int *frames_enqueued)
2806 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
2813 if (frames_enqueued)
2814 *frames_enqueued = err;
2818 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
2820 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2821 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2822 priv->enqueue = dpaa2_eth_enqueue_qd;
2824 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
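/* Illustrative sketch, not part of the driver: the choice above hinges on
 * a (major, minor) firmware version comparison against
 * DPNI_ENQUEUE_FQID_VER_MAJOR/MINOR (7.9); older DPNIs fall back to
 * QDID-based enqueue. A hypothetical helper showing the kind of comparison
 * involved:
 */
static inline int example_ver_cmp(unsigned int maj, unsigned int min,
				  unsigned int ref_maj, unsigned int ref_min)
{
	/* < 0: older than the reference, 0: same, > 0: newer */
	if (maj != ref_maj)
		return (int)maj - (int)ref_maj;
	return (int)min - (int)ref_min;
}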
2827 static int set_pause(struct dpaa2_eth_priv *priv)
2829 struct device *dev = priv->net_dev->dev.parent;
2830 struct dpni_link_cfg link_cfg = {0};
2833 /* Get the default link options so we don't override other flags */
2834 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2836 dev_err(dev, "dpni_get_link_cfg() failed\n");
2840 /* By default, enable both Rx and Tx pause frames */
2841 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
2842 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2843 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2845 dev_err(dev, "dpni_set_link_cfg() failed\n");
2849 priv->link_state.options = link_cfg.options;
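/* Illustrative sketch, not part of the driver: the two link options above
 * encode pause support as a pair of flags; setting PAUSE while clearing
 * ASYM_PAUSE selects symmetric (Rx + Tx) flow control. A hypothetical
 * decoding helper, assuming the flag semantics described in the comments
 * above:
 */
static inline void example_decode_pause(unsigned long long opts,
					unsigned long long opt_pause,
					unsigned long long opt_asym_pause,
					int *rx_pause, int *tx_pause)
{
	/* With ASYM clear, Rx and Tx pause both follow the PAUSE bit;
	 * with ASYM set, the Tx direction is inverted relative to Rx.
	 */
	*rx_pause = !!(opts & opt_pause);
	*tx_pause = *rx_pause ^ !!(opts & opt_asym_pause);
}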
2854 static void update_tx_fqids(struct dpaa2_eth_priv *priv)
2856 struct dpni_queue_id qid = {0};
2857 struct dpaa2_eth_fq *fq;
2858 struct dpni_queue queue;
2861 /* We only use Tx FQIDs for FQID-based enqueue, so check
2862 * if DPNI version supports it before updating FQIDs
2864 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2865 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2868 for (i = 0; i < priv->num_fqs; i++) {
2869 fq = &priv->fq[i];
2870 if (fq->type != DPAA2_TX_CONF_FQ)
2871 continue;
2872 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2873 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2874 DPNI_QUEUE_TX, j, fq->flowid,
2879 fq->tx_fqid[j] = qid.fqid;
2880 if (fq->tx_fqid[j] == 0)
2885 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
2890 netdev_info(priv->net_dev,
2891 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
2892 priv->enqueue = dpaa2_eth_enqueue_qd;
2895 /* Configure ingress classification based on VLAN PCP */
2896 static int set_vlan_qos(struct dpaa2_eth_priv *priv)
2898 struct device *dev = priv->net_dev->dev.parent;
2899 struct dpkg_profile_cfg kg_cfg = {0};
2900 struct dpni_qos_tbl_cfg qos_cfg = {0};
2901 struct dpni_rule_cfg key_params;
2902 void *dma_mem, *key, *mask;
2903 u8 key_size = 2; /* VLAN TCI field */
2906 /* VLAN-based classification only makes sense if we have multiple
2907 * traffic classes.
2908 * Also, we need to extract just the 3-bit PCP field from the VLAN
2909 * header and we can only do that by using a mask
2910 */
2911 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
2912 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
2916 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2920 kg_cfg.num_extracts = 1;
2921 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
2922 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
2923 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
2924 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
2926 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
2928 dev_err(dev, "dpni_prepare_key_cfg failed\n");
2933 qos_cfg.default_tc = 0;
2934 qos_cfg.discard_on_miss = 0;
2935 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
2936 DPAA2_CLASSIFIER_DMA_SIZE,
2937 DMA_TO_DEVICE);
2938 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
2939 dev_err(dev, "QoS table DMA mapping failed\n");
2944 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
2946 dev_err(dev, "dpni_set_qos_table failed\n");
2950 /* Add QoS table entries */
2951 key = kzalloc(key_size * 2, GFP_KERNEL);
2956 mask = key + key_size;
2957 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
2959 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
2960 DMA_TO_DEVICE);
2961 if (dma_mapping_error(dev, key_params.key_iova)) {
2962 dev_err(dev, "Qos table entry DMA mapping failed\n");
2967 key_params.mask_iova = key_params.key_iova + key_size;
2968 key_params.key_size = key_size;
2970 /* We add rules for PCP-based distribution starting with highest
2971 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
2972 * classes to accommodate all priority levels, the lowest ones end up
2973 * on TC 0 which was configured as default
2975 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
2976 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
2977 dma_sync_single_for_device(dev, key_params.key_iova,
2978 key_size * 2, DMA_TO_DEVICE);
2980 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
2983 dev_err(dev, "dpni_add_qos_entry failed\n");
2984 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
2989 priv->vlan_cls_enabled = true;
2991 /* Table and key memory is not persistent, clean everything up after
2992 * configuration is finished
2995 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
2999 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3000 DMA_TO_DEVICE);
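/* Illustrative sketch, not part of the driver: the QoS entries added above
 * map the highest VLAN PCP values onto the highest traffic classes and let
 * everything else fall through to the default TC 0. With 4 traffic classes
 * the result is
 *   PCP 7 -> TC 3, PCP 6 -> TC 2, PCP 5 -> TC 1, PCP 4 -> TC 0,
 *   PCP 3..0 -> TC 0 (default, no explicit entry).
 * Hypothetical helper, assuming 1 <= num_tc <= 8:
 */
static inline int example_pcp_to_tc(int pcp, int num_tc)
{
	int tc = pcp - (8 - num_tc);

	return tc > 0 ? tc : 0; /* unmatched PCPs use the default TC 0 */
}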
3007 /* Configure the DPNI object this interface is associated with */
3008 static int setup_dpni(struct fsl_mc_device *ls_dev)
3010 struct device *dev = &ls_dev->dev;
3011 struct dpaa2_eth_priv *priv;
3012 struct net_device *net_dev;
3015 net_dev = dev_get_drvdata(dev);
3016 priv = netdev_priv(net_dev);
3018 /* get a handle for the DPNI object */
3019 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3021 dev_err(dev, "dpni_open() failed\n");
3025 /* Check if we can work with this DPNI object */
3026 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3027 &priv->dpni_ver_minor);
3029 dev_err(dev, "dpni_get_api_version() failed\n");
3032 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3033 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3034 priv->dpni_ver_major, priv->dpni_ver_minor,
3035 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3040 ls_dev->mc_io = priv->mc_io;
3041 ls_dev->mc_handle = priv->mc_token;
3043 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3045 dev_err(dev, "dpni_reset() failed\n");
3049 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3052 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3056 err = set_buffer_layout(priv);
3060 set_enqueue_mode(priv);
3062 /* Enable pause frame support */
3063 if (dpaa2_eth_has_pause_support(priv)) {
3064 err = set_pause(priv);
3069 err = set_vlan_qos(priv);
3070 if (err && err != -EOPNOTSUPP)
3073 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3074 sizeof(struct dpaa2_eth_cls_rule),
3076 if (!priv->cls_rules) {
3084 dpni_close(priv->mc_io, 0, priv->mc_token);
3089 static void free_dpni(struct dpaa2_eth_priv *priv)
3093 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3095 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3098 dpni_close(priv->mc_io, 0, priv->mc_token);
3101 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
3102 struct dpaa2_eth_fq *fq)
3104 struct device *dev = priv->net_dev->dev.parent;
3105 struct dpni_queue queue;
3106 struct dpni_queue_id qid;
3109 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3110 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3112 dev_err(dev, "dpni_get_queue(RX) failed\n");
3116 fq->fqid = qid.fqid;
3118 queue.destination.id = fq->channel->dpcon_id;
3119 queue.destination.type = DPNI_DEST_DPCON;
3120 queue.destination.priority = 1;
3121 queue.user_context = (u64)(uintptr_t)fq;
3122 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3123 DPNI_QUEUE_RX, fq->tc, fq->flowid,
3124 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3127 dev_err(dev, "dpni_set_queue(RX) failed\n");
3132 /* only once for each channel */
3136 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3139 dev_err(dev, "xdp_rxq_info_reg failed\n");
3143 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3144 MEM_TYPE_PAGE_ORDER0, NULL);
3146 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3153 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
3154 struct dpaa2_eth_fq *fq)
3156 struct device *dev = priv->net_dev->dev.parent;
3157 struct dpni_queue queue;
3158 struct dpni_queue_id qid;
3161 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3162 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3163 DPNI_QUEUE_TX, i, fq->flowid,
3166 dev_err(dev, "dpni_get_queue(TX) failed\n");
3169 fq->tx_fqid[i] = qid.fqid;
3172 /* All Tx queues belonging to the same flowid have the same qdbin */
3173 fq->tx_qdbin = qid.qdbin;
3175 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3176 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3179 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3183 fq->fqid = qid.fqid;
3185 queue.destination.id = fq->channel->dpcon_id;
3186 queue.destination.type = DPNI_DEST_DPCON;
3187 queue.destination.priority = 0;
3188 queue.user_context = (u64)(uintptr_t)fq;
3189 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3190 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3191 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3194 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3201 /* Supported header fields for Rx hash distribution key */
3202 static const struct dpaa2_eth_dist_fields dist_fields[] = {
3205 .rxnfc_field = RXH_L2DA,
3206 .cls_prot = NET_PROT_ETH,
3207 .cls_field = NH_FLD_ETH_DA,
3208 .id = DPAA2_ETH_DIST_ETHDST,
3211 .cls_prot = NET_PROT_ETH,
3212 .cls_field = NH_FLD_ETH_SA,
3213 .id = DPAA2_ETH_DIST_ETHSRC,
3216 /* This is the last ethertype field parsed:
3217 * depending on frame format, it can be the MAC ethertype
3218 * or the VLAN etype.
3220 .cls_prot = NET_PROT_ETH,
3221 .cls_field = NH_FLD_ETH_TYPE,
3222 .id = DPAA2_ETH_DIST_ETHTYPE,
3226 .rxnfc_field = RXH_VLAN,
3227 .cls_prot = NET_PROT_VLAN,
3228 .cls_field = NH_FLD_VLAN_TCI,
3229 .id = DPAA2_ETH_DIST_VLAN,
3233 .rxnfc_field = RXH_IP_SRC,
3234 .cls_prot = NET_PROT_IP,
3235 .cls_field = NH_FLD_IP_SRC,
3236 .id = DPAA2_ETH_DIST_IPSRC,
3239 .rxnfc_field = RXH_IP_DST,
3240 .cls_prot = NET_PROT_IP,
3241 .cls_field = NH_FLD_IP_DST,
3242 .id = DPAA2_ETH_DIST_IPDST,
3245 .rxnfc_field = RXH_L3_PROTO,
3246 .cls_prot = NET_PROT_IP,
3247 .cls_field = NH_FLD_IP_PROTO,
3248 .id = DPAA2_ETH_DIST_IPPROTO,
3251 /* Using UDP ports, this is functionally equivalent to raw
3252 * byte pairs from L4 header.
3254 .rxnfc_field = RXH_L4_B_0_1,
3255 .cls_prot = NET_PROT_UDP,
3256 .cls_field = NH_FLD_UDP_PORT_SRC,
3257 .id = DPAA2_ETH_DIST_L4SRC,
3260 .rxnfc_field = RXH_L4_B_2_3,
3261 .cls_prot = NET_PROT_UDP,
3262 .cls_field = NH_FLD_UDP_PORT_DST,
3263 .id = DPAA2_ETH_DIST_L4DST,
3268 /* Configure the Rx hash key using the legacy API */
3269 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3271 struct device *dev = priv->net_dev->dev.parent;
3272 struct dpni_rx_tc_dist_cfg dist_cfg;
3275 memset(&dist_cfg, 0, sizeof(dist_cfg));
3277 dist_cfg.key_cfg_iova = key;
3278 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3279 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3281 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3282 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
3285 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
3293 /* Configure the Rx hash key using the new API */
3294 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3296 struct device *dev = priv->net_dev->dev.parent;
3297 struct dpni_rx_dist_cfg dist_cfg;
3300 memset(&dist_cfg, 0, sizeof(dist_cfg));
3302 dist_cfg.key_cfg_iova = key;
3303 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3304 dist_cfg.enable = 1;
3306 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3308 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
3311 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
3319 /* Configure the Rx flow classification key */
3320 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3322 struct device *dev = priv->net_dev->dev.parent;
3323 struct dpni_rx_dist_cfg dist_cfg;
3326 memset(&dist_cfg, 0, sizeof(dist_cfg));
3328 dist_cfg.key_cfg_iova = key;
3329 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3330 dist_cfg.enable = 1;
3332 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3334 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
3337 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
3345 /* Size of the Rx flow classification key */
3346 int dpaa2_eth_cls_key_size(u64 fields)
3350 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3351 if (!(fields & dist_fields[i].id))
3353 size += dist_fields[i].size;
3359 /* Offset of header field in Rx classification key */
3360 int dpaa2_eth_cls_fld_off(int prot, int field)
3364 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3365 if (dist_fields[i].cls_prot == prot &&
3366 dist_fields[i].cls_field == field)
3368 off += dist_fields[i].size;
3371 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
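/* Illustrative sketch, not part of the driver: dpaa2_eth_cls_key_size()
 * above sums the sizes of the selected dist_fields[] entries, while
 * dpaa2_eth_cls_fld_off() walks the same table accumulating sizes until it
 * reaches the requested field, so the key is a concatenation of fields in
 * table order. A hypothetical, self-contained version of the size
 * computation:
 */
struct example_dist_fld {
	unsigned long long id;	/* selection bit, e.g. an IP-source field id */
	unsigned int size;	/* bytes this field occupies in the key */
};

static inline unsigned int
example_cls_key_size(const struct example_dist_fld *flds, unsigned int n,
		     unsigned long long fields)
{
	unsigned int i, size = 0;

	for (i = 0; i < n; i++)
		if (fields & flds[i].id)
			size += flds[i].size;
	return size;
}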
3375 /* Prune unused fields from the classification rule.
3376 * Used when masking is not supported
3378 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
3380 int off = 0, new_off = 0;
3383 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3384 size = dist_fields[i].size;
3385 if (dist_fields[i].id & fields) {
3386 memcpy(key_mem + new_off, key_mem + off, size);
3393 /* Set Rx distribution (hash or flow classification) key
3394 * flags is a combination of RXH_ bits
3396 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
3397 enum dpaa2_eth_rx_dist type, u64 flags)
3399 struct device *dev = net_dev->dev.parent;
3400 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3401 struct dpkg_profile_cfg cls_cfg;
3402 u32 rx_hash_fields = 0;
3403 dma_addr_t key_iova;
3408 memset(&cls_cfg, 0, sizeof(cls_cfg));
3410 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3411 struct dpkg_extract *key =
3412 &cls_cfg.extracts[cls_cfg.num_extracts];
3414 /* For both Rx hashing and classification keys
3415 * we set only the selected fields.
3417 if (!(flags & dist_fields[i].id))
3419 if (type == DPAA2_ETH_RX_DIST_HASH)
3420 rx_hash_fields |= dist_fields[i].rxnfc_field;
3422 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3423 dev_err(dev, "error adding key extraction rule, too many rules?\n");
3427 key->type = DPKG_EXTRACT_FROM_HDR;
3428 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3429 key->extract.from_hdr.type = DPKG_FULL_FIELD;
3430 key->extract.from_hdr.field = dist_fields[i].cls_field;
3431 cls_cfg.num_extracts++;
3434 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3438 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3440 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
3444 /* Prepare for setting the rx dist */
3445 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
3446 DMA_TO_DEVICE);
3447 if (dma_mapping_error(dev, key_iova)) {
3448 dev_err(dev, "DMA mapping failed\n");
3453 if (type == DPAA2_ETH_RX_DIST_HASH) {
3454 if (dpaa2_eth_has_legacy_dist(priv))
3455 err = config_legacy_hash_key(priv, key_iova);
3457 err = config_hash_key(priv, key_iova);
3459 err = config_cls_key(priv, key_iova);
3462 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3463 DMA_TO_DEVICE);
3464 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
3465 priv->rx_hash_fields = rx_hash_fields;
3472 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
3474 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3478 if (!dpaa2_eth_hash_enabled(priv))
3481 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
3482 if (dist_fields[i].rxnfc_field & flags)
3483 key |= dist_fields[i].id;
3485 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
3488 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
3490 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
3493 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
3495 struct device *dev = priv->net_dev->dev.parent;
3498 /* Check if we actually support Rx flow classification */
3499 if (dpaa2_eth_has_legacy_dist(priv)) {
3500 dev_dbg(dev, "Rx cls not supported by current MC version\n");
3504 if (!dpaa2_eth_fs_enabled(priv)) {
3505 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
3509 if (!dpaa2_eth_hash_enabled(priv)) {
3510 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
3514 /* If there is no support for masking in the classification table,
3515 * we don't set a default key, as it will depend on the rules
3516 * added by the user at runtime.
3518 if (!dpaa2_eth_fs_mask_enabled(priv))
3521 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
3526 priv->rx_cls_enabled = 1;
3531 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3532 * frame queues and channels
3534 static int bind_dpni(struct dpaa2_eth_priv *priv)
3536 struct net_device *net_dev = priv->net_dev;
3537 struct device *dev = net_dev->dev.parent;
3538 struct dpni_pools_cfg pools_params;
3539 struct dpni_error_cfg err_cfg;
3543 pools_params.num_dpbp = 1;
3544 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3545 pools_params.pools[0].backup_pool = 0;
3546 pools_params.pools[0].buffer_size = priv->rx_buf_size;
3547 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3549 dev_err(dev, "dpni_set_pools() failed\n");
3553 /* have the interface implicitly distribute traffic based on
3554 * the default hash key
3556 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
3557 if (err && err != -EOPNOTSUPP)
3558 dev_err(dev, "Failed to configure hashing\n");
3560 /* Configure the flow classification key; it includes all
3561 * supported header fields and cannot be modified at runtime
3563 err = dpaa2_eth_set_default_cls(priv);
3564 if (err && err != -EOPNOTSUPP)
3565 dev_err(dev, "Failed to configure Rx classification key\n");
3567 /* Configure handling of error frames */
3568 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3569 err_cfg.set_frame_annotation = 1;
3570 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3571 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3574 dev_err(dev, "dpni_set_errors_behavior failed\n");
3578 /* Configure Rx and Tx conf queues to generate CDANs */
3579 for (i = 0; i < priv->num_fqs; i++) {
3580 switch (priv->fq[i].type) {
3582 err = setup_rx_flow(priv, &priv->fq[i]);
3584 case DPAA2_TX_CONF_FQ:
3585 err = setup_tx_flow(priv, &priv->fq[i]);
3588 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3595 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3596 DPNI_QUEUE_TX, &priv->tx_qdid);
3598 dev_err(dev, "dpni_get_qdid() failed\n");
3605 /* Allocate rings for storing incoming frame descriptors */
3606 static int alloc_rings(struct dpaa2_eth_priv *priv)
3608 struct net_device *net_dev = priv->net_dev;
3609 struct device *dev = net_dev->dev.parent;
3612 for (i = 0; i < priv->num_channels; i++) {
3613 priv->channel[i]->store =
3614 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3615 if (!priv->channel[i]->store) {
3616 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3624 for (i = 0; i < priv->num_channels; i++) {
3625 if (!priv->channel[i]->store)
3627 dpaa2_io_store_destroy(priv->channel[i]->store);
3633 static void free_rings(struct dpaa2_eth_priv *priv)
3637 for (i = 0; i < priv->num_channels; i++)
3638 dpaa2_io_store_destroy(priv->channel[i]->store);
3641 static int set_mac_addr(struct dpaa2_eth_priv *priv)
3643 struct net_device *net_dev = priv->net_dev;
3644 struct device *dev = net_dev->dev.parent;
3645 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3648 /* Get firmware address, if any */
3649 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3651 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3655 /* Get DPNI attributes address, if any */
3656 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3659 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
3663 /* First check if firmware has any address configured by bootloader */
3664 if (!is_zero_ether_addr(mac_addr)) {
3665 /* If the DPMAC addr != DPNI addr, update it */
3666 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3667 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3671 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3675 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3676 } else if (is_zero_ether_addr(dpni_mac_addr)) {
3677 /* No MAC address configured, fill in net_dev->dev_addr
3678 * with a random one
3679 */
3680 eth_hw_addr_random(net_dev);
3681 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3683 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3686 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3690 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3691 * practical purposes, this will be our "permanent" mac address,
3692 * at least until the next reboot. This move will also permit
3693 * register_netdevice() to properly fill up net_dev->perm_addr.
3695 net_dev->addr_assign_type = NET_ADDR_PERM;
3697 /* NET_ADDR_PERM is default, all we have to do is
3698 * fill in the device addr.
3700 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
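/* Illustrative sketch, not part of the driver: the address-selection policy
 * implemented above, reduced to its three outcomes. Enum and helper names
 * are hypothetical.
 */
enum example_mac_src {
	EXAMPLE_MAC_FIRMWARE,	/* bootloader/firmware-provided port address */
	EXAMPLE_MAC_DPNI,	/* address already configured on the DPNI */
	EXAMPLE_MAC_RANDOM,	/* nothing configured anywhere */
};

static inline enum example_mac_src
example_pick_mac(int fw_addr_is_zero, int dpni_addr_is_zero)
{
	if (!fw_addr_is_zero)
		return EXAMPLE_MAC_FIRMWARE;	/* firmware address wins */
	if (dpni_addr_is_zero)
		return EXAMPLE_MAC_RANDOM;	/* fall back to a random address */
	return EXAMPLE_MAC_DPNI;		/* keep the DPNI's own address */
}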
3706 static int netdev_init(struct net_device *net_dev)
3708 struct device *dev = net_dev->dev.parent;
3709 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3710 u32 options = priv->dpni_attrs.options;
3711 u64 supported = 0, not_supported = 0;
3712 u8 bcast_addr[ETH_ALEN];
3716 net_dev->netdev_ops = &dpaa2_eth_ops;
3717 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
3719 err = set_mac_addr(priv);
3723 /* Explicitly add the broadcast address to the MAC filtering table */
3724 eth_broadcast_addr(bcast_addr);
3725 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3727 dev_err(dev, "dpni_add_mac_addr() failed\n");
3731 /* Set MTU upper limit; lower limit is 68B (default value) */
3732 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3733 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3736 dev_err(dev, "dpni_set_max_frame_length() failed\n");
3740 /* Set actual number of queues in the net device */
3741 num_queues = dpaa2_eth_queue_count(priv);
3742 err = netif_set_real_num_tx_queues(net_dev, num_queues);
3744 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
3747 err = netif_set_real_num_rx_queues(net_dev, num_queues);
3749 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
3753 /* Capabilities listing */
3754 supported |= IFF_LIVE_ADDR_CHANGE;
3756 if (options & DPNI_OPT_NO_MAC_FILTER)
3757 not_supported |= IFF_UNICAST_FLT;
3759 supported |= IFF_UNICAST_FLT;
3761 net_dev->priv_flags |= supported;
3762 net_dev->priv_flags &= ~not_supported;
3765 net_dev->features = NETIF_F_RXCSUM |
3766 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3767 NETIF_F_SG | NETIF_F_HIGHDMA |
3768 NETIF_F_LLTX | NETIF_F_HW_TC;
3769 net_dev->hw_features = net_dev->features;
3774 static int poll_link_state(void *arg)
3776 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3779 while (!kthread_should_stop()) {
3780 err = link_state_update(priv);
3784 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3790 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
3792 struct fsl_mc_device *dpni_dev, *dpmac_dev;
3793 struct dpaa2_mac *mac;
3796 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
3797 dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
3798 if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
3801 if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
3804 mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
3808 mac->mc_dev = dpmac_dev;
3809 mac->mc_io = priv->mc_io;
3810 mac->net_dev = priv->net_dev;
3812 err = dpaa2_mac_connect(mac);
3814 netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
3823 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
3828 dpaa2_mac_disconnect(priv->mac);
3833 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3836 struct device *dev = (struct device *)arg;
3837 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3838 struct net_device *net_dev = dev_get_drvdata(dev);
3839 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3842 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3843 DPNI_IRQ_INDEX, &status);
3844 if (unlikely(err)) {
3845 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
3849 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
3850 link_state_update(netdev_priv(net_dev));
3852 if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
3853 set_mac_addr(netdev_priv(net_dev));
3854 update_tx_fqids(priv);
3856 rtnl_lock();
3857 if (priv->mac)
3858 dpaa2_eth_disconnect_mac(priv);
3859 else
3860 dpaa2_eth_connect_mac(priv);
3861 rtnl_unlock();
3867 static int setup_irqs(struct fsl_mc_device *ls_dev)
3870 struct fsl_mc_device_irq *irq;
3872 err = fsl_mc_allocate_irqs(ls_dev);
3874 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3878 irq = ls_dev->irqs[0];
3879 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3880 NULL, dpni_irq0_handler_thread,
3881 IRQF_NO_SUSPEND | IRQF_ONESHOT,
3882 dev_name(&ls_dev->dev), &ls_dev->dev);
3884 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
3888 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3889 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
3890 DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
3892 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
3896 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3899 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
3906 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3908 fsl_mc_free_irqs(ls_dev);
3913 static void add_ch_napi(struct dpaa2_eth_priv *priv)
3916 struct dpaa2_eth_channel *ch;
3918 for (i = 0; i < priv->num_channels; i++) {
3919 ch = priv->channel[i];
3920 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3921 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3926 static void del_ch_napi(struct dpaa2_eth_priv *priv)
3929 struct dpaa2_eth_channel *ch;
3931 for (i = 0; i < priv->num_channels; i++) {
3932 ch = priv->channel[i];
3933 netif_napi_del(&ch->napi);
3937 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3940 struct net_device *net_dev = NULL;
3941 struct dpaa2_eth_priv *priv = NULL;
3944 dev = &dpni_dev->dev;
3947 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
3949 dev_err(dev, "alloc_etherdev_mq() failed\n");
3953 SET_NETDEV_DEV(net_dev, dev);
3954 dev_set_drvdata(dev, net_dev);
3956 priv = netdev_priv(net_dev);
3957 priv->net_dev = net_dev;
3959 priv->iommu_domain = iommu_get_domain_for_dev(dev);
3961 /* Obtain a MC portal */
3962 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3966 err = -EPROBE_DEFER;
3968 dev_err(dev, "MC portal allocation failed\n");
3969 goto err_portal_alloc;
3972 /* MC objects initialization and configuration */
3973 err = setup_dpni(dpni_dev);
3975 goto err_dpni_setup;
3977 err = setup_dpio(priv);
3979 goto err_dpio_setup;
3983 err = setup_dpbp(priv);
3985 goto err_dpbp_setup;
3987 err = bind_dpni(priv);
3991 /* Add a NAPI context for each channel */
3994 /* Percpu statistics */
3995 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
3996 if (!priv->percpu_stats) {
3997 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
3999 goto err_alloc_percpu_stats;
4001 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4002 if (!priv->percpu_extras) {
4003 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4005 goto err_alloc_percpu_extras;
4008 priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4009 if (!priv->sgt_cache) {
4010 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4012 goto err_alloc_sgt_cache;
4015 err = netdev_init(net_dev);
4017 goto err_netdev_init;
4019 /* Configure checksum offload based on current interface flags */
4020 err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4024 err = set_tx_csum(priv, !!(net_dev->features &
4025 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4029 err = alloc_rings(priv);
4031 goto err_alloc_rings;
4033 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4034 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4035 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4036 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4038 dev_dbg(dev, "PFC not supported\n");
4042 err = setup_irqs(dpni_dev);
4044 netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
4045 priv->poll_thread = kthread_run(poll_link_state, priv,
4046 "%s_poll_link", net_dev->name);
4047 if (IS_ERR(priv->poll_thread)) {
4048 dev_err(dev, "Error starting polling thread\n");
4049 goto err_poll_thread;
4051 priv->do_link_poll = true;
4054 err = dpaa2_eth_connect_mac(priv);
4056 goto err_connect_mac;
4058 err = register_netdev(net_dev);
4060 dev_err(dev, "register_netdev() failed\n");
4061 goto err_netdev_reg;
4064 #ifdef CONFIG_DEBUG_FS
4065 dpaa2_dbg_add(priv);
4068 dev_info(dev, "Probed interface %s\n", net_dev->name);
4072 dpaa2_eth_disconnect_mac(priv);
4074 if (priv->do_link_poll)
4075 kthread_stop(priv->poll_thread);
4077 fsl_mc_free_irqs(dpni_dev);
4083 free_percpu(priv->sgt_cache);
4084 err_alloc_sgt_cache:
4085 free_percpu(priv->percpu_extras);
4086 err_alloc_percpu_extras:
4087 free_percpu(priv->percpu_stats);
4088 err_alloc_percpu_stats:
4097 fsl_mc_portal_free(priv->mc_io);
4099 dev_set_drvdata(dev, NULL);
4100 free_netdev(net_dev);
4105 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4108 struct net_device *net_dev;
4109 struct dpaa2_eth_priv *priv;
4112 net_dev = dev_get_drvdata(dev);
4113 priv = netdev_priv(net_dev);
4115 #ifdef CONFIG_DEBUG_FS
4116 dpaa2_dbg_remove(priv);
4119 dpaa2_eth_disconnect_mac(priv);
4122 unregister_netdev(net_dev);
4124 if (priv->do_link_poll)
4125 kthread_stop(priv->poll_thread);
4127 fsl_mc_free_irqs(ls_dev);
4130 free_percpu(priv->sgt_cache);
4131 free_percpu(priv->percpu_stats);
4132 free_percpu(priv->percpu_extras);
4139 fsl_mc_portal_free(priv->mc_io);
4141 free_netdev(net_dev);
4143 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4148 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4150 .vendor = FSL_MC_VENDOR_FREESCALE,
4155 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4157 static struct fsl_mc_driver dpaa2_eth_driver = {
4159 .name = KBUILD_MODNAME,
4160 .owner = THIS_MODULE,
4162 .probe = dpaa2_eth_probe,
4163 .remove = dpaa2_eth_remove,
4164 .match_id_table = dpaa2_eth_match_id_table
4167 static int __init dpaa2_eth_driver_init(void)
4171 dpaa2_eth_dbg_init();
4172 err = fsl_mc_driver_register(&dpaa2_eth_driver);
4174 dpaa2_eth_dbg_exit();
4181 static void __exit dpaa2_eth_driver_exit(void)
4183 dpaa2_eth_dbg_exit();
4184 fsl_mc_driver_unregister(&dpaa2_eth_driver);
4187 module_init(dpaa2_eth_driver_init);
4188 module_exit(dpaa2_eth_driver_exit);