// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth
 * files that use trace events only need to #include "dpaa2-eth-trace.h"
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}
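
/* Usage sketch (illustrative only, not part of the driver): buffers handed
 * back by hardware are described by IOVAs, so the Rx and Tx-conf paths
 * recover the kernel virtual address before touching the data:
 *
 *	dma_addr_t iova = dpaa2_fd_get_addr(fd);
 *	void *vaddr = dpaa2_iova_to_virt(priv->iommu_domain, iova);
 *
 * Without an IOMMU domain the IOVA already is the physical address, hence
 * the plain phys_to_virt() fallback above.
 */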
static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}
/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}
/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}
/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}
static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->recycled_bufs,
					       ch->recycled_bufs_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
		ch->buf_count -= ch->recycled_bufs_cnt;
	}

	ch->recycled_bufs_cnt = 0;
}
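
/* Hedged sketch of the batching above (illustrative, assuming an initially
 * empty recycle array): addresses are only staged until a full release
 * command's worth has accumulated, so callers see at most one portal access
 * per DPAA2_ETH_BUFS_PER_CMD recycled buffers:
 *
 *	dpaa2_eth_recycle_buf(priv, ch, addr);	// staged, no portal access
 *	...					// repeated ...
 *	dpaa2_eth_recycle_buf(priv, ch, addr);	// cnt hits BUFS_PER_CMD ->
 *						// one dpaa2_io_service_release()
 */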
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}

	return total_enqueued;
}
static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	// enqueue the array of XDP_TX frames
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}
static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  struct dpaa2_fd *fd,
				  void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}
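
/* Note on the flush threshold above (illustrative sketch): XDP_TX frames
 * are accumulated per Tx queue and only pushed to hardware once
 * DEV_MAP_BULK_SIZE descriptors are pending, or when the NAPI poll ends
 * (see dpaa2_eth_poll()). A rough trace for one full bulk would be:
 *
 *	N-1 x dpaa2_eth_xdp_enqueue()	-> FDs copied, no enqueue yet
 *	  1 x dpaa2_eth_xdp_enqueue()	-> dpaa2_eth_xdp_tx_flush() enqueues all N
 */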
static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err, offset;

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
			 dpaa2_fd_get_len(fd), false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_recycle_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			addr = dma_map_page(priv->net_dev->dev.parent,
					    virt_to_page(vaddr), 0,
					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
				free_pages((unsigned long)vaddr, 0);
			} else {
				dpaa2_eth_recycle_buf(priv, ch, addr);
			}
			ch->stats.xdp_drop++;
		} else {
			ch->stats.xdp_redirect++;
		}
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	return xdp_act;
}
static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	struct dpaa2_eth_priv *priv = ch->priv;
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct sk_buff *skb = NULL;
	unsigned int skb_len;

	if (fd_length > priv->rx_copybreak)
		return NULL;

	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);

	skb = napi_alloc_skb(&ch->napi, skb_len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
	skb_put(skb, fd_length);

	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);

	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));

	return skb;
}
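
/* Rationale sketch for the copybreak above (illustrative): for frames no
 * larger than priv->rx_copybreak it is cheaper to memcpy the payload into a
 * freshly allocated skb and hand the original buffer straight back to the
 * pool than to unmap the page and build an skb around it. Larger frames
 * return NULL here and take the zero-copy build_skb() path in
 * dpaa2_eth_rx().
 */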
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
		if (!skb) {
			dma_unmap_page(dev, addr, priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}
/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
	ch->buf_count--;
}
/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;
	ch->stats.frames_per_cdan += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}
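
/* Caller-side sketch (illustrative): the NAPI poll loop pairs one volatile
 * dequeue with one full store drain, so frames can never be leaked in the
 * store between iterations:
 *
 *	err = dpaa2_eth_pull_channel(ch);			// volatile dequeue
 *	store_cleaned = dpaa2_eth_consume_frames(ch, &fq);	// drain the store
 */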
static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}
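
/* Worked example (illustrative, assuming an untagged L2 PTP Sync frame):
 * the PTP header starts right after the 14-byte Ethernet header, the
 * correctionField lives at offset 8 inside the PTP header and
 * originTimestamp immediately follows the 34-byte header, so the function
 * would report:
 *
 *	*correction_offset      = 14 + 8  = 22
 *	*origintimestamp_offset = 14 + 34 = 48
 */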
/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	struct dpni_single_step_cfg cfg;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		cfg.en = 1;
		cfg.ch_update = udp;
		cfg.offset = offset1;
		cfg.peer_delay = 0;

		if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
					     &cfg))
			WARN_ONCE(1, "Failed to set single step register");
	}
}
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}
/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}
/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirmation
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}
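
/* Alignment sketch for the code above (illustrative, assuming
 * DPAA2_ETH_TX_BUF_ALIGN is a power of two, e.g. 64): PTR_ALIGN() on
 * (buffer_start - ALIGN) yields an aligned address at most ALIGN bytes
 * below buffer_start, so the FD start address gets aligned whenever the
 * skb headroom can spare those extra bytes; otherwise the unaligned
 * buffer_start is kept and only performance, not correctness, is affected.
 */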
/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_fq *fq,
				 const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (skb->cb[0] == TX_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		mutex_unlock(&priv->onestep_tstamp_lock);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;
	void *swa;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueue might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
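
/* Example of the priority reverse-mapping above (illustrative): with
 * net_dev->num_tc = 4, a frame on netdev tc 0 is submitted at hardware
 * priority 3 and one on netdev tc 3 at hardware priority 0, the highest,
 * since the hardware treats priority level 0 as the most urgent.
 */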
static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Take the lock just before transmitting a one-step
		 * timestamping packet; it is released in
		 * dpaa2_eth_free_tx_fd(), either once we confirm the packet
		 * has been sent on hardware or when cleaning up after a
		 * transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;
	ch->stats.bytes_per_cdan += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}
static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{
	int err;

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_enable_vlan_filter failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}
/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}
static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}
static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}
/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}
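
/* Behavior sketch (illustrative): with DPAA2_ETH_REFILL_THRESH below
 * DPAA2_ETH_NUM_BUFS, a channel whose buf_count has dipped under the
 * threshold triggers repeated dpaa2_eth_add_bufs() calls, each adding up to
 * DPAA2_ETH_BUFS_PER_CMD buffers, until the count is back at
 * DPAA2_ETH_NUM_BUFS or an allocation fails.
 */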
static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			kfree(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}
static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}
/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch, priv->bpid);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* Update NET DIM with the values for this CDAN */
	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
				ch->stats.bytes_per_cdan);
	ch->stats.frames_per_cdan = 0;
	ch->stats.bytes_per_cdan = 0;

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}
static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || pfc;
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}
static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_eth_is_type_phy(priv))
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}
static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpaa2_eth_seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	if (!dpaa2_eth_is_type_phy(priv)) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}

	dpaa2_eth_enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (dpaa2_eth_is_type_phy(priv))
		phylink_start(priv->mac->phylink);

	return 0;

enable_err:
	dpaa2_eth_disable_ch_napi(priv);
	dpaa2_eth_drain_pool(priv);
	return err;
}
/* Total number of in-flight frames on ingress queues */
static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed");
			break;
		}
		total += fcnt;
	}

	return total;
}
static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = dpaa2_eth_ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}
#define DPNI_TX_PENDING_VER_MAJOR	7
#define DPNI_TX_PENDING_VER_MINOR	13
static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
	msleep(500);
}
static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_stop(priv->mac->phylink);
	} else {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	}

	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
	dpaa2_eth_wait_for_egress_fq_empty(priv);

	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	dpaa2_eth_wait_for_ingress_fq_empty(priv);
	dpaa2_eth_disable_ch_napi(priv);

	/* Empty the buffer pool */
	dpaa2_eth_drain_pool(priv);

	/* Empty the Scatter-Gather Buffer cache */
	dpaa2_eth_sgt_cache_drain(priv);

	return 0;
}
static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}
/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}
/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}
/* Copy mac multicast addresses from @net_dev to @priv
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}
static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
				__be16 vlan_proto, u16 vid)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
			       vid, 0, 0, 0);

	if (err) {
		netdev_warn(priv->net_dev,
			    "Could not add the vlan id %u\n",
			    vid);
		return err;
	}

	return 0;
}
static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
				 __be16 vlan_proto, u16 vid)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);

	if (err) {
		netdev_warn(priv->net_dev,
			    "Could not remove the vlan id %u\n",
			    vid);
		return err;
	}

	return 0;
}
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		dpaa2_eth_add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	dpaa2_eth_add_mc_hw_addr(net_dev, priv);
	dpaa2_eth_add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}
static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
		err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
		if (err)
			return err;
	}

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = dpaa2_eth_set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = dpaa2_eth_set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}
static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;

	if (!dpaa2_ptp)
		return -EINVAL;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		priv->tx_tstamp_type = config.tx_type;
		break;
	default:
		return -ERANGE;
	}

	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		priv->rx_tstamp = true;
		/* TS is set for all frame types, not only those requested */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
			-EFAULT : 0;
}
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	if (cmd == SIOCSHWTSTAMP)
		return dpaa2_eth_ts_ioctl(dev, rq, cmd);

	if (dpaa2_eth_is_type_phy(priv))
		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);

	return -EOPNOTSUPP;
}
static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{
	int mfl, linear_mfl;

	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;

	if (mfl > linear_mfl) {
		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
			    linear_mfl - VLAN_ETH_HLEN);
		return false;
	}

	return true;
}
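
/* Worked example (illustrative numbers only): XDP_PACKET_HEADROOM is 256
 * and DPAA2_ETH_RX_HWA_SIZE is 64, so with a 2048-byte rx_buf_size the
 * linear budget would be roughly
 *
 *	linear_mfl = 2048 - 64 - rx_head_room - 256
 *
 * and any MTU whose L2 frame exceeds that would force scatter/gather Rx,
 * which the XDP data path cannot handle, hence the rejection.
 */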
static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
	int mfl, err;

	/* We enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than maximum size supported in hardware
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}
static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	if (prog)
		bpf_prog_add(prog, priv->num_channels);

	up = netif_running(dev);
	need_update = (!!priv->xdp_prog != !!prog);

	if (up)
		dpaa2_eth_stop(dev);

	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
	if (need_update) {
		err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
		if (err)
			goto out_err;
		err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
		if (err)
			goto out_err;
	}

	old = xchg(&priv->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		old = xchg(&ch->xdp.prog, prog);
		if (old)
			bpf_prog_put(old);
	}

	if (up) {
		err = dpaa2_eth_open(dev);
		if (err)
			return err;
	}

	return 0;

out_err:
	if (prog)
		bpf_prog_sub(prog, priv->num_channels);
	if (up)
		dpaa2_eth_open(dev);

	return err;
}
2375 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2377 switch (xdp->command) {
2378 case XDP_SETUP_PROG:
2379 return dpaa2_eth_setup_xdp(dev, xdp->prog);
2387 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2388 struct xdp_frame *xdpf,
2389 struct dpaa2_fd *fd)
2391 struct device *dev = net_dev->dev.parent;
2392 unsigned int needed_headroom;
2393 struct dpaa2_eth_swa *swa;
2394 void *buffer_start, *aligned_start;
2397 /* We require a minimum headroom to be able to transmit the frame.
2398 * Otherwise return an error and let the original net_device handle it
2400 needed_headroom = dpaa2_eth_needed_headroom(NULL);
2401 if (xdpf->headroom < needed_headroom)
2404 /* Setup the FD fields */
2405 memset(fd, 0, sizeof(*fd));
2407 /* Align FD address, if possible */
2408 buffer_start = xdpf->data - needed_headroom;
2409 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2410 DPAA2_ETH_TX_BUF_ALIGN);
2411 if (aligned_start >= xdpf->data - xdpf->headroom)
2412 buffer_start = aligned_start;
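/* Alignment sketch with made-up addresses, assuming a 64B
 * DPAA2_ETH_TX_BUF_ALIGN: for xdpf->data = 0x1234 and
 * needed_headroom = 0x80, buffer_start = 0x11b4 and the expression
 * above rounds it down to aligned_start = 0x1180; the aligned address
 * is only used if it still lies inside the headroom the frame owns,
 * i.e. not before xdpf->data - xdpf->headroom.
 */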
2414 swa = (struct dpaa2_eth_swa *)buffer_start;
2415 /* fill in necessary fields here */
2416 swa->type = DPAA2_ETH_SWA_XDP;
2417 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2418 swa->xdp.xdpf = xdpf;
2420 addr = dma_map_single(dev, buffer_start,
2423 if (unlikely(dma_mapping_error(dev, addr)))
2426 dpaa2_fd_set_addr(fd, addr);
2427 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2428 dpaa2_fd_set_len(fd, xdpf->len);
2429 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2430 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2435 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2436 struct xdp_frame **frames, u32 flags)
2438 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2439 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2440 struct rtnl_link_stats64 *percpu_stats;
2441 struct dpaa2_eth_fq *fq;
2442 struct dpaa2_fd *fds;
2443 int enqueued, i, err;
2445 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2448 if (!netif_running(net_dev))
2451 fq = &priv->fq[smp_processor_id()];
2452 xdp_redirect_fds = &fq->xdp_redirect_fds;
2453 fds = xdp_redirect_fds->fds;
2455 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2457 /* Create an FD for each xdp_frame in the received list */
2458 for (i = 0; i < n; i++) {
2459 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2463 xdp_redirect_fds->num = i;
2465 /* enqueue all the frame descriptors */
2466 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2468 /* update statistics */
2469 percpu_stats->tx_packets += enqueued;
2470 for (i = 0; i < enqueued; i++)
2471 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2476 static int update_xps(struct dpaa2_eth_priv *priv)
2478 struct net_device *net_dev = priv->net_dev;
2479 struct cpumask xps_mask;
2480 struct dpaa2_eth_fq *fq;
2481 int i, num_queues, netdev_queues;
2484 num_queues = dpaa2_eth_queue_count(priv);
2485 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2487 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2488 * queues, so only process those
2490 for (i = 0; i < netdev_queues; i++) {
2491 fq = &priv->fq[i % num_queues];
2493 cpumask_clear(&xps_mask);
2494 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2496 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2498 netdev_warn_once(net_dev, "Error setting XPS queue\n");
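/* Resulting mapping, e.g. for 8 hardware queues and 2 traffic
 * classes: netdev_queues = 16 and netdev queue i inherits the CPU
 * affinity of fq[i % 8], so queues 0 and 8, 1 and 9, etc. share the
 * same XPS mask -- one mask per hardware flow, replicated per TC.
 */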
2506 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2507 struct tc_mqprio_qopt *mqprio)
2509 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2510 u8 num_tc, num_queues;
2513 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2514 num_queues = dpaa2_eth_queue_count(priv);
2515 num_tc = mqprio->num_tc;
2517 if (num_tc == net_dev->num_tc)
2520 if (num_tc > dpaa2_eth_tc_count(priv)) {
2521 netdev_err(net_dev, "Max %d traffic classes supported\n",
2522 dpaa2_eth_tc_count(priv));
2527 netdev_reset_tc(net_dev);
2528 netif_set_real_num_tx_queues(net_dev, num_queues);
2532 netdev_set_num_tc(net_dev, num_tc);
2533 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2535 for (i = 0; i < num_tc; i++)
2536 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
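/* e.g. with num_tc = 2 and num_queues = 8, the loop above maps TC 0
 * to netdev queues 0..7 (count 8, offset 0) and TC 1 to queues 8..15
 * (count 8, offset 8), matching the 16 real Tx queues set just above.
 */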
2544 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
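/* e.g. a rate of 125,000,000 bytes/s: div_u64() yields 125, times 8
 * gives 1000 Mbit/s. Note that the division truncates, so sub-Mbit
 * remainders of the requested rate are silently dropped.
 */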
2546 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2548 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2549 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2550 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2551 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2554 if (p->command == TC_TBF_STATS)
2557 /* Only per port Tx shaping */
2558 if (p->parent != TC_H_ROOT)
2561 if (p->command == TC_TBF_REPLACE) {
2562 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2563 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2564 DPAA2_ETH_MAX_BURST_SIZE);
2568 tx_cr_shaper.max_burst_size = cfg->max_size;
2569 /* The TBF interface is in bytes/s, whereas DPAA2 expects the
2570 * rate in Mbits/s
2571 */
2572 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2575 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2578 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2585 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2586 enum tc_setup_type type, void *type_data)
2589 case TC_SETUP_QDISC_MQPRIO:
2590 return dpaa2_eth_setup_mqprio(net_dev, type_data);
2591 case TC_SETUP_QDISC_TBF:
2592 return dpaa2_eth_setup_tbf(net_dev, type_data);
2598 static const struct net_device_ops dpaa2_eth_ops = {
2599 .ndo_open = dpaa2_eth_open,
2600 .ndo_start_xmit = dpaa2_eth_tx,
2601 .ndo_stop = dpaa2_eth_stop,
2602 .ndo_set_mac_address = dpaa2_eth_set_addr,
2603 .ndo_get_stats64 = dpaa2_eth_get_stats,
2604 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2605 .ndo_set_features = dpaa2_eth_set_features,
2606 .ndo_eth_ioctl = dpaa2_eth_ioctl,
2607 .ndo_change_mtu = dpaa2_eth_change_mtu,
2608 .ndo_bpf = dpaa2_eth_xdp,
2609 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2610 .ndo_setup_tc = dpaa2_eth_setup_tc,
2611 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
2612 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
2615 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2617 struct dpaa2_eth_channel *ch;
2619 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2621 /* Update NAPI statistics */
2622 ch->stats.cdan++;
2624 napi_schedule(&ch->napi);
2627 /* Allocate and configure a DPCON object */
2628 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
2630 struct fsl_mc_device *dpcon;
2631 struct device *dev = priv->net_dev->dev.parent;
2634 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2635 FSL_MC_POOL_DPCON, &dpcon);
2638 err = -EPROBE_DEFER;
2640 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2641 return ERR_PTR(err);
2644 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2646 dev_err(dev, "dpcon_open() failed\n");
2650 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2652 dev_err(dev, "dpcon_reset() failed\n");
2656 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2658 dev_err(dev, "dpcon_enable() failed\n");
2665 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2667 fsl_mc_object_free(dpcon);
2669 return ERR_PTR(err);
2672 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
2673 struct fsl_mc_device *dpcon)
2675 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2676 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2677 fsl_mc_object_free(dpcon);
2680 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
2682 struct dpaa2_eth_channel *channel;
2683 struct dpcon_attr attr;
2684 struct device *dev = priv->net_dev->dev.parent;
2687 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2691 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
2692 if (IS_ERR(channel->dpcon)) {
2693 err = PTR_ERR(channel->dpcon);
2697 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2700 dev_err(dev, "dpcon_get_attributes() failed\n");
2704 channel->dpcon_id = attr.id;
2705 channel->ch_id = attr.qbman_ch_id;
2706 channel->priv = priv;
2711 dpaa2_eth_free_dpcon(priv, channel->dpcon);
2714 return ERR_PTR(err);
2717 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
2718 struct dpaa2_eth_channel *channel)
2720 dpaa2_eth_free_dpcon(priv, channel->dpcon);
2724 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
2725 * and register data availability notifications
2727 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
2729 struct dpaa2_io_notification_ctx *nctx;
2730 struct dpaa2_eth_channel *channel;
2731 struct dpcon_notification_cfg dpcon_notif_cfg;
2732 struct device *dev = priv->net_dev->dev.parent;
2735 /* We want the ability to spread ingress traffic (RX, TX conf) to as
2736 * many cores as possible, so we need one channel for each core
2737 * (unless there are fewer queues than cores, in which case the extra
2738 * channels would be wasted).
2739 * Allocate one channel per core and register it to the core's
2740 * affine DPIO. If not enough channels are available for all cores
2741 * or if some cores don't have an affine DPIO, there will be no
2742 * ingress frame processing on those cores.
2744 cpumask_clear(&priv->dpio_cpumask);
2745 for_each_online_cpu(i) {
2746 /* Try to allocate a channel */
2747 channel = dpaa2_eth_alloc_channel(priv);
2748 if (IS_ERR_OR_NULL(channel)) {
2749 err = PTR_ERR_OR_ZERO(channel);
2750 if (err != -EPROBE_DEFER)
2752 "No affine channel for cpu %d and above\n", i);
2756 priv->channel[priv->num_channels] = channel;
2758 nctx = &channel->nctx;
2760 nctx->cb = dpaa2_eth_cdan_cb;
2761 nctx->id = channel->ch_id;
2762 nctx->desired_cpu = i;
2764 /* Register the new context */
2765 channel->dpio = dpaa2_io_service_select(i);
2766 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2768 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2769 /* If no affine DPIO for this core, there's probably
2770 * none available for next cores either. Signal we want
2771 * to retry later, in case the DPIO devices weren't
2772 * probed yet.
2773 */
2774 err = -EPROBE_DEFER;
2775 goto err_service_reg;
2778 /* Register DPCON notification with MC */
2779 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2780 dpcon_notif_cfg.priority = 0;
2781 dpcon_notif_cfg.user_ctx = nctx->qman64;
2782 err = dpcon_set_notification(priv->mc_io, 0,
2783 channel->dpcon->mc_handle,
2786 dev_err(dev, "dpcon_set_notification() failed\n");
2790 /* If we managed to allocate a channel and also found an affine
2791 * DPIO for this core, add it to the final mask
2793 cpumask_set_cpu(i, &priv->dpio_cpumask);
2794 priv->num_channels++;
2796 /* Stop if we already have enough channels to accommodate all
2797 * RX and TX conf queues
2799 if (priv->num_channels == priv->dpni_attrs.num_queues)
2806 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2808 dpaa2_eth_free_channel(priv, channel);
2810 if (err == -EPROBE_DEFER) {
2811 for (i = 0; i < priv->num_channels; i++) {
2812 channel = priv->channel[i];
2813 nctx = &channel->nctx;
2814 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2815 dpaa2_eth_free_channel(priv, channel);
2817 priv->num_channels = 0;
2821 if (cpumask_empty(&priv->dpio_cpumask)) {
2822 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2826 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2827 cpumask_pr_args(&priv->dpio_cpumask));
2832 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
2834 struct device *dev = priv->net_dev->dev.parent;
2835 struct dpaa2_eth_channel *ch;
2838 /* deregister CDAN notifications and free channels */
2839 for (i = 0; i < priv->num_channels; i++) {
2840 ch = priv->channel[i];
2841 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2842 dpaa2_eth_free_channel(priv, ch);
2846 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
2849 struct device *dev = priv->net_dev->dev.parent;
2852 for (i = 0; i < priv->num_channels; i++)
2853 if (priv->channel[i]->nctx.desired_cpu == cpu)
2854 return priv->channel[i];
2856 /* We should never get here. Issue a warning and return
2857 * the first channel, because it's still better than nothing
2859 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2861 return priv->channel[0];
2864 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
2866 struct device *dev = priv->net_dev->dev.parent;
2867 struct dpaa2_eth_fq *fq;
2868 int rx_cpu, txc_cpu;
2871 /* For each FQ, pick one channel/CPU to deliver frames to.
2872 * This may well change at runtime, either through irqbalance or
2873 * through direct user intervention.
2875 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2877 for (i = 0; i < priv->num_fqs; i++) {
2881 case DPAA2_RX_ERR_FQ:
2882 fq->target_cpu = rx_cpu;
2883 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2884 if (rx_cpu >= nr_cpu_ids)
2885 rx_cpu = cpumask_first(&priv->dpio_cpumask);
2887 case DPAA2_TX_CONF_FQ:
2888 fq->target_cpu = txc_cpu;
2889 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2890 if (txc_cpu >= nr_cpu_ids)
2891 txc_cpu = cpumask_first(&priv->dpio_cpumask);
2894 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2896 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
2902 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
2906 /* We have one TxConf FQ per Tx flow.
2907 * The number of Tx and Rx queues is the same.
2908 * Tx queues come first in the fq array.
2910 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2911 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2912 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2913 priv->fq[priv->num_fqs++].flowid = (u16)i;
2916 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2917 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2918 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2919 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2920 priv->fq[priv->num_fqs].tc = (u8)j;
2921 priv->fq[priv->num_fqs++].flowid = (u16)i;
2925 /* We have exactly one Rx error queue per DPNI */
2926 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
2927 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
2929 /* For each FQ, decide on which core to process incoming frames */
2930 dpaa2_eth_set_fq_affinity(priv);
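/* Resulting fq[] layout, e.g. for 2 queues and 2 traffic classes:
 * fq[0..1] = Tx conf flows 0-1, fq[2..3] = Rx TC0 flows 0-1,
 * fq[4..5] = Rx TC1 flows 0-1, fq[6] = the single Rx error queue.
 */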
2933 /* Allocate and configure one buffer pool for each interface */
2934 static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
2937 struct fsl_mc_device *dpbp_dev;
2938 struct device *dev = priv->net_dev->dev.parent;
2939 struct dpbp_attr dpbp_attrs;
2941 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2945 err = -EPROBE_DEFER;
2947 dev_err(dev, "DPBP device allocation failed\n");
2951 priv->dpbp_dev = dpbp_dev;
2953 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2954 &dpbp_dev->mc_handle);
2956 dev_err(dev, "dpbp_open() failed\n");
2960 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2962 dev_err(dev, "dpbp_reset() failed\n");
2966 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2968 dev_err(dev, "dpbp_enable() failed\n");
2972 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2975 dev_err(dev, "dpbp_get_attributes() failed\n");
2978 priv->bpid = dpbp_attrs.bpid;
2983 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2986 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2988 fsl_mc_object_free(dpbp_dev);
2993 static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
2995 dpaa2_eth_drain_pool(priv);
2996 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2997 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2998 fsl_mc_object_free(priv->dpbp_dev);
3001 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3003 struct device *dev = priv->net_dev->dev.parent;
3004 struct dpni_buffer_layout buf_layout = {0};
3008 /* We need to check for WRIOP version 1.0.0, but depending on the MC
3009 * version, this number is not always provided correctly on rev1.
3010 * We need to check for both alternatives in this situation.
3012 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3013 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3014 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3016 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3018 /* We need to ensure that the buffer size seen by WRIOP is a multiple
3019 * of 64 or 256 bytes depending on the WRIOP version.
3021 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
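/* Worked example with hypothetical sizes, assuming the rev1
 * alignment is the 256B one: a raw buffer size of 2112B gives
 * ALIGN_DOWN(2112, 256) = 2048 on rev1 hardware but
 * ALIGN_DOWN(2112, 64) = 2112 elsewhere, i.e. the stricter alignment
 * can cost up to rx_buf_align - 1 bytes per buffer.
 */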
3024 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3025 buf_layout.pass_timestamp = true;
3026 buf_layout.pass_frame_status = true;
3027 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3028 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3029 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3030 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3031 DPNI_QUEUE_TX, &buf_layout);
3033 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3037 /* tx-confirm buffer */
3038 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3039 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3040 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3041 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3043 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3047 /* Now that we've set our tx buffer layout, retrieve the minimum
3048 * required tx data offset.
3050 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3051 &priv->tx_data_offset);
3053 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3057 if ((priv->tx_data_offset % 64) != 0)
3058 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3059 priv->tx_data_offset);
3062 buf_layout.pass_frame_status = true;
3063 buf_layout.pass_parser_result = true;
3064 buf_layout.data_align = rx_buf_align;
3065 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3066 buf_layout.private_data_size = 0;
3067 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3068 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3069 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3070 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3071 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3072 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3073 DPNI_QUEUE_RX, &buf_layout);
3075 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3082 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3083 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
3085 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3086 struct dpaa2_eth_fq *fq,
3087 struct dpaa2_fd *fd, u8 prio,
3088 u32 num_frames __always_unused,
3089 int *frames_enqueued)
3093 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3094 priv->tx_qdid, prio,
3096 if (!err && frames_enqueued)
3097 *frames_enqueued = 1;
3101 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3102 struct dpaa2_eth_fq *fq,
3103 struct dpaa2_fd *fd,
3104 u8 prio, u32 num_frames,
3105 int *frames_enqueued)
3109 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3116 if (frames_enqueued)
3117 *frames_enqueued = err;
3121 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3123 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3124 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3125 priv->enqueue = dpaa2_eth_enqueue_qd;
3127 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
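/* In short: DPNI firmware older than 7.9 only lets us enqueue one
 * frame at a time through the queuing destination (QDID), while on
 * 7.9+ we can push bursts straight to frame queues (FQIDs), e.g. a
 * whole xdp_redirect_fds batch in a single portal command.
 */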
3130 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3132 struct device *dev = priv->net_dev->dev.parent;
3133 struct dpni_link_cfg link_cfg = {0};
3136 /* Get the default link options so we don't override other flags */
3137 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3139 dev_err(dev, "dpni_get_link_cfg() failed\n");
3143 /* By default, enable both Rx and Tx pause frames */
3144 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3145 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3146 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3148 dev_err(dev, "dpni_set_link_cfg() failed\n");
3152 priv->link_state.options = link_cfg.options;
3157 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3159 struct dpni_queue_id qid = {0};
3160 struct dpaa2_eth_fq *fq;
3161 struct dpni_queue queue;
3164 /* We only use Tx FQIDs for FQID-based enqueue, so check
3165 * if the DPNI version supports it before updating FQIDs
3166 */
3167 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3168 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3171 for (i = 0; i < priv->num_fqs; i++) {
3173 if (fq->type != DPAA2_TX_CONF_FQ)
3175 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3176 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3177 DPNI_QUEUE_TX, j, fq->flowid,
3182 fq->tx_fqid[j] = qid.fqid;
3183 if (fq->tx_fqid[j] == 0)
3188 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3193 netdev_info(priv->net_dev,
3194 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3195 priv->enqueue = dpaa2_eth_enqueue_qd;
3198 /* Configure ingress classification based on VLAN PCP */
3199 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3201 struct device *dev = priv->net_dev->dev.parent;
3202 struct dpkg_profile_cfg kg_cfg = {0};
3203 struct dpni_qos_tbl_cfg qos_cfg = {0};
3204 struct dpni_rule_cfg key_params;
3205 void *dma_mem, *key, *mask;
3206 u8 key_size = 2; /* VLAN TCI field */
3209 /* VLAN-based classification only makes sense if we have multiple
3210 * traffic classes.
3211 * Also, we need to extract just the 3-bit PCP field from the VLAN
3212 * header and we can only do that by using a mask
3213 */
3214 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3215 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3219 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3223 kg_cfg.num_extracts = 1;
3224 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3225 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3226 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3227 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3229 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3231 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3236 qos_cfg.default_tc = 0;
3237 qos_cfg.discard_on_miss = 0;
3238 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3239 DPAA2_CLASSIFIER_DMA_SIZE,
3241 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3242 dev_err(dev, "QoS table DMA mapping failed\n");
3247 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3249 dev_err(dev, "dpni_set_qos_table failed\n");
3253 /* Add QoS table entries */
3254 key = kzalloc(key_size * 2, GFP_KERNEL);
3259 mask = key + key_size;
3260 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3262 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3264 if (dma_mapping_error(dev, key_params.key_iova)) {
3265 dev_err(dev, "Qos table entry DMA mapping failed\n");
3270 key_params.mask_iova = key_params.key_iova + key_size;
3271 key_params.key_size = key_size;
3273 /* We add rules for PCP-based distribution starting with highest
3274 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3275 * classes to accommodate all priority levels, the lowest ones end up
3276 * on TC 0, which was configured as the default
3277 */
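/* e.g. with 4 traffic classes the loop below installs PCP 7 -> TC 3,
 * PCP 6 -> TC 2, PCP 5 -> TC 1 and PCP 4 -> TC 0; PCP 3..0 match no
 * entry and land on the default TC 0 configured earlier.
 */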
3278 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3279 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3280 dma_sync_single_for_device(dev, key_params.key_iova,
3281 key_size * 2, DMA_TO_DEVICE);
3283 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3286 dev_err(dev, "dpni_add_qos_entry failed\n");
3287 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3292 priv->vlan_cls_enabled = true;
3294 /* Table and key memory is not persistent, clean everything up after
3295 * configuration is finished
3298 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3302 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3310 /* Configure the DPNI object this interface is associated with */
3311 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3313 struct device *dev = &ls_dev->dev;
3314 struct dpaa2_eth_priv *priv;
3315 struct net_device *net_dev;
3318 net_dev = dev_get_drvdata(dev);
3319 priv = netdev_priv(net_dev);
3321 /* get a handle for the DPNI object */
3322 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3324 dev_err(dev, "dpni_open() failed\n");
3328 /* Check if we can work with this DPNI object */
3329 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3330 &priv->dpni_ver_minor);
3332 dev_err(dev, "dpni_get_api_version() failed\n");
3335 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3336 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3337 priv->dpni_ver_major, priv->dpni_ver_minor,
3338 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3343 ls_dev->mc_io = priv->mc_io;
3344 ls_dev->mc_handle = priv->mc_token;
3346 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3348 dev_err(dev, "dpni_reset() failed\n");
3352 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3355 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3359 err = dpaa2_eth_set_buffer_layout(priv);
3363 dpaa2_eth_set_enqueue_mode(priv);
3365 /* Enable pause frame support */
3366 if (dpaa2_eth_has_pause_support(priv)) {
3367 err = dpaa2_eth_set_pause(priv);
3372 err = dpaa2_eth_set_vlan_qos(priv);
3373 if (err && err != -EOPNOTSUPP)
3376 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3377 sizeof(struct dpaa2_eth_cls_rule),
3379 if (!priv->cls_rules) {
3387 dpni_close(priv->mc_io, 0, priv->mc_token);
3392 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3396 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3398 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3401 dpni_close(priv->mc_io, 0, priv->mc_token);
3404 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3405 struct dpaa2_eth_fq *fq)
3407 struct device *dev = priv->net_dev->dev.parent;
3408 struct dpni_queue queue;
3409 struct dpni_queue_id qid;
3412 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3413 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3415 dev_err(dev, "dpni_get_queue(RX) failed\n");
3419 fq->fqid = qid.fqid;
3421 queue.destination.id = fq->channel->dpcon_id;
3422 queue.destination.type = DPNI_DEST_DPCON;
3423 queue.destination.priority = 1;
3424 queue.user_context = (u64)(uintptr_t)fq;
3425 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3426 DPNI_QUEUE_RX, fq->tc, fq->flowid,
3427 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3430 dev_err(dev, "dpni_set_queue(RX) failed\n");
3435 /* only once for each channel */
3439 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3442 dev_err(dev, "xdp_rxq_info_reg failed\n");
3446 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3447 MEM_TYPE_PAGE_ORDER0, NULL);
3449 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3456 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3457 struct dpaa2_eth_fq *fq)
3459 struct device *dev = priv->net_dev->dev.parent;
3460 struct dpni_queue queue;
3461 struct dpni_queue_id qid;
3464 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3465 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3466 DPNI_QUEUE_TX, i, fq->flowid,
3469 dev_err(dev, "dpni_get_queue(TX) failed\n");
3472 fq->tx_fqid[i] = qid.fqid;
3475 /* All Tx queues belonging to the same flowid have the same qdbin */
3476 fq->tx_qdbin = qid.qdbin;
3478 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3479 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3482 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3486 fq->fqid = qid.fqid;
3488 queue.destination.id = fq->channel->dpcon_id;
3489 queue.destination.type = DPNI_DEST_DPCON;
3490 queue.destination.priority = 0;
3491 queue.user_context = (u64)(uintptr_t)fq;
3492 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3493 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3494 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3497 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3504 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3505 struct dpaa2_eth_fq *fq)
3507 struct device *dev = priv->net_dev->dev.parent;
3508 struct dpni_queue q = { { 0 } };
3509 struct dpni_queue_id qid;
3510 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3513 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3514 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3516 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3520 fq->fqid = qid.fqid;
3522 q.destination.id = fq->channel->dpcon_id;
3523 q.destination.type = DPNI_DEST_DPCON;
3524 q.destination.priority = 1;
3525 q.user_context = (u64)(uintptr_t)fq;
3526 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3527 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3529 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3536 /* Supported header fields for Rx hash distribution key */
3537 static const struct dpaa2_eth_dist_fields dist_fields[] = {
3540 .rxnfc_field = RXH_L2DA,
3541 .cls_prot = NET_PROT_ETH,
3542 .cls_field = NH_FLD_ETH_DA,
3543 .id = DPAA2_ETH_DIST_ETHDST,
3546 .cls_prot = NET_PROT_ETH,
3547 .cls_field = NH_FLD_ETH_SA,
3548 .id = DPAA2_ETH_DIST_ETHSRC,
3551 /* This is the last ethertype field parsed:
3552 * depending on frame format, it can be the MAC ethertype
3553 * or the VLAN etype.
3555 .cls_prot = NET_PROT_ETH,
3556 .cls_field = NH_FLD_ETH_TYPE,
3557 .id = DPAA2_ETH_DIST_ETHTYPE,
3561 .rxnfc_field = RXH_VLAN,
3562 .cls_prot = NET_PROT_VLAN,
3563 .cls_field = NH_FLD_VLAN_TCI,
3564 .id = DPAA2_ETH_DIST_VLAN,
3568 .rxnfc_field = RXH_IP_SRC,
3569 .cls_prot = NET_PROT_IP,
3570 .cls_field = NH_FLD_IP_SRC,
3571 .id = DPAA2_ETH_DIST_IPSRC,
3574 .rxnfc_field = RXH_IP_DST,
3575 .cls_prot = NET_PROT_IP,
3576 .cls_field = NH_FLD_IP_DST,
3577 .id = DPAA2_ETH_DIST_IPDST,
3580 .rxnfc_field = RXH_L3_PROTO,
3581 .cls_prot = NET_PROT_IP,
3582 .cls_field = NH_FLD_IP_PROTO,
3583 .id = DPAA2_ETH_DIST_IPPROTO,
3586 /* Using UDP ports, this is functionally equivalent to raw
3587 * byte pairs from the L4 header.
3589 .rxnfc_field = RXH_L4_B_0_1,
3590 .cls_prot = NET_PROT_UDP,
3591 .cls_field = NH_FLD_UDP_PORT_SRC,
3592 .id = DPAA2_ETH_DIST_L4SRC,
3595 .rxnfc_field = RXH_L4_B_2_3,
3596 .cls_prot = NET_PROT_UDP,
3597 .cls_field = NH_FLD_UDP_PORT_DST,
3598 .id = DPAA2_ETH_DIST_L4DST,
3603 /* Configure the Rx hash key using the legacy API */
3604 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3606 struct device *dev = priv->net_dev->dev.parent;
3607 struct dpni_rx_tc_dist_cfg dist_cfg;
3610 memset(&dist_cfg, 0, sizeof(dist_cfg));
3612 dist_cfg.key_cfg_iova = key;
3613 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3614 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3616 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3617 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
3620 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
3628 /* Configure the Rx hash key using the new API */
3629 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3631 struct device *dev = priv->net_dev->dev.parent;
3632 struct dpni_rx_dist_cfg dist_cfg;
3635 memset(&dist_cfg, 0, sizeof(dist_cfg));
3637 dist_cfg.key_cfg_iova = key;
3638 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3639 dist_cfg.enable = 1;
3641 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3643 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
3646 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
3650 /* If the flow steering / hashing key is shared between all
3651 * traffic classes, install it just once
3653 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3660 /* Configure the Rx flow classification key */
3661 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
3663 struct device *dev = priv->net_dev->dev.parent;
3664 struct dpni_rx_dist_cfg dist_cfg;
3667 memset(&dist_cfg, 0, sizeof(dist_cfg));
3669 dist_cfg.key_cfg_iova = key;
3670 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3671 dist_cfg.enable = 1;
3673 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3675 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
3678 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
3682 /* If the flow steering / hashing key is shared between all
3683 * traffic classes, install it just once
3685 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3692 /* Size of the Rx flow classification key */
3693 int dpaa2_eth_cls_key_size(u64 fields)
3697 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3698 if (!(fields & dist_fields[i].id))
3700 size += dist_fields[i].size;
3706 /* Offset of header field in Rx classification key */
3707 int dpaa2_eth_cls_fld_off(int prot, int field)
3711 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3712 if (dist_fields[i].cls_prot == prot &&
3713 dist_fields[i].cls_field == field)
3715 off += dist_fields[i].size;
3718 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
3722 /* Prune unused fields from the classification rule.
3723 * Used when masking is not supported
3725 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
3727 int off = 0, new_off = 0;
3730 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3731 size = dist_fields[i].size;
3732 if (dist_fields[i].id & fields) {
3733 memcpy(key_mem + new_off, key_mem + off, size);
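/* Compaction sketch, assuming a full key of [ETH_DA(6B)][IP_SRC(4B)]
 * [IP_DST(4B)] and 'fields' selecting only the IP addresses: IP_SRC
 * is copied from offset 6 down to 0 and IP_DST from offset 10 down
 * to 4, so the rule matches the shorter key reported by
 * dpaa2_eth_cls_key_size(fields).
 */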
3740 /* Set Rx distribution (hash or flow classification) key.
3741 * 'flags' is a combination of RXH_ bits.
3742 */
3743 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
3744 enum dpaa2_eth_rx_dist type, u64 flags)
3746 struct device *dev = net_dev->dev.parent;
3747 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3748 struct dpkg_profile_cfg cls_cfg;
3749 u32 rx_hash_fields = 0;
3750 dma_addr_t key_iova;
3755 memset(&cls_cfg, 0, sizeof(cls_cfg));
3757 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3758 struct dpkg_extract *key =
3759 &cls_cfg.extracts[cls_cfg.num_extracts];
3761 /* For both Rx hashing and classification keys
3762 * we set only the selected fields.
3764 if (!(flags & dist_fields[i].id))
3766 if (type == DPAA2_ETH_RX_DIST_HASH)
3767 rx_hash_fields |= dist_fields[i].rxnfc_field;
3769 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3770 dev_err(dev, "error adding key extraction rule, too many rules?\n");
3774 key->type = DPKG_EXTRACT_FROM_HDR;
3775 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3776 key->extract.from_hdr.type = DPKG_FULL_FIELD;
3777 key->extract.from_hdr.field = dist_fields[i].cls_field;
3778 cls_cfg.num_extracts++;
3781 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3785 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3787 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
3791 /* Prepare for setting the rx dist */
3792 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
3794 if (dma_mapping_error(dev, key_iova)) {
3795 dev_err(dev, "DMA mapping failed\n");
3800 if (type == DPAA2_ETH_RX_DIST_HASH) {
3801 if (dpaa2_eth_has_legacy_dist(priv))
3802 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
3804 err = dpaa2_eth_config_hash_key(priv, key_iova);
3806 err = dpaa2_eth_config_cls_key(priv, key_iova);
3809 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3811 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
3812 priv->rx_hash_fields = rx_hash_fields;
3819 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
3821 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3825 if (!dpaa2_eth_hash_enabled(priv))
3828 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
3829 if (dist_fields[i].rxnfc_field & flags)
3830 key |= dist_fields[i].id;
3832 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
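/* Usage sketch: dpaa2_eth_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST)
 * selects only the DPAA2_ETH_DIST_IPSRC and DPAA2_ETH_DIST_IPDST
 * fields, so incoming flows are spread across Rx queues by IP
 * address pair alone.
 */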
3835 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
3837 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
3840 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
3842 struct device *dev = priv->net_dev->dev.parent;
3845 /* Check if we actually support Rx flow classification */
3846 if (dpaa2_eth_has_legacy_dist(priv)) {
3847 dev_dbg(dev, "Rx cls not supported by current MC version\n");
3851 if (!dpaa2_eth_fs_enabled(priv)) {
3852 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
3856 if (!dpaa2_eth_hash_enabled(priv)) {
3857 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
3861 /* If there is no support for masking in the classification table,
3862 * we don't set a default key, as it will depend on the rules
3863 * added by the user at runtime.
3865 if (!dpaa2_eth_fs_mask_enabled(priv))
3868 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
3873 priv->rx_cls_enabled = 1;
3878 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3879 * frame queues and channels
3881 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
3883 struct net_device *net_dev = priv->net_dev;
3884 struct device *dev = net_dev->dev.parent;
3885 struct dpni_pools_cfg pools_params;
3886 struct dpni_error_cfg err_cfg;
3890 pools_params.num_dpbp = 1;
3891 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3892 pools_params.pools[0].backup_pool = 0;
3893 pools_params.pools[0].buffer_size = priv->rx_buf_size;
3894 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3896 dev_err(dev, "dpni_set_pools() failed\n");
3900 /* have the interface implicitly distribute traffic based on
3901 * the default hash key
3903 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
3904 if (err && err != -EOPNOTSUPP)
3905 dev_err(dev, "Failed to configure hashing\n");
3907 /* Configure the flow classification key; it includes all
3908 * supported header fields and cannot be modified at runtime
3910 err = dpaa2_eth_set_default_cls(priv);
3911 if (err && err != -EOPNOTSUPP)
3912 dev_err(dev, "Failed to configure Rx classification key\n");
3914 /* Configure handling of error frames */
3915 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3916 err_cfg.set_frame_annotation = 1;
3917 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3918 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3921 dev_err(dev, "dpni_set_errors_behavior failed\n");
3925 /* Configure Rx and Tx conf queues to generate CDANs */
3926 for (i = 0; i < priv->num_fqs; i++) {
3927 switch (priv->fq[i].type) {
3929 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
3931 case DPAA2_TX_CONF_FQ:
3932 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
3934 case DPAA2_RX_ERR_FQ:
3935 err = setup_rx_err_flow(priv, &priv->fq[i]);
3938 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3945 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3946 DPNI_QUEUE_TX, &priv->tx_qdid);
3948 dev_err(dev, "dpni_get_qdid() failed\n");
3955 /* Allocate rings for storing incoming frame descriptors */
3956 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
3958 struct net_device *net_dev = priv->net_dev;
3959 struct device *dev = net_dev->dev.parent;
3962 for (i = 0; i < priv->num_channels; i++) {
3963 priv->channel[i]->store =
3964 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3965 if (!priv->channel[i]->store) {
3966 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3974 for (i = 0; i < priv->num_channels; i++) {
3975 if (!priv->channel[i]->store)
3977 dpaa2_io_store_destroy(priv->channel[i]->store);
3983 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
3987 for (i = 0; i < priv->num_channels; i++)
3988 dpaa2_io_store_destroy(priv->channel[i]->store);
3991 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
3993 struct net_device *net_dev = priv->net_dev;
3994 struct device *dev = net_dev->dev.parent;
3995 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3998 /* Get firmware address, if any */
3999 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4001 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4005 /* Get DPNI attributes address, if any */
4006 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4009 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4013 /* First check if firmware has any address configured by bootloader */
4014 if (!is_zero_ether_addr(mac_addr)) {
4015 /* If the DPMAC addr != DPNI addr, update it */
4016 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4017 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4021 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4025 eth_hw_addr_set(net_dev, mac_addr);
4026 } else if (is_zero_ether_addr(dpni_mac_addr)) {
4027 /* No MAC address configured, fill in net_dev->dev_addr
4028 * with a random one
4029 */
4030 eth_hw_addr_random(net_dev);
4031 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4033 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4036 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4040 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4041 * practical purposes, this will be our "permanent" mac address,
4042 * at least until the next reboot. This move will also permit
4043 * register_netdevice() to properly fill up net_dev->perm_addr.
4045 net_dev->addr_assign_type = NET_ADDR_PERM;
4047 /* NET_ADDR_PERM is the default; all we have to do is
4048 * fill in the device addr.
4050 eth_hw_addr_set(net_dev, dpni_mac_addr);
4056 static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4058 struct device *dev = net_dev->dev.parent;
4059 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4060 u32 options = priv->dpni_attrs.options;
4061 u64 supported = 0, not_supported = 0;
4062 u8 bcast_addr[ETH_ALEN];
4066 net_dev->netdev_ops = &dpaa2_eth_ops;
4067 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4069 err = dpaa2_eth_set_mac_addr(priv);
4073 /* Explicitly add the broadcast address to the MAC filtering table */
4074 eth_broadcast_addr(bcast_addr);
4075 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4077 dev_err(dev, "dpni_add_mac_addr() failed\n");
4081 /* Set MTU upper limit; lower limit is 68B (default value) */
4082 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4083 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4086 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4090 /* Set actual number of queues in the net device */
4091 num_queues = dpaa2_eth_queue_count(priv);
4092 err = netif_set_real_num_tx_queues(net_dev, num_queues);
4094 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4097 err = netif_set_real_num_rx_queues(net_dev, num_queues);
4099 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4103 /* Capabilities listing */
4104 supported |= IFF_LIVE_ADDR_CHANGE;
4106 if (options & DPNI_OPT_NO_MAC_FILTER)
4107 not_supported |= IFF_UNICAST_FLT;
4109 supported |= IFF_UNICAST_FLT;
4111 net_dev->priv_flags |= supported;
4112 net_dev->priv_flags &= ~not_supported;
4115 net_dev->features = NETIF_F_RXCSUM |
4116 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4117 NETIF_F_SG | NETIF_F_HIGHDMA |
4118 NETIF_F_LLTX | NETIF_F_HW_TC;
4119 net_dev->hw_features = net_dev->features;
4121 if (priv->dpni_attrs.vlan_filter_entries)
4122 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4127 static int dpaa2_eth_poll_link_state(void *arg)
4129 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4132 while (!kthread_should_stop()) {
4133 err = dpaa2_eth_link_state_update(priv);
4137 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4143 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4145 struct fsl_mc_device *dpni_dev, *dpmac_dev;
4146 struct dpaa2_mac *mac;
4149 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4150 dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4152 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
4153 return PTR_ERR(dpmac_dev);
4155 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4158 mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4162 mac->mc_dev = dpmac_dev;
4163 mac->mc_io = priv->mc_io;
4164 mac->net_dev = priv->net_dev;
4166 err = dpaa2_mac_open(mac);
4171 if (dpaa2_eth_is_type_phy(priv)) {
4172 err = dpaa2_mac_connect(mac);
4173 if (err && err != -EPROBE_DEFER)
4174 netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
4183 dpaa2_mac_close(mac);
4190 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4192 if (dpaa2_eth_is_type_phy(priv))
4193 dpaa2_mac_disconnect(priv->mac);
4195 if (!dpaa2_eth_has_mac(priv))
4198 dpaa2_mac_close(priv->mac);
4203 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4206 struct device *dev = (struct device *)arg;
4207 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4208 struct net_device *net_dev = dev_get_drvdata(dev);
4209 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4212 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4213 DPNI_IRQ_INDEX, &status);
4214 if (unlikely(err)) {
4215 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4219 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4220 dpaa2_eth_link_state_update(netdev_priv(net_dev));
4222 if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4223 dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4224 dpaa2_eth_update_tx_fqids(priv);
4227 if (dpaa2_eth_has_mac(priv))
4228 dpaa2_eth_disconnect_mac(priv);
4230 dpaa2_eth_connect_mac(priv);
4237 static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4240 struct fsl_mc_device_irq *irq;
4242 err = fsl_mc_allocate_irqs(ls_dev);
4244 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4248 irq = ls_dev->irqs[0];
4249 err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4250 NULL, dpni_irq0_handler_thread,
4251 IRQF_NO_SUSPEND | IRQF_ONESHOT,
4252 dev_name(&ls_dev->dev), &ls_dev->dev);
4254 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4258 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4259 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4260 DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4262 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4266 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4269 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4276 devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4278 fsl_mc_free_irqs(ls_dev);
4283 static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4286 struct dpaa2_eth_channel *ch;
4288 for (i = 0; i < priv->num_channels; i++) {
4289 ch = priv->channel[i];
4290 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4291 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
4296 static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4299 struct dpaa2_eth_channel *ch;
4301 for (i = 0; i < priv->num_channels; i++) {
4302 ch = priv->channel[i];
4303 netif_napi_del(&ch->napi);
4307 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4310 struct net_device *net_dev = NULL;
4311 struct dpaa2_eth_priv *priv = NULL;
4314 dev = &dpni_dev->dev;
4317 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4319 dev_err(dev, "alloc_etherdev_mq() failed\n");
4323 SET_NETDEV_DEV(net_dev, dev);
4324 dev_set_drvdata(dev, net_dev);
4326 priv = netdev_priv(net_dev);
4327 priv->net_dev = net_dev;
4329 priv->iommu_domain = iommu_get_domain_for_dev(dev);
4331 priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4332 priv->rx_tstamp = false;
4334 priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4335 if (!priv->dpaa2_ptp_wq) {
4340 INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4342 skb_queue_head_init(&priv->tx_skbs);
4344 priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4346 /* Obtain a MC portal */
4347 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4351 err = -EPROBE_DEFER;
4353 dev_err(dev, "MC portal allocation failed\n");
4354 goto err_portal_alloc;
4357 /* MC objects initialization and configuration */
4358 err = dpaa2_eth_setup_dpni(dpni_dev);
4360 goto err_dpni_setup;
4362 err = dpaa2_eth_setup_dpio(priv);
4364 goto err_dpio_setup;
4366 dpaa2_eth_setup_fqs(priv);
4368 err = dpaa2_eth_setup_dpbp(priv);
4370 goto err_dpbp_setup;
4372 err = dpaa2_eth_bind_dpni(priv);
4376 /* Add a NAPI context for each channel */
4377 dpaa2_eth_add_ch_napi(priv);
4379 /* Percpu statistics */
4380 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4381 if (!priv->percpu_stats) {
4382 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4384 goto err_alloc_percpu_stats;
4386 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4387 if (!priv->percpu_extras) {
4388 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4390 goto err_alloc_percpu_extras;
4393 priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4394 if (!priv->sgt_cache) {
4395 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4397 goto err_alloc_sgt_cache;
4400 err = dpaa2_eth_netdev_init(net_dev);
4402 goto err_netdev_init;
4404 /* Configure checksum offload based on current interface flags */
4405 err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4409 err = dpaa2_eth_set_tx_csum(priv,
4410 !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4414 err = dpaa2_eth_alloc_rings(priv);
4416 goto err_alloc_rings;
4418 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4419 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4420 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4421 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4423 dev_dbg(dev, "PFC not supported\n");
4427 err = dpaa2_eth_setup_irqs(dpni_dev);
4429 netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
4430 priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4431 "%s_poll_link", net_dev->name);
4432 if (IS_ERR(priv->poll_thread)) {
4433 dev_err(dev, "Error starting polling thread\n");
4434 goto err_poll_thread;
4436 priv->do_link_poll = true;
4439 err = dpaa2_eth_connect_mac(priv);
4441 goto err_connect_mac;
4443 err = dpaa2_eth_dl_alloc(priv);
4445 goto err_dl_register;
4447 err = dpaa2_eth_dl_traps_register(priv);
4449 goto err_dl_trap_register;
4451 err = dpaa2_eth_dl_port_add(priv);
4453 goto err_dl_port_add;
4455 err = register_netdev(net_dev);
4457 dev_err(dev, "register_netdev() failed\n");
4458 goto err_netdev_reg;
4461 #ifdef CONFIG_DEBUG_FS
4462 dpaa2_dbg_add(priv);
4465 dpaa2_eth_dl_register(priv);
4466 dev_info(dev, "Probed interface %s\n", net_dev->name);
4470 dpaa2_eth_dl_port_del(priv);
4472 dpaa2_eth_dl_traps_unregister(priv);
4473 err_dl_trap_register:
4474 dpaa2_eth_dl_free(priv);
4476 dpaa2_eth_disconnect_mac(priv);
4478 if (priv->do_link_poll)
4479 kthread_stop(priv->poll_thread);
4481 fsl_mc_free_irqs(dpni_dev);
4483 dpaa2_eth_free_rings(priv);
4487 free_percpu(priv->sgt_cache);
4488 err_alloc_sgt_cache:
4489 free_percpu(priv->percpu_extras);
4490 err_alloc_percpu_extras:
4491 free_percpu(priv->percpu_stats);
4492 err_alloc_percpu_stats:
4493 dpaa2_eth_del_ch_napi(priv);
4495 dpaa2_eth_free_dpbp(priv);
4497 dpaa2_eth_free_dpio(priv);
4499 dpaa2_eth_free_dpni(priv);
4501 fsl_mc_portal_free(priv->mc_io);
4503 destroy_workqueue(priv->dpaa2_ptp_wq);
4505 dev_set_drvdata(dev, NULL);
4506 free_netdev(net_dev);
4511 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4514 struct net_device *net_dev;
4515 struct dpaa2_eth_priv *priv;
4518 net_dev = dev_get_drvdata(dev);
4519 priv = netdev_priv(net_dev);
4521 dpaa2_eth_dl_unregister(priv);
4523 #ifdef CONFIG_DEBUG_FS
4524 dpaa2_dbg_remove(priv);
4527 dpaa2_eth_disconnect_mac(priv);
4530 unregister_netdev(net_dev);
4532 dpaa2_eth_dl_port_del(priv);
4533 dpaa2_eth_dl_traps_unregister(priv);
4534 dpaa2_eth_dl_free(priv);
4536 if (priv->do_link_poll)
4537 kthread_stop(priv->poll_thread);
4539 fsl_mc_free_irqs(ls_dev);
4541 dpaa2_eth_free_rings(priv);
4542 free_percpu(priv->sgt_cache);
4543 free_percpu(priv->percpu_stats);
4544 free_percpu(priv->percpu_extras);
4546 dpaa2_eth_del_ch_napi(priv);
4547 dpaa2_eth_free_dpbp(priv);
4548 dpaa2_eth_free_dpio(priv);
4549 dpaa2_eth_free_dpni(priv);
4551 fsl_mc_portal_free(priv->mc_io);
4553 destroy_workqueue(priv->dpaa2_ptp_wq);
4555 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
4557 free_netdev(net_dev);
4562 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4564 .vendor = FSL_MC_VENDOR_FREESCALE,
4569 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4571 static struct fsl_mc_driver dpaa2_eth_driver = {
4573 .name = KBUILD_MODNAME,
4574 .owner = THIS_MODULE,
4576 .probe = dpaa2_eth_probe,
4577 .remove = dpaa2_eth_remove,
4578 .match_id_table = dpaa2_eth_match_id_table
4581 static int __init dpaa2_eth_driver_init(void)
4585 dpaa2_eth_dbg_init();
4586 err = fsl_mc_driver_register(&dpaa2_eth_driver);
4588 dpaa2_eth_dbg_exit();
4595 static void __exit dpaa2_eth_driver_exit(void)
4597 dpaa2_eth_dbg_exit();
4598 fsl_mc_driver_unregister(&dpaa2_eth_driver);
4601 module_init(dpaa2_eth_driver_init);
4602 module_exit(dpaa2_eth_driver_exit);