1 /* Copyright 2014-2016 Freescale Semiconductor Inc.
2 * Copyright 2016-2017 NXP
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 * * Redistributions of source code must retain the above copyright
7 * notice, this list of conditions and the following disclaimer.
8 * * Redistributions in binary form must reproduce the above copyright
9 * notice, this list of conditions and the following disclaimer in the
10 * documentation and/or other materials provided with the distribution.
11 * * Neither the name of Freescale Semiconductor nor the
12 * names of its contributors may be used to endorse or promote products
13 * derived from this software without specific prior written permission.
16 * ALTERNATIVELY, this software may be distributed under the terms of the
17 * GNU General Public License ("GPL") as published by the Free Software
18 * Foundation, either version 2 of that License or (at your option) any
21 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
22 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/platform_device.h>
35 #include <linux/etherdevice.h>
36 #include <linux/of_net.h>
37 #include <linux/interrupt.h>
38 #include <linux/msi.h>
39 #include <linux/kthread.h>
40 #include <linux/iommu.h>
42 #include <linux/fsl/mc.h>
43 #include "dpaa2-eth.h"
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
 * that use these trace events only need to #include "dpaa2-eth-trace.h"
48 #define CREATE_TRACE_POINTS
49 #include "dpaa2-eth-trace.h"
51 MODULE_LICENSE("Dual BSD/GPL");
52 MODULE_AUTHOR("Freescale Semiconductor, Inc");
53 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
55 const char dpaa2_eth_drv_version[] = "0.1";
57 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
60 phys_addr_t phys_addr;
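	/* When an IOMMU is present, the address carried in the FD is an IOVA
	 * that must be translated to a physical address before phys_to_virt();
	 * otherwise it is already a physical address.
	 */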
62 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
64 return phys_to_virt(phys_addr);
67 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
71 skb_checksum_none_assert(skb);
73 /* HW checksum validation is disabled, nothing to do here */
74 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
77 /* Read checksum validation bits */
78 if (!((fd_status & DPAA2_FAS_L3CV) &&
79 (fd_status & DPAA2_FAS_L4CV)))
82 /* Inform the stack there's no need to compute L3/L4 csum anymore */
83 skb->ip_summed = CHECKSUM_UNNECESSARY;
86 /* Free a received FD.
87 * Not to be used for Tx conf FDs or on any other paths.
89 static void free_rx_fd(struct dpaa2_eth_priv *priv,
90 const struct dpaa2_fd *fd,
93 struct device *dev = priv->net_dev->dev.parent;
94 dma_addr_t addr = dpaa2_fd_get_addr(fd);
95 u8 fd_format = dpaa2_fd_get_format(fd);
96 struct dpaa2_sg_entry *sgt;
100 /* If single buffer frame, just free the data buffer */
101 if (fd_format == dpaa2_fd_single)
103 else if (fd_format != dpaa2_fd_sg)
104 /* We don't support any other format */
107 /* For S/G frames, we first need to free all SG entries
108 * except the first one, which was taken care of already
110 sgt = vaddr + dpaa2_fd_get_offset(fd);
111 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
112 addr = dpaa2_sg_get_addr(&sgt[i]);
113 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
114 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
117 skb_free_frag(sg_vaddr);
118 if (dpaa2_sg_is_final(&sgt[i]))
123 skb_free_frag(vaddr);
126 /* Build a linear skb based on a single-buffer frame descriptor */
127 static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
128 struct dpaa2_eth_channel *ch,
129 const struct dpaa2_fd *fd,
132 struct sk_buff *skb = NULL;
133 u16 fd_offset = dpaa2_fd_get_offset(fd);
134 u32 fd_length = dpaa2_fd_get_len(fd);
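	/* Reuse the Rx buffer in place; DPAA2_ETH_SKB_SIZE leaves enough
	 * tailroom for the skb_shared_info that build_skb() places at the end.
	 */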
138 skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
142 skb_reserve(skb, fd_offset);
143 skb_put(skb, fd_length);
/* Build a non-linear (fragmented) skb based on an S/G table */
149 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
150 struct dpaa2_eth_channel *ch,
151 struct dpaa2_sg_entry *sgt)
153 struct sk_buff *skb = NULL;
154 struct device *dev = priv->net_dev->dev.parent;
159 struct page *page, *head_page;
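	/* Walk the SG table: the first entry becomes the skb linear area,
	 * the remaining entries are attached as page fragments.
	 */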
163 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
164 struct dpaa2_sg_entry *sge = &sgt[i];
166 /* NOTE: We only support SG entries in dpaa2_sg_single format,
167 * but this is the only format we may receive from HW anyway
170 /* Get the address and length from the S/G entry */
171 sg_addr = dpaa2_sg_get_addr(sge);
172 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
173 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
176 sg_length = dpaa2_sg_get_len(sge);
179 /* We build the skb around the first data buffer */
180 skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
181 if (unlikely(!skb)) {
182 /* Free the first SG entry now, since we already
183 * unmapped it and obtained the virtual address
185 skb_free_frag(sg_vaddr);
187 /* We still need to subtract the buffers used
188 * by this FD from our software counter
190 while (!dpaa2_sg_is_final(&sgt[i]) &&
191 i < DPAA2_ETH_MAX_SG_ENTRIES)
196 sg_offset = dpaa2_sg_get_offset(sge);
197 skb_reserve(skb, sg_offset);
198 skb_put(skb, sg_length);
200 /* Rest of the data buffers are stored as skb frags */
201 page = virt_to_page(sg_vaddr);
202 head_page = virt_to_head_page(sg_vaddr);
204 /* Offset in page (which may be compound).
205 * Data in subsequent SG entries is stored from the
		 * beginning of the buffer, so we don't need to add the
		 * sg_offset.
		 */
		page_offset = ((unsigned long)sg_vaddr & (PAGE_SIZE - 1)) +
			      (page_address(page) - page_address(head_page));
213 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
214 sg_length, DPAA2_ETH_RX_BUF_SIZE);
217 if (dpaa2_sg_is_final(sge))
221 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
223 /* Count all data buffers + SG table buffer */
224 ch->buf_count -= i + 2;
229 /* Main Rx frame processing routine */
230 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
231 struct dpaa2_eth_channel *ch,
232 const struct dpaa2_fd *fd,
233 struct napi_struct *napi,
236 dma_addr_t addr = dpaa2_fd_get_addr(fd);
237 u8 fd_format = dpaa2_fd_get_format(fd);
240 struct rtnl_link_stats64 *percpu_stats;
241 struct dpaa2_eth_drv_stats *percpu_extras;
242 struct device *dev = priv->net_dev->dev.parent;
243 struct dpaa2_fas *fas;
248 trace_dpaa2_rx_fd(priv->net_dev, fd);
250 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
251 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
253 fas = dpaa2_get_fas(vaddr, false);
255 buf_data = vaddr + dpaa2_fd_get_offset(fd);
258 percpu_stats = this_cpu_ptr(priv->percpu_stats);
259 percpu_extras = this_cpu_ptr(priv->percpu_extras);
261 if (fd_format == dpaa2_fd_single) {
262 skb = build_linear_skb(priv, ch, fd, vaddr);
263 } else if (fd_format == dpaa2_fd_sg) {
264 skb = build_frag_skb(priv, ch, buf_data);
265 skb_free_frag(vaddr);
266 percpu_extras->rx_sg_frames++;
267 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
269 /* We don't support any other format */
270 goto err_frame_format;
	/* Check if we need to validate the L3/L4 checksums */
279 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
280 status = le32_to_cpu(fas->status);
281 validate_rx_csum(priv, status, skb);
284 skb->protocol = eth_type_trans(skb, priv->net_dev);
285 skb_record_rx_queue(skb, queue_id);
287 percpu_stats->rx_packets++;
288 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
290 napi_gro_receive(napi, skb);
295 free_rx_fd(priv, fd, vaddr);
297 percpu_stats->rx_dropped++;
300 /* Consume all frames pull-dequeued into the store. This is the simplest way to
301 * make sure we don't accidentally issue another volatile dequeue which would
302 * overwrite (leak) frames already in the store.
304 * Observance of NAPI budget is not our concern, leaving that to the caller.
306 static int consume_frames(struct dpaa2_eth_channel *ch)
308 struct dpaa2_eth_priv *priv = ch->priv;
309 struct dpaa2_eth_fq *fq;
311 const struct dpaa2_fd *fd;
316 dq = dpaa2_io_store_next(ch->store, &is_last);
318 /* If we're here, we *must* have placed a
		 * volatile dequeue command, so keep reading through
320 * the store until we get some sort of valid response
321 * token (either a valid frame or an "empty dequeue")
326 fd = dpaa2_dq_fd(dq);
327 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
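		/* The FQ context was set to our dpaa2_eth_fq pointer at queue
		 * setup time; fq->consume() dispatches to the Rx or Tx-conf
		 * handler registered in setup_fqs().
		 */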
330 fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
337 /* Create a frame descriptor based on a fragmented skb */
338 static int build_sg_fd(struct dpaa2_eth_priv *priv,
342 struct device *dev = priv->net_dev->dev.parent;
343 void *sgt_buf = NULL;
345 int nr_frags = skb_shinfo(skb)->nr_frags;
346 struct dpaa2_sg_entry *sgt;
349 struct scatterlist *scl, *crt_scl;
352 struct dpaa2_eth_swa *swa;
354 /* Create and map scatterlist.
355 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
356 * to go beyond nr_frags+1.
357 * Note: We don't support chained scatterlists
359 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
362 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
366 sg_init_table(scl, nr_frags + 1);
367 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
368 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
369 if (unlikely(!num_dma_bufs)) {
371 goto dma_map_sg_failed;
374 /* Prepare the HW SGT structure */
375 sgt_buf_size = priv->tx_data_offset +
376 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
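	/* SGT buffer layout: software annotation area at the start of the
	 * buffer, with the array of HW S/G entries placed at tx_data_offset.
	 */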
377 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
378 if (unlikely(!sgt_buf)) {
380 goto sgt_buf_alloc_failed;
382 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
383 memset(sgt_buf, 0, sgt_buf_size);
385 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
387 /* Fill in the HW SGT structure.
389 * sgt_buf is zeroed out, so the following fields are implicit
390 * in all sgt entries:
392 * - format is 'dpaa2_sg_single'
394 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
395 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
396 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
398 dpaa2_sg_set_final(&sgt[i - 1], true);
400 /* Store the skb backpointer in the SGT buffer.
401 * Fit the scatterlist and the number of buffers alongside the
402 * skb backpointer in the software annotation area. We'll need
403 * all of them on Tx Conf.
405 swa = (struct dpaa2_eth_swa *)sgt_buf;
408 swa->num_sg = num_sg;
409 swa->sgt_size = sgt_buf_size;
411 /* Separately map the SGT buffer */
412 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
413 if (unlikely(dma_mapping_error(dev, addr))) {
415 goto dma_map_single_failed;
417 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
418 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
419 dpaa2_fd_set_addr(fd, addr);
420 dpaa2_fd_set_len(fd, skb->len);
421 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
425 dma_map_single_failed:
426 skb_free_frag(sgt_buf);
427 sgt_buf_alloc_failed:
428 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
434 /* Create a frame descriptor based on a linear skb */
435 static int build_single_fd(struct dpaa2_eth_priv *priv,
439 struct device *dev = priv->net_dev->dev.parent;
440 u8 *buffer_start, *aligned_start;
441 struct sk_buff **skbh;
444 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
446 /* If there's enough room to align the FD address, do it.
447 * It will help hardware optimize accesses.
449 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
450 DPAA2_ETH_TX_BUF_ALIGN);
451 if (aligned_start >= skb->head)
452 buffer_start = aligned_start;
454 /* Store a backpointer to the skb at the beginning of the buffer
455 * (in the private data area) such that we can release it
458 skbh = (struct sk_buff **)buffer_start;
461 addr = dma_map_single(dev, buffer_start,
462 skb_tail_pointer(skb) - buffer_start,
464 if (unlikely(dma_mapping_error(dev, addr)))
467 dpaa2_fd_set_addr(fd, addr);
468 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
469 dpaa2_fd_set_len(fd, skb->len);
470 dpaa2_fd_set_format(fd, dpaa2_fd_single);
471 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_PTA | DPAA2_FD_CTRL_PTV1);
476 /* FD freeing routine on the Tx path
478 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
479 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 * Frame errors are not reported here; the confirmation path checks them
 * separately, via the FD control field.
485 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
486 const struct dpaa2_fd *fd)
488 struct device *dev = priv->net_dev->dev.parent;
490 struct sk_buff **skbh, *skb;
491 unsigned char *buffer_start;
492 struct dpaa2_eth_swa *swa;
493 u8 fd_format = dpaa2_fd_get_format(fd);
495 fd_addr = dpaa2_fd_get_addr(fd);
496 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
498 if (fd_format == dpaa2_fd_single) {
500 buffer_start = (unsigned char *)skbh;
501 /* Accessing the skb buffer is safe before dma unmap, because
502 * we didn't map the actual skb shell.
504 dma_unmap_single(dev, fd_addr,
505 skb_tail_pointer(skb) - buffer_start,
507 } else if (fd_format == dpaa2_fd_sg) {
508 swa = (struct dpaa2_eth_swa *)skbh;
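		/* For S/G frames, the software annotation at the start of the
		 * SGT buffer holds the skb pointer, the scatterlist and its
		 * size, as saved by build_sg_fd().
		 */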
511 /* Unmap the scatterlist */
512 dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
515 /* Unmap the SGT buffer */
516 dma_unmap_single(dev, fd_addr, swa->sgt_size,
519 netdev_dbg(priv->net_dev, "Invalid FD format\n");
523 /* Free SGT buffer allocated on tx */
524 if (fd_format != dpaa2_fd_single)
527 /* Move on with skb release */
531 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
533 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
535 struct rtnl_link_stats64 *percpu_stats;
536 struct dpaa2_eth_drv_stats *percpu_extras;
537 struct dpaa2_eth_fq *fq;
539 unsigned int needed_headroom;
542 percpu_stats = this_cpu_ptr(priv->percpu_stats);
543 percpu_extras = this_cpu_ptr(priv->percpu_extras);
545 needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
546 if (skb_headroom(skb) < needed_headroom) {
549 ns = skb_realloc_headroom(skb, needed_headroom);
551 percpu_stats->tx_dropped++;
552 goto err_alloc_headroom;
554 percpu_extras->tx_reallocs++;
559 /* We'll be holding a back-reference to the skb until Tx Confirmation;
560 * we don't want that overwritten by a concurrent Tx with a cloned skb.
562 skb = skb_unshare(skb, GFP_ATOMIC);
563 if (unlikely(!skb)) {
564 /* skb_unshare() has already freed the skb */
565 percpu_stats->tx_dropped++;
569 /* Setup the FD fields */
570 memset(&fd, 0, sizeof(fd));
572 if (skb_is_nonlinear(skb)) {
573 err = build_sg_fd(priv, skb, &fd);
574 percpu_extras->tx_sg_frames++;
575 percpu_extras->tx_sg_bytes += skb->len;
577 err = build_single_fd(priv, skb, &fd);
581 percpu_stats->tx_dropped++;
586 trace_dpaa2_tx_fd(net_dev, &fd);
588 /* TxConf FQ selection relies on queue id from the stack.
589 * In case of a forwarded frame from another DPNI interface, we choose
590 * a queue affined to the same core that processed the Rx frame
592 queue_mapping = skb_get_queue_mapping(skb);
593 fq = &priv->fq[queue_mapping];
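	/* Try to enqueue the frame; if the QBMan portal is busy, retry a
	 * bounded number of times before dropping the frame.
	 */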
594 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
595 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
601 percpu_extras->tx_portal_busy += i;
602 if (unlikely(err < 0)) {
603 percpu_stats->tx_errors++;
604 /* Clean up everything, including freeing the skb */
605 free_tx_fd(priv, &fd);
607 percpu_stats->tx_packets++;
608 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
620 /* Tx confirmation frame processing routine */
621 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
622 struct dpaa2_eth_channel *ch,
623 const struct dpaa2_fd *fd,
624 struct napi_struct *napi __always_unused,
625 u16 queue_id __always_unused)
627 struct rtnl_link_stats64 *percpu_stats;
628 struct dpaa2_eth_drv_stats *percpu_extras;
632 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
634 percpu_extras = this_cpu_ptr(priv->percpu_extras);
635 percpu_extras->tx_conf_frames++;
636 percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
638 /* Check frame errors in the FD field */
639 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
640 free_tx_fd(priv, fd);
642 if (likely(!fd_errors))
646 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
649 percpu_stats = this_cpu_ptr(priv->percpu_stats);
650 /* Tx-conf logically pertains to the egress path. */
651 percpu_stats->tx_errors++;
654 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
658 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
659 DPNI_OFF_RX_L3_CSUM, enable);
661 netdev_err(priv->net_dev,
662 "dpni_set_offload(RX_L3_CSUM) failed\n");
666 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
667 DPNI_OFF_RX_L4_CSUM, enable);
669 netdev_err(priv->net_dev,
670 "dpni_set_offload(RX_L4_CSUM) failed\n");
677 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
681 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
682 DPNI_OFF_TX_L3_CSUM, enable);
684 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
688 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
689 DPNI_OFF_TX_L4_CSUM, enable);
691 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
698 /* Free buffers acquired from the buffer pool or which were meant to
699 * be released in the pool
701 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
703 struct device *dev = priv->net_dev->dev.parent;
707 for (i = 0; i < count; i++) {
708 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
709 dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
711 skb_free_frag(vaddr);
715 /* Perform a single release command to add buffers
716 * to the specified buffer pool
718 static int add_bufs(struct dpaa2_eth_priv *priv,
719 struct dpaa2_eth_channel *ch, u16 bpid)
721 struct device *dev = priv->net_dev->dev.parent;
722 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
727 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
728 /* Allocate buffer visible to WRIOP + skb shared info +
731 buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
735 buf = PTR_ALIGN(buf, priv->rx_buf_align);
737 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
739 if (unlikely(dma_mapping_error(dev, addr)))
745 trace_dpaa2_eth_buf_seed(priv->net_dev,
746 buf, dpaa2_eth_buf_raw_size(priv),
747 addr, DPAA2_ETH_RX_BUF_SIZE,
752 /* In case the portal is busy, retry until successful */
753 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
754 buf_array, i)) == -EBUSY)
757 /* If release command failed, clean up and bail out;
758 * not much else we can do about it
761 free_bufs(priv, buf_array, i);
770 /* If we managed to allocate at least some buffers,
771 * release them to hardware
779 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
784 /* This is the lazy seeding of Rx buffer pools.
	 * add_bufs() is also used on the Rx hot path and calls
	 * napi_alloc_frag(), which in turn calls this_cpu_ptr() and therefore
	 * mandates execution in atomic context.
	 * Rather than splitting up the code, do a one-off preempt disable.
791 for (j = 0; j < priv->num_channels; j++) {
792 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
793 i += DPAA2_ETH_BUFS_PER_CMD) {
794 new_count = add_bufs(priv, priv->channel[j], bpid);
795 priv->channel[j]->buf_count += new_count;
797 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
809 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
812 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
814 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
818 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
821 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
824 free_bufs(priv, buf_array, ret);
828 static void drain_pool(struct dpaa2_eth_priv *priv)
832 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
835 for (i = 0; i < priv->num_channels; i++)
836 priv->channel[i]->buf_count = 0;
839 /* Function is called from softirq context only, so we don't need to guard
840 * the access to percpu count
842 static int refill_pool(struct dpaa2_eth_priv *priv,
843 struct dpaa2_eth_channel *ch,
848 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
852 new_count = add_bufs(priv, ch, bpid);
853 if (unlikely(!new_count)) {
854 /* Out of memory; abort for now, we'll try later on */
857 ch->buf_count += new_count;
858 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
860 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
866 static int pull_channel(struct dpaa2_eth_channel *ch)
871 /* Retry while portal is busy */
873 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
877 } while (err == -EBUSY);
879 ch->stats.dequeue_portal_busy += dequeues;
881 ch->stats.pull_err++;
888 * Frames are dequeued from the QMan channel associated with this NAPI context.
889 * Rx, Tx confirmation and (if configured) Rx error frames all count
890 * towards the NAPI budget.
892 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
894 struct dpaa2_eth_channel *ch;
895 int cleaned = 0, store_cleaned;
896 struct dpaa2_eth_priv *priv;
899 ch = container_of(napi, struct dpaa2_eth_channel, napi);
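	/* Repeatedly pull-dequeue frames from the channel into the store and
	 * consume them, until the NAPI budget is exhausted or the hardware
	 * has nothing more for us.
	 */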
902 while (cleaned < budget) {
903 err = pull_channel(ch);
907 /* Refill pool if appropriate */
908 refill_pool(priv, ch, priv->bpid);
910 store_cleaned = consume_frames(ch);
911 cleaned += store_cleaned;
913 /* If we have enough budget left for a full store,
914 * try a new pull dequeue, otherwise we're done here
916 if (store_cleaned == 0 ||
917 cleaned > budget - DPAA2_ETH_STORE_SIZE)
921 if (cleaned < budget && napi_complete_done(napi, cleaned)) {
922 /* Re-enable data available notifications */
924 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
926 } while (err == -EBUSY);
927 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
928 ch->nctx.desired_cpu);
931 ch->stats.frames += cleaned;
936 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
938 struct dpaa2_eth_channel *ch;
941 for (i = 0; i < priv->num_channels; i++) {
942 ch = priv->channel[i];
943 napi_enable(&ch->napi);
947 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
949 struct dpaa2_eth_channel *ch;
952 for (i = 0; i < priv->num_channels; i++) {
953 ch = priv->channel[i];
954 napi_disable(&ch->napi);
958 static int link_state_update(struct dpaa2_eth_priv *priv)
960 struct dpni_link_state state;
963 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
965 netdev_err(priv->net_dev,
966 "dpni_get_link_state() failed\n");
	/* Check link state; speed/duplex changes are not handled yet */
971 if (priv->link_state.up == state.up)
974 priv->link_state = state;
976 netif_carrier_on(priv->net_dev);
977 netif_tx_start_all_queues(priv->net_dev);
979 netif_tx_stop_all_queues(priv->net_dev);
980 netif_carrier_off(priv->net_dev);
983 netdev_info(priv->net_dev, "Link Event: state %s\n",
984 state.up ? "up" : "down");
989 static int dpaa2_eth_open(struct net_device *net_dev)
991 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
994 err = seed_pool(priv, priv->bpid);
996 /* Not much to do; the buffer pool, though not filled up,
997 * may still contain some buffers which would enable us
1000 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1001 priv->dpbp_dev->obj_desc.id, priv->bpid);
1004 /* We'll only start the txqs when the link is actually ready; make sure
1005 * we don't race against the link up notification, which may come
1006 * immediately after dpni_enable();
1008 netif_tx_stop_all_queues(net_dev);
1009 enable_ch_napi(priv);
1010 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1011 * return true and cause 'ip link show' to report the LOWER_UP flag,
1012 * even though the link notification wasn't even received.
1014 netif_carrier_off(net_dev);
1016 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1018 netdev_err(net_dev, "dpni_enable() failed\n");
1022 /* If the DPMAC object has already processed the link up interrupt,
1023 * we have to learn the link state ourselves.
1025 err = link_state_update(priv);
1027 netdev_err(net_dev, "Can't update link state\n");
1028 goto link_state_err;
1035 disable_ch_napi(priv);
1040 /* The DPIO store must be empty when we call this,
1041 * at the end of every NAPI cycle.
1043 static u32 drain_channel(struct dpaa2_eth_priv *priv,
1044 struct dpaa2_eth_channel *ch)
1046 u32 drained = 0, total = 0;
1050 drained = consume_frames(ch);
1057 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1059 struct dpaa2_eth_channel *ch;
1063 for (i = 0; i < priv->num_channels; i++) {
1064 ch = priv->channel[i];
1065 drained += drain_channel(priv, ch);
1071 static int dpaa2_eth_stop(struct net_device *net_dev)
1073 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1078 netif_tx_stop_all_queues(net_dev);
1079 netif_carrier_off(net_dev);
1081 /* Loop while dpni_disable() attempts to drain the egress FQs
1082 * and confirm them back to us.
1085 dpni_disable(priv->mc_io, 0, priv->mc_token);
1086 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1088 /* Allow the hardware some slack */
1090 } while (dpni_enabled && --retries);
1092 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1093 /* Must go on and disable NAPI nonetheless, so we don't crash at
1094 * the next "ifconfig up"
1098 /* Wait for NAPI to complete on every core and disable it.
1099 * In particular, this will also prevent NAPI from being rescheduled if
1100 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1101 * don't even need to disarm the channels, except perhaps for the case
1102 * of a huge coalescing value.
1104 disable_ch_napi(priv);
1106 /* Manually drain the Rx and TxConf queues */
1107 drained = drain_ingress_frames(priv);
1109 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1111 /* Empty the buffer pool */
1117 static int dpaa2_eth_init(struct net_device *net_dev)
1120 u64 not_supported = 0;
1121 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1122 u32 options = priv->dpni_attrs.options;
1124 /* Capabilities listing */
1125 supported |= IFF_LIVE_ADDR_CHANGE;
1127 if (options & DPNI_OPT_NO_MAC_FILTER)
1128 not_supported |= IFF_UNICAST_FLT;
1130 supported |= IFF_UNICAST_FLT;
1132 net_dev->priv_flags |= supported;
1133 net_dev->priv_flags &= ~not_supported;
1136 net_dev->features = NETIF_F_RXCSUM |
1137 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1138 NETIF_F_SG | NETIF_F_HIGHDMA |
1140 net_dev->hw_features = net_dev->features;
1145 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1147 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1148 struct device *dev = net_dev->dev.parent;
1151 err = eth_mac_addr(net_dev, addr);
1153 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1157 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1160 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
/* Fill in counters maintained by the GPP driver. These may be different from
1168 * the hardware counters obtained by ethtool.
1170 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1171 struct rtnl_link_stats64 *stats)
1173 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1174 struct rtnl_link_stats64 *percpu_stats;
1176 u64 *netstats = (u64 *)stats;
1178 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
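	/* rtnl_link_stats64 is effectively an array of u64 counters, so sum
	 * the per-cpu copies field by field.
	 */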
1180 for_each_possible_cpu(i) {
1181 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1182 cpustats = (u64 *)percpu_stats;
1183 for (j = 0; j < num; j++)
1184 netstats[j] += cpustats[j];
1188 static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
1190 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1193 /* Set the maximum Rx frame length to match the transmit side;
1194 * account for L2 headers when computing the MFL
1196 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
1197 (u16)DPAA2_ETH_L2_MAX_FRM(mtu));
1199 netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
1207 /* Copy mac unicast addresses from @net_dev to @priv.
1208 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1210 static void add_uc_hw_addr(const struct net_device *net_dev,
1211 struct dpaa2_eth_priv *priv)
1213 struct netdev_hw_addr *ha;
1216 netdev_for_each_uc_addr(ha, net_dev) {
1217 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1220 netdev_warn(priv->net_dev,
1221 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1226 /* Copy mac multicast addresses from @net_dev to @priv
1227 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1229 static void add_mc_hw_addr(const struct net_device *net_dev,
1230 struct dpaa2_eth_priv *priv)
1232 struct netdev_hw_addr *ha;
1235 netdev_for_each_mc_addr(ha, net_dev) {
1236 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1239 netdev_warn(priv->net_dev,
1240 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1245 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1247 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1248 int uc_count = netdev_uc_count(net_dev);
1249 int mc_count = netdev_mc_count(net_dev);
1250 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1251 u32 options = priv->dpni_attrs.options;
1252 u16 mc_token = priv->mc_token;
1253 struct fsl_mc_io *mc_io = priv->mc_io;
1256 /* Basic sanity checks; these probably indicate a misconfiguration */
1257 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1258 netdev_info(net_dev,
1259 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1262 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1263 if (uc_count > max_mac) {
1264 netdev_info(net_dev,
1265 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1269 if (mc_count + uc_count > max_mac) {
1270 netdev_info(net_dev,
1271 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1272 uc_count + mc_count, max_mac);
1273 goto force_mc_promisc;
1276 /* Adjust promisc settings due to flag combinations */
1277 if (net_dev->flags & IFF_PROMISC)
1279 if (net_dev->flags & IFF_ALLMULTI) {
1280 /* First, rebuild unicast filtering table. This should be done
1281 * in promisc mode, in order to avoid frame loss while we
1282 * progressively add entries to the table.
1283 * We don't know whether we had been in promisc already, and
1284 * making an MC call to find out is expensive; so set uc promisc
1287 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1289 netdev_warn(net_dev, "Can't set uc promisc\n");
1291 /* Actual uc table reconstruction. */
1292 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1294 netdev_warn(net_dev, "Can't clear uc filters\n");
1295 add_uc_hw_addr(net_dev, priv);
1297 /* Finally, clear uc promisc and set mc promisc as requested. */
1298 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1300 netdev_warn(net_dev, "Can't clear uc promisc\n");
1301 goto force_mc_promisc;
1304 /* Neither unicast, nor multicast promisc will be on... eventually.
1305 * For now, rebuild mac filtering tables while forcing both of them on.
1307 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1309 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1310 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1312 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1314 /* Actual mac filtering tables reconstruction */
1315 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1317 netdev_warn(net_dev, "Can't clear mac filters\n");
1318 add_mc_hw_addr(net_dev, priv);
1319 add_uc_hw_addr(net_dev, priv);
1321 /* Now we can clear both ucast and mcast promisc, without risking
1322 * to drop legitimate frames anymore.
1324 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1326 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1327 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1329 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1334 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1336 netdev_warn(net_dev, "Can't set ucast promisc\n");
1338 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1340 netdev_warn(net_dev, "Can't set mcast promisc\n");
1343 static int dpaa2_eth_set_features(struct net_device *net_dev,
1344 netdev_features_t features)
1346 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1347 netdev_features_t changed = features ^ net_dev->features;
1351 if (changed & NETIF_F_RXCSUM) {
1352 enable = !!(features & NETIF_F_RXCSUM);
1353 err = set_rx_csum(priv, enable);
1358 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1359 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1360 err = set_tx_csum(priv, enable);
1368 static const struct net_device_ops dpaa2_eth_ops = {
1369 .ndo_open = dpaa2_eth_open,
1370 .ndo_start_xmit = dpaa2_eth_tx,
1371 .ndo_stop = dpaa2_eth_stop,
1372 .ndo_init = dpaa2_eth_init,
1373 .ndo_set_mac_address = dpaa2_eth_set_addr,
1374 .ndo_get_stats64 = dpaa2_eth_get_stats,
1375 .ndo_change_mtu = dpaa2_eth_change_mtu,
1376 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1377 .ndo_set_features = dpaa2_eth_set_features,
1380 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1382 struct dpaa2_eth_channel *ch;
1384 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
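	/* A CDAN (Channel Data Availability Notification) fired for this
	 * channel; defer the actual frame processing to its NAPI context.
	 */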
1386 /* Update NAPI statistics */
1389 napi_schedule_irqoff(&ch->napi);
1392 /* Allocate and configure a DPCON object */
1393 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1395 struct fsl_mc_device *dpcon;
1396 struct device *dev = priv->net_dev->dev.parent;
1397 struct dpcon_attr attrs;
1400 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1401 FSL_MC_POOL_DPCON, &dpcon);
1403 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1407 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1409 dev_err(dev, "dpcon_open() failed\n");
1413 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1415 dev_err(dev, "dpcon_reset() failed\n");
1419 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1421 dev_err(dev, "dpcon_get_attributes() failed\n");
1425 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1427 dev_err(dev, "dpcon_enable() failed\n");
1434 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1436 fsl_mc_object_free(dpcon);
1441 static void free_dpcon(struct dpaa2_eth_priv *priv,
1442 struct fsl_mc_device *dpcon)
1444 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1445 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1446 fsl_mc_object_free(dpcon);
1449 static struct dpaa2_eth_channel *
1450 alloc_channel(struct dpaa2_eth_priv *priv)
1452 struct dpaa2_eth_channel *channel;
1453 struct dpcon_attr attr;
1454 struct device *dev = priv->net_dev->dev.parent;
1457 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1461 channel->dpcon = setup_dpcon(priv);
1462 if (!channel->dpcon)
1465 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1468 dev_err(dev, "dpcon_get_attributes() failed\n");
1472 channel->dpcon_id = attr.id;
1473 channel->ch_id = attr.qbman_ch_id;
1474 channel->priv = priv;
1479 free_dpcon(priv, channel->dpcon);
1485 static void free_channel(struct dpaa2_eth_priv *priv,
1486 struct dpaa2_eth_channel *channel)
1488 free_dpcon(priv, channel->dpcon);
1492 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
1493 * and register data availability notifications
1495 static int setup_dpio(struct dpaa2_eth_priv *priv)
1497 struct dpaa2_io_notification_ctx *nctx;
1498 struct dpaa2_eth_channel *channel;
1499 struct dpcon_notification_cfg dpcon_notif_cfg;
1500 struct device *dev = priv->net_dev->dev.parent;
1503 /* We want the ability to spread ingress traffic (RX, TX conf) to as
1504 * many cores as possible, so we need one channel for each core
1505 * (unless there's fewer queues than cores, in which case the extra
1506 * channels would be wasted).
1507 * Allocate one channel per core and register it to the core's
1508 * affine DPIO. If not enough channels are available for all cores
1509 * or if some cores don't have an affine DPIO, there will be no
1510 * ingress frame processing on those cores.
1512 cpumask_clear(&priv->dpio_cpumask);
1513 for_each_online_cpu(i) {
1514 /* Try to allocate a channel */
1515 channel = alloc_channel(priv);
1518 "No affine channel for cpu %d and above\n", i);
1523 priv->channel[priv->num_channels] = channel;
1525 nctx = &channel->nctx;
1528 nctx->id = channel->ch_id;
1529 nctx->desired_cpu = i;
1531 /* Register the new context */
1532 channel->dpio = dpaa2_io_service_select(i);
1533 err = dpaa2_io_service_register(channel->dpio, nctx);
1535 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
1536 /* If no affine DPIO for this core, there's probably
1537 * none available for next cores either. Signal we want
1538 * to retry later, in case the DPIO devices weren't
1541 err = -EPROBE_DEFER;
1542 goto err_service_reg;
1545 /* Register DPCON notification with MC */
1546 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1547 dpcon_notif_cfg.priority = 0;
1548 dpcon_notif_cfg.user_ctx = nctx->qman64;
1549 err = dpcon_set_notification(priv->mc_io, 0,
1550 channel->dpcon->mc_handle,
1553 dev_err(dev, "dpcon_set_notification failed()\n");
1557 /* If we managed to allocate a channel and also found an affine
1558 * DPIO for this core, add it to the final mask
1560 cpumask_set_cpu(i, &priv->dpio_cpumask);
1561 priv->num_channels++;
1563 /* Stop if we already have enough channels to accommodate all
1564 * RX and TX conf queues
1566 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1573 dpaa2_io_service_deregister(channel->dpio, nctx);
1575 free_channel(priv, channel);
1577 if (cpumask_empty(&priv->dpio_cpumask)) {
1578 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1582 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1583 cpumask_pr_args(&priv->dpio_cpumask));
1588 static void free_dpio(struct dpaa2_eth_priv *priv)
1591 struct dpaa2_eth_channel *ch;
1593 /* deregister CDAN notifications and free channels */
1594 for (i = 0; i < priv->num_channels; i++) {
1595 ch = priv->channel[i];
1596 dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
1597 free_channel(priv, ch);
1601 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1604 struct device *dev = priv->net_dev->dev.parent;
1607 for (i = 0; i < priv->num_channels; i++)
1608 if (priv->channel[i]->nctx.desired_cpu == cpu)
1609 return priv->channel[i];
1611 /* We should never get here. Issue a warning and return
1612 * the first channel, because it's still better than nothing
1614 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1616 return priv->channel[0];
1619 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1621 struct device *dev = priv->net_dev->dev.parent;
1622 struct cpumask xps_mask;
1623 struct dpaa2_eth_fq *fq;
1624 int rx_cpu, txc_cpu;
1627 /* For each FQ, pick one channel/CPU to deliver frames to.
1628 * This may well change at runtime, either through irqbalance or
1629 * through direct user intervention.
1631 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1633 for (i = 0; i < priv->num_fqs; i++) {
1637 fq->target_cpu = rx_cpu;
1638 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1639 if (rx_cpu >= nr_cpu_ids)
1640 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1642 case DPAA2_TX_CONF_FQ:
1643 fq->target_cpu = txc_cpu;
1645 /* Tell the stack to affine to txc_cpu the Tx queue
1646 * associated with the confirmation one
1648 cpumask_clear(&xps_mask);
1649 cpumask_set_cpu(txc_cpu, &xps_mask);
1650 err = netif_set_xps_queue(priv->net_dev, &xps_mask,
1653 dev_err(dev, "Error setting XPS queue\n");
1655 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1656 if (txc_cpu >= nr_cpu_ids)
1657 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1660 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1662 fq->channel = get_affine_channel(priv, fq->target_cpu);
1666 static void setup_fqs(struct dpaa2_eth_priv *priv)
1670 /* We have one TxConf FQ per Tx flow.
1671 * The number of Tx and Rx queues is the same.
1672 * Tx queues come first in the fq array.
1674 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1675 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1676 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1677 priv->fq[priv->num_fqs++].flowid = (u16)i;
1680 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1681 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1682 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1683 priv->fq[priv->num_fqs++].flowid = (u16)i;
1686 /* For each FQ, decide on which core to process incoming frames */
1687 set_fq_affinity(priv);
1690 /* Allocate and configure one buffer pool for each interface */
1691 static int setup_dpbp(struct dpaa2_eth_priv *priv)
1694 struct fsl_mc_device *dpbp_dev;
1695 struct device *dev = priv->net_dev->dev.parent;
1696 struct dpbp_attr dpbp_attrs;
1698 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1701 dev_err(dev, "DPBP device allocation failed\n");
1705 priv->dpbp_dev = dpbp_dev;
1707 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1708 &dpbp_dev->mc_handle);
1710 dev_err(dev, "dpbp_open() failed\n");
1714 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
1716 dev_err(dev, "dpbp_reset() failed\n");
1720 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1722 dev_err(dev, "dpbp_enable() failed\n");
1726 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1729 dev_err(dev, "dpbp_get_attributes() failed\n");
1732 priv->bpid = dpbp_attrs.bpid;
1737 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1740 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1742 fsl_mc_object_free(dpbp_dev);
1747 static void free_dpbp(struct dpaa2_eth_priv *priv)
1750 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1751 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1752 fsl_mc_object_free(priv->dpbp_dev);
1755 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
1757 struct device *dev = priv->net_dev->dev.parent;
1758 struct dpni_buffer_layout buf_layout = {0};
	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always provided correctly on rev1
	 * hardware, so accept both encodings.
1765 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
1766 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
1767 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
1769 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
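	/* Tx buffer layout: reserve a software annotation area at the start of
	 * the buffer, used to store the skb backpointer (and, for S/G frames,
	 * the scatterlist info) needed on the Tx confirmation path.
	 */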
1772 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1773 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
1774 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1775 DPNI_QUEUE_TX, &buf_layout);
1777 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1781 /* tx-confirm buffer */
1782 buf_layout.options = 0;
1783 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1784 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
1786 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1790 /* Now that we've set our tx buffer layout, retrieve the minimum
1791 * required tx data offset.
1793 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1794 &priv->tx_data_offset);
1796 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1800 if ((priv->tx_data_offset % 64) != 0)
1801 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1802 priv->tx_data_offset);
1805 buf_layout.pass_frame_status = true;
1806 buf_layout.pass_parser_result = true;
1807 buf_layout.data_align = priv->rx_buf_align;
1808 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
1809 buf_layout.private_data_size = 0;
1810 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1811 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1812 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
1813 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
1814 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1815 DPNI_QUEUE_RX, &buf_layout);
1817 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1824 /* Configure the DPNI object this interface is associated with */
1825 static int setup_dpni(struct fsl_mc_device *ls_dev)
1827 struct device *dev = &ls_dev->dev;
1828 struct dpaa2_eth_priv *priv;
1829 struct net_device *net_dev;
1832 net_dev = dev_get_drvdata(dev);
1833 priv = netdev_priv(net_dev);
1835 /* get a handle for the DPNI object */
1836 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
1838 dev_err(dev, "dpni_open() failed\n");
1842 /* Check if we can work with this DPNI object */
1843 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
1844 &priv->dpni_ver_minor);
1846 dev_err(dev, "dpni_get_api_version() failed\n");
1849 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
1850 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
1851 priv->dpni_ver_major, priv->dpni_ver_minor,
1852 DPNI_VER_MAJOR, DPNI_VER_MINOR);
1857 ls_dev->mc_io = priv->mc_io;
1858 ls_dev->mc_handle = priv->mc_token;
1860 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1862 dev_err(dev, "dpni_reset() failed\n");
1866 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1869 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1873 err = set_buffer_layout(priv);
1880 dpni_close(priv->mc_io, 0, priv->mc_token);
1885 static void free_dpni(struct dpaa2_eth_priv *priv)
1889 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1891 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1894 dpni_close(priv->mc_io, 0, priv->mc_token);
1897 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1898 struct dpaa2_eth_fq *fq)
1900 struct device *dev = priv->net_dev->dev.parent;
1901 struct dpni_queue queue;
1902 struct dpni_queue_id qid;
1903 struct dpni_taildrop td;
1906 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1907 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1909 dev_err(dev, "dpni_get_queue(RX) failed\n");
1913 fq->fqid = qid.fqid;
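	/* Have frames from this Rx FQ delivered to the DPCON channel affine to
	 * the target CPU, and store the fq pointer in the queue context so
	 * consume_frames() can retrieve it from the dequeue result.
	 */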
1915 queue.destination.id = fq->channel->dpcon_id;
1916 queue.destination.type = DPNI_DEST_DPCON;
1917 queue.destination.priority = 1;
1918 queue.user_context = (u64)(uintptr_t)fq;
1919 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1920 DPNI_QUEUE_RX, 0, fq->flowid,
1921 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1924 dev_err(dev, "dpni_set_queue(RX) failed\n");
1929 td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1930 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1931 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1933 dev_err(dev, "dpni_set_threshold() failed\n");
1940 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1941 struct dpaa2_eth_fq *fq)
1943 struct device *dev = priv->net_dev->dev.parent;
1944 struct dpni_queue queue;
1945 struct dpni_queue_id qid;
1948 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1949 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1951 dev_err(dev, "dpni_get_queue(TX) failed\n");
1955 fq->tx_qdbin = qid.qdbin;
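	/* Tx frames are enqueued via the queuing destination (QDID/qdbin)
	 * rather than a frame queue id; only the Tx confirmation queue below
	 * is dispatched to a DPCON channel.
	 */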
1957 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1958 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1961 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
1965 fq->fqid = qid.fqid;
1967 queue.destination.id = fq->channel->dpcon_id;
1968 queue.destination.type = DPNI_DEST_DPCON;
1969 queue.destination.priority = 0;
1970 queue.user_context = (u64)(uintptr_t)fq;
1971 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1972 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1973 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1976 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
1983 /* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
1984 static const struct dpaa2_eth_hash_fields hash_fields[] = {
1987 .rxnfc_field = RXH_IP_SRC,
1988 .cls_prot = NET_PROT_IP,
1989 .cls_field = NH_FLD_IP_SRC,
1992 .rxnfc_field = RXH_IP_DST,
1993 .cls_prot = NET_PROT_IP,
1994 .cls_field = NH_FLD_IP_DST,
1997 .rxnfc_field = RXH_L3_PROTO,
1998 .cls_prot = NET_PROT_IP,
1999 .cls_field = NH_FLD_IP_PROTO,
2002 /* Using UDP ports, this is functionally equivalent to raw
2003 * byte pairs from L4 header.
2005 .rxnfc_field = RXH_L4_B_0_1,
2006 .cls_prot = NET_PROT_UDP,
2007 .cls_field = NH_FLD_UDP_PORT_SRC,
2010 .rxnfc_field = RXH_L4_B_2_3,
2011 .cls_prot = NET_PROT_UDP,
2012 .cls_field = NH_FLD_UDP_PORT_DST,
2017 /* Set RX hash options
2018 * flags is a combination of RXH_ bits
2020 static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2022 struct device *dev = net_dev->dev.parent;
2023 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2024 struct dpkg_profile_cfg cls_cfg;
2025 struct dpni_rx_tc_dist_cfg dist_cfg;
2030 if (!dpaa2_eth_hash_enabled(priv)) {
2031 dev_dbg(dev, "Hashing support is not enabled\n");
2035 memset(&cls_cfg, 0, sizeof(cls_cfg));
2037 for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
2038 struct dpkg_extract *key =
2039 &cls_cfg.extracts[cls_cfg.num_extracts];
2041 if (!(flags & hash_fields[i].rxnfc_field))
2044 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2045 dev_err(dev, "error adding key extraction rule, too many rules?\n");
2049 key->type = DPKG_EXTRACT_FROM_HDR;
2050 key->extract.from_hdr.prot = hash_fields[i].cls_prot;
2051 key->extract.from_hdr.type = DPKG_FULL_FIELD;
2052 key->extract.from_hdr.field = hash_fields[i].cls_field;
2053 cls_cfg.num_extracts++;
2055 priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
2058 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2062 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2064 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2068 memset(&dist_cfg, 0, sizeof(dist_cfg));
2070 /* Prepare for setting the rx dist */
2071 dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
2072 DPAA2_CLASSIFIER_DMA_SIZE,
2074 if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
2075 dev_err(dev, "DMA mapping failed\n");
2080 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2081 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2083 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2084 dma_unmap_single(dev, dist_cfg.key_cfg_iova,
2085 DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
2087 dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
2095 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2096 * frame queues and channels
2098 static int bind_dpni(struct dpaa2_eth_priv *priv)
2100 struct net_device *net_dev = priv->net_dev;
2101 struct device *dev = net_dev->dev.parent;
2102 struct dpni_pools_cfg pools_params;
2103 struct dpni_error_cfg err_cfg;
2107 pools_params.num_dpbp = 1;
2108 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2109 pools_params.pools[0].backup_pool = 0;
2110 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2111 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2113 dev_err(dev, "dpni_set_pools() failed\n");
2117 /* have the interface implicitly distribute traffic based on supported
2120 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
2122 dev_err(dev, "Failed to configure hashing\n");
2124 /* Configure handling of error frames */
2125 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2126 err_cfg.set_frame_annotation = 1;
2127 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2128 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2131 dev_err(dev, "dpni_set_errors_behavior failed\n");
2135 /* Configure Rx and Tx conf queues to generate CDANs */
2136 for (i = 0; i < priv->num_fqs; i++) {
2137 switch (priv->fq[i].type) {
2139 err = setup_rx_flow(priv, &priv->fq[i]);
2141 case DPAA2_TX_CONF_FQ:
2142 err = setup_tx_flow(priv, &priv->fq[i]);
2145 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2152 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2153 DPNI_QUEUE_TX, &priv->tx_qdid);
2155 dev_err(dev, "dpni_get_qdid() failed\n");
2162 /* Allocate rings for storing incoming frame descriptors */
2163 static int alloc_rings(struct dpaa2_eth_priv *priv)
2165 struct net_device *net_dev = priv->net_dev;
2166 struct device *dev = net_dev->dev.parent;
2169 for (i = 0; i < priv->num_channels; i++) {
2170 priv->channel[i]->store =
2171 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2172 if (!priv->channel[i]->store) {
2173 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2181 for (i = 0; i < priv->num_channels; i++) {
2182 if (!priv->channel[i]->store)
2184 dpaa2_io_store_destroy(priv->channel[i]->store);
2190 static void free_rings(struct dpaa2_eth_priv *priv)
2194 for (i = 0; i < priv->num_channels; i++)
2195 dpaa2_io_store_destroy(priv->channel[i]->store);
2198 static int set_mac_addr(struct dpaa2_eth_priv *priv)
2200 struct net_device *net_dev = priv->net_dev;
2201 struct device *dev = net_dev->dev.parent;
2202 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
2205 /* Get firmware address, if any */
2206 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2208 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
	/* Get the MAC address currently configured on the DPNI, if any */
2213 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2216 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
2220 /* First check if firmware has any address configured by bootloader */
2221 if (!is_zero_ether_addr(mac_addr)) {
2222 /* If the DPMAC addr != DPNI addr, update it */
2223 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2224 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2228 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2232 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2233 } else if (is_zero_ether_addr(dpni_mac_addr)) {
2234 /* No MAC address configured, fill in net_dev->dev_addr
2237 eth_hw_addr_random(net_dev);
2238 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2240 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2243 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2247 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
2248 * practical purposes, this will be our "permanent" mac address,
2249 * at least until the next reboot. This move will also permit
2250 * register_netdevice() to properly fill up net_dev->perm_addr.
2252 net_dev->addr_assign_type = NET_ADDR_PERM;
2254 /* NET_ADDR_PERM is default, all we have to do is
2255 * fill in the device addr.
2257 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2263 static int netdev_init(struct net_device *net_dev)
2265 struct device *dev = net_dev->dev.parent;
2266 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2267 u8 bcast_addr[ETH_ALEN];
2271 net_dev->netdev_ops = &dpaa2_eth_ops;
2273 err = set_mac_addr(priv);
2277 /* Explicitly add the broadcast address to the MAC filtering table */
2278 eth_broadcast_addr(bcast_addr);
2279 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2281 dev_err(dev, "dpni_add_mac_addr() failed\n");
2285 /* Set MTU limits */
2286 net_dev->min_mtu = 68;
2287 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
2289 /* Set actual number of queues in the net device */
2290 num_queues = dpaa2_eth_queue_count(priv);
2291 err = netif_set_real_num_tx_queues(net_dev, num_queues);
2293 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
2296 err = netif_set_real_num_rx_queues(net_dev, num_queues);
2298 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
2302 /* Our .ndo_init will be called herein */
2303 err = register_netdev(net_dev);
2305 dev_err(dev, "register_netdev() failed\n");
2312 static int poll_link_state(void *arg)
2314 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2317 while (!kthread_should_stop()) {
2318 err = link_state_update(priv);
2322 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2328 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2331 struct device *dev = (struct device *)arg;
2332 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2333 struct net_device *net_dev = dev_get_drvdata(dev);
2336 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2337 DPNI_IRQ_INDEX, &status);
2338 if (unlikely(err)) {
2339 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
2343 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
2344 link_state_update(netdev_priv(net_dev));
2349 static int setup_irqs(struct fsl_mc_device *ls_dev)
2352 struct fsl_mc_device_irq *irq;
2354 err = fsl_mc_allocate_irqs(ls_dev);
2356 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2360 irq = ls_dev->irqs[0];
2361 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
2362 NULL, dpni_irq0_handler_thread,
2363 IRQF_NO_SUSPEND | IRQF_ONESHOT,
2364 dev_name(&ls_dev->dev), &ls_dev->dev);
2366 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
2370 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2371 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2373 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
2377 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2380 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
2387 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2389 fsl_mc_free_irqs(ls_dev);
2394 static void add_ch_napi(struct dpaa2_eth_priv *priv)
2397 struct dpaa2_eth_channel *ch;
2399 for (i = 0; i < priv->num_channels; i++) {
2400 ch = priv->channel[i];
2401 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
2402 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2407 static void del_ch_napi(struct dpaa2_eth_priv *priv)
2410 struct dpaa2_eth_channel *ch;
2412 for (i = 0; i < priv->num_channels; i++) {
2413 ch = priv->channel[i];
2414 netif_napi_del(&ch->napi);
2418 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2421 struct net_device *net_dev = NULL;
2422 struct dpaa2_eth_priv *priv = NULL;
2425 dev = &dpni_dev->dev;
2428 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2430 dev_err(dev, "alloc_etherdev_mq() failed\n");
2434 SET_NETDEV_DEV(net_dev, dev);
2435 dev_set_drvdata(dev, net_dev);
2437 priv = netdev_priv(net_dev);
2438 priv->net_dev = net_dev;
2440 priv->iommu_domain = iommu_get_domain_for_dev(dev);
2442 /* Obtain a MC portal */
2443 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2447 err = -EPROBE_DEFER;
2449 dev_err(dev, "MC portal allocation failed\n");
2450 goto err_portal_alloc;
2453 /* MC objects initialization and configuration */
2454 err = setup_dpni(dpni_dev);
2456 goto err_dpni_setup;
2458 err = setup_dpio(priv);
2460 goto err_dpio_setup;
2464 err = setup_dpbp(priv);
2466 goto err_dpbp_setup;
2468 err = bind_dpni(priv);
2472 /* Add a NAPI context for each channel */
2475 /* Percpu statistics */
2476 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2477 if (!priv->percpu_stats) {
2478 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2480 goto err_alloc_percpu_stats;
2482 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2483 if (!priv->percpu_extras) {
2484 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2486 goto err_alloc_percpu_extras;
2489 err = netdev_init(net_dev);
2491 goto err_netdev_init;
2493 /* Configure checksum offload based on current interface flags */
2494 err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2498 err = set_tx_csum(priv, !!(net_dev->features &
2499 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2503 err = alloc_rings(priv);
2505 goto err_alloc_rings;
2507 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
2509 err = setup_irqs(dpni_dev);
		netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
2512 priv->poll_thread = kthread_run(poll_link_state, priv,
2513 "%s_poll_link", net_dev->name);
2514 if (IS_ERR(priv->poll_thread)) {
2515 netdev_err(net_dev, "Error starting polling thread\n");
2516 goto err_poll_thread;
2518 priv->do_link_poll = true;
2521 dev_info(dev, "Probed interface %s\n", net_dev->name);
2528 unregister_netdev(net_dev);
2530 free_percpu(priv->percpu_extras);
2531 err_alloc_percpu_extras:
2532 free_percpu(priv->percpu_stats);
2533 err_alloc_percpu_stats:
2542 fsl_mc_portal_free(priv->mc_io);
2544 dev_set_drvdata(dev, NULL);
2545 free_netdev(net_dev);
2550 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2553 struct net_device *net_dev;
2554 struct dpaa2_eth_priv *priv;
2557 net_dev = dev_get_drvdata(dev);
2558 priv = netdev_priv(net_dev);
2560 unregister_netdev(net_dev);
2562 if (priv->do_link_poll)
2563 kthread_stop(priv->poll_thread);
2565 fsl_mc_free_irqs(ls_dev);
2568 free_percpu(priv->percpu_stats);
2569 free_percpu(priv->percpu_extras);
2576 fsl_mc_portal_free(priv->mc_io);
2578 dev_set_drvdata(dev, NULL);
2579 free_netdev(net_dev);
2581 dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2586 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2588 .vendor = FSL_MC_VENDOR_FREESCALE,
2593 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2595 static struct fsl_mc_driver dpaa2_eth_driver = {
2597 .name = KBUILD_MODNAME,
2598 .owner = THIS_MODULE,
2600 .probe = dpaa2_eth_probe,
2601 .remove = dpaa2_eth_remove,
2602 .match_id_table = dpaa2_eth_match_id_table
2605 module_fsl_mc_driver(dpaa2_eth_driver);