// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
enum mvpp2_bm_pool_log_num {
    MVPP2_BM_SHORT,
    MVPP2_BM_LONG,
    MVPP2_BM_JUMBO,
    MVPP2_BM_POOLS_NUM
};

static struct {
    int pkt_size;
    int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* These prototypes are added here to be used in start_dev when using ACPI.
 * They will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
                             const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config,
                              struct phy_device *phy,
                              unsigned int mode, phy_interface_t interface,
                              int speed, int duplex,
                              bool tx_pause, bool rx_pause);
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE  1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
    writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
    return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
    return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
    return cpu % priv->nthreads;
}
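
/* Illustrative note (not from the original source): with priv->nthreads == 4,
 * CPUs 0-3 map to threads 0-3 while CPU 5 maps back to thread 1, so on
 * systems with more CPUs than threads several CPUs can share one register
 * window. This is why paths such as mvpp2_bm_pool_put() below take a
 * per-thread spinlock when the thread's bit is set in priv->lock_map.
 */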
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
                       enum dma_data_direction dma_dir)
{
    struct page_pool_params pp_params = {
        /* internal DMA mapping in page_pool */
        .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
        .pool_size = num,
        .nid = NUMA_NO_NODE,
        .dev = dev,
        .dma_dir = dma_dir,
        .offset = MVPP2_SKB_HEADROOM,
        .max_len = len,
    };

    return page_pool_create(&pp_params);
}
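
/* Illustrative sketch (not part of the original source): a pool created by
 * mvpp2_create_page_pool() hands out pages that are already DMA-mapped
 * (PP_FLAG_DMA_MAP) and synced for the device (PP_FLAG_DMA_SYNC_DEV), so a
 * consumer only pairs allocation and release calls:
 *
 *	struct page *page = page_pool_dev_alloc_pages(pp);
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *	...
 *	page_pool_put_full_page(pp, page, false);
 *
 * This is the pattern used by mvpp2_frag_alloc()/mvpp2_frag_free() below.
 */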
/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
                               u32 offset, u32 data)
{
    writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
                             u32 offset)
{
    return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
                                       u32 offset, u32 data)
{
    writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
                                     u32 offset)
{
    return readl_relaxed(priv->swth_base[thread] + offset);
}
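
/* Illustrative sketch (not part of the original source): a "global register
 * accessed through a thread window" pairs two accesses on the same thread,
 * e.g. selecting an RXQ and then reading one of its indirect registers:
 *
 *	mvpp2_thread_write(priv, thread, MVPP2_RXQ_NUM_REG, rxq_id);
 *	val = mvpp2_thread_read(priv, thread, MVPP2_RXQ_DESC_ADDR_REG);
 *
 * Both accesses must go through the same swth_base[thread] window; the plain
 * mvpp2_read()/mvpp2_write() helpers always use window 0 and are therefore
 * not safe for the registers listed above.
 */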
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
                                            struct mvpp2_tx_desc *tx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
    else
        return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
               MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
                                      struct mvpp2_tx_desc *tx_desc,
                                      dma_addr_t dma_addr)
{
    dma_addr_t addr, offset;

    addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
    offset = dma_addr & MVPP2_TX_DESC_ALIGN;

    if (port->priv->hw_version == MVPP21) {
        tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
        tx_desc->pp21.packet_offset = offset;
    } else {
        __le64 val = cpu_to_le64(addr);

        tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
        tx_desc->pp22.buf_dma_addr_ptp |= val;
        tx_desc->pp22.packet_offset = offset;
    }
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
                                    struct mvpp2_tx_desc *tx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return le16_to_cpu(tx_desc->pp21.data_size);
    else
        return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
                                  struct mvpp2_tx_desc *tx_desc,
                                  size_t size)
{
    if (port->priv->hw_version == MVPP21)
        tx_desc->pp21.data_size = cpu_to_le16(size);
    else
        tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
                                 struct mvpp2_tx_desc *tx_desc,
                                 unsigned int txq)
{
    if (port->priv->hw_version == MVPP21)
        tx_desc->pp21.phys_txq = txq;
    else
        tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
                                 struct mvpp2_tx_desc *tx_desc,
                                 unsigned int command)
{
    if (port->priv->hw_version == MVPP21)
        tx_desc->pp21.command = cpu_to_le32(command);
    else
        tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
                                            struct mvpp2_tx_desc *tx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return tx_desc->pp21.packet_offset;
    else
        return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
                                            struct mvpp2_rx_desc *rx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
    else
        return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
               MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
                                             struct mvpp2_rx_desc *rx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return le32_to_cpu(rx_desc->pp21.buf_cookie);
    else
        return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
               MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
                                    struct mvpp2_rx_desc *rx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return le16_to_cpu(rx_desc->pp21.data_size);
    else
        return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
                                   struct mvpp2_rx_desc *rx_desc)
{
    if (port->priv->hw_version == MVPP21)
        return le32_to_cpu(rx_desc->pp21.status);
    else
        return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
    txq_pcpu->txq_get_index++;
    if (txq_pcpu->txq_get_index == txq_pcpu->size)
        txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
                              struct mvpp2_txq_pcpu *txq_pcpu,
                              void *data,
                              struct mvpp2_tx_desc *tx_desc,
                              enum mvpp2_tx_buf_type buf_type)
{
    struct mvpp2_txq_pcpu_buf *tx_buf =
        txq_pcpu->buffs + txq_pcpu->txq_put_index;
    tx_buf->type = buf_type;
    if (buf_type == MVPP2_TYPE_SKB)
        tx_buf->skb = data;
    else
        tx_buf->xdpf = data;
    tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
    tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
        mvpp2_txdesc_offset_get(port, tx_desc);
    txq_pcpu->txq_put_index++;
    if (txq_pcpu->txq_put_index == txq_pcpu->size)
        txq_pcpu->txq_put_index = 0;
}
/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
    unsigned int nrxqs;

    if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
        return 1;

    /* According to the PPv2.2 datasheet and our experiments on
     * PPv2.1, RX queues have an allocation granularity of 4 (when
     * more than a single one on PPv2.2).
     * Round up to the nearest multiple of 4.
     */
    nrxqs = (num_possible_cpus() + 3) & ~0x3;
    if (nrxqs > MVPP2_PORT_MAX_RXQ)
        nrxqs = MVPP2_PORT_MAX_RXQ;

    return nrxqs;
}
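
/* Worked example (illustrative): with 6 possible CPUs the rounding above
 * yields (6 + 3) & ~0x3 == 8 RX queues, while 4 CPUs yield exactly 4; the
 * result is then clamped to MVPP2_PORT_MAX_RXQ.
 */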
/* Get the physical egress port number */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
    return MVPP2_MAX_TCONT + port->id;
}

/* Get the physical TXQ number */
static inline int mvpp2_txq_phys(int port, int txq)
{
    return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
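
/* Worked example (illustrative, assuming MVPP2_MAX_TCONT == 16 and
 * MVPP2_MAX_TXQ == 8 from mvpp2.h): port 0 is egress port 16, and its
 * TXQ 2 maps to physical TXQ (16 + 0) * 8 + 2 == 130.
 */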
/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
                              struct page_pool *page_pool)
{
    if (page_pool)
        return page_pool_dev_alloc_pages(page_pool);

    if (likely(pool->frag_size <= PAGE_SIZE))
        return netdev_alloc_frag(pool->frag_size);

    return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
                            struct page_pool *page_pool, void *data)
{
    if (page_pool)
        page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
    else if (likely(pool->frag_size <= PAGE_SIZE))
        skb_free_frag(data);
    else
        kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
                                struct mvpp2_bm_pool *bm_pool, int size)
{
    u32 val;

    /* Number of buffer pointers must be a multiple of 16, as per
     * hardware constraints
     */
    if (!IS_ALIGNED(size, 16))
        return -EINVAL;

    /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
     * bytes per buffer pointer
     */
    if (priv->hw_version == MVPP21)
        bm_pool->size_bytes = 2 * sizeof(u32) * size;
    else
        bm_pool->size_bytes = 2 * sizeof(u64) * size;

    bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
                                            &bm_pool->dma_addr,
                                            GFP_KERNEL);
    if (!bm_pool->virt_addr)
        return -ENOMEM;

    if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
                    MVPP2_BM_POOL_PTR_ALIGN)) {
        dma_free_coherent(dev, bm_pool->size_bytes,
                          bm_pool->virt_addr, bm_pool->dma_addr);
        dev_err(dev, "BM pool %d is not %d bytes aligned\n",
                bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
        return -ENOMEM;
    }

    mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
                lower_32_bits(bm_pool->dma_addr));
    mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

    val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
    val |= MVPP2_BM_START_MASK;
    mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

    bm_pool->size = size;
    bm_pool->pkt_size = 0;
    bm_pool->buf_num = 0;

    return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
                                      struct mvpp2_bm_pool *bm_pool,
                                      int buf_size)
{
    u32 val;

    bm_pool->buf_size = buf_size;

    val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
    mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
                                    struct mvpp2_bm_pool *bm_pool,
                                    dma_addr_t *dma_addr,
                                    phys_addr_t *phys_addr)
{
    unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

    *dma_addr = mvpp2_thread_read(priv, thread,
                                  MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
    *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

    if (priv->hw_version == MVPP22) {
        u32 val;
        u32 dma_addr_highbits, phys_addr_highbits;

        val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
        dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
        phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
            MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

        if (sizeof(dma_addr_t) == 8)
            *dma_addr |= (u64)dma_addr_highbits << 32;

        if (sizeof(phys_addr_t) == 8)
            *phys_addr |= (u64)phys_addr_highbits << 32;
    }

    put_cpu();
}
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
                               struct mvpp2_bm_pool *bm_pool, int buf_num)
{
    struct page_pool *pp = NULL;
    int i;

    if (buf_num > bm_pool->buf_num) {
        WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
             bm_pool->id, buf_num);
        buf_num = bm_pool->buf_num;
    }

    if (priv->percpu_pools)
        pp = priv->page_pool[bm_pool->id];

    for (i = 0; i < buf_num; i++) {
        dma_addr_t buf_dma_addr;
        phys_addr_t buf_phys_addr;
        void *data;

        mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
                                &buf_dma_addr, &buf_phys_addr);

        if (!pp)
            dma_unmap_single(dev, buf_dma_addr,
                             bm_pool->buf_size, DMA_FROM_DEVICE);

        data = (void *)phys_to_virt(buf_phys_addr);
        if (!data)
            break;

        mvpp2_frag_free(bm_pool, pp, data);
    }

    /* Update BM driver with number of buffers removed from pool */
    bm_pool->buf_num -= i;
}
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
    int buf_num = 0;

    buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
               MVPP22_BM_POOL_PTRS_NUM_MASK;
    buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
               MVPP2_BM_BPPI_PTR_NUM_MASK;

    /* HW has one buffer ready which is not reflected in the counters */
    if (buf_num)
        buf_num += 1;

    return buf_num;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
                                 struct mvpp2_bm_pool *bm_pool)
{
    int buf_num;
    u32 val;

    buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
    mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

    /* Check buffer counters after free */
    buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
    if (buf_num) {
        WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
             bm_pool->id, bm_pool->buf_num);
        return 0;
    }

    val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
    val |= MVPP2_BM_STOP_MASK;
    mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

    if (priv->percpu_pools) {
        page_pool_destroy(priv->page_pool[bm_pool->id]);
        priv->page_pool[bm_pool->id] = NULL;
    }

    dma_free_coherent(dev, bm_pool->size_bytes,
                      bm_pool->virt_addr,
                      bm_pool->dma_addr);
    return 0;
}
static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
    int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
    struct mvpp2_bm_pool *bm_pool;

    if (priv->percpu_pools)
        poolnum = mvpp2_get_nrxqs(priv) * 2;

    /* Create all pools with maximum size */
    size = MVPP2_BM_POOL_SIZE_MAX;
    for (i = 0; i < poolnum; i++) {
        bm_pool = &priv->bm_pools[i];
        bm_pool->id = i;
        err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
        if (err)
            goto err_unroll_pools;
        mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
    }
    return 0;

err_unroll_pools:
    dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
    for (i = i - 1; i >= 0; i--)
        mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
    return err;
}
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
    enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
    int i, err, poolnum = MVPP2_BM_POOLS_NUM;
    struct mvpp2_port *port;

    if (priv->percpu_pools) {
        for (i = 0; i < priv->port_count; i++) {
            port = priv->port_list[i];
            if (port->xdp_prog) {
                dma_dir = DMA_BIDIRECTIONAL;
                break;
            }
        }

        poolnum = mvpp2_get_nrxqs(priv) * 2;
        for (i = 0; i < poolnum; i++) {
            /* the pool in use */
            int pn = i / (poolnum / 2);

            priv->page_pool[i] =
                mvpp2_create_page_pool(dev,
                                       mvpp2_pools[pn].buf_num,
                                       mvpp2_pools[pn].pkt_size,
                                       dma_dir);
            if (IS_ERR(priv->page_pool[i])) {
                int j;

                for (j = 0; j < i; j++) {
                    page_pool_destroy(priv->page_pool[j]);
                    priv->page_pool[j] = NULL;
                }
                return PTR_ERR(priv->page_pool[i]);
            }
        }
    }

    dev_info(dev, "using %d %s buffers\n", poolnum,
             priv->percpu_pools ? "per-cpu" : "shared");

    for (i = 0; i < poolnum; i++) {
        /* Mask BM all interrupts */
        mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
        /* Clear BM cause register */
        mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
    }

    /* Allocate and initialize BM pools */
    priv->bm_pools = devm_kcalloc(dev, poolnum,
                                  sizeof(*priv->bm_pools), GFP_KERNEL);
    if (!priv->bm_pools)
        return -ENOMEM;

    err = mvpp2_bm_pools_init(dev, priv);
    if (err < 0)
        return err;
    return 0;
}
static void mvpp2_setup_bm_pool(void)
{
    /* Short pool */
    mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
    mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

    /* Long pool */
    mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
    mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

    /* Jumbo pool */
    mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
    mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
                                    int lrxq, int long_pool)
{
    u32 val, mask;
    int prxq;

    /* Get queue physical ID */
    prxq = port->rxqs[lrxq]->id;

    if (port->priv->hw_version == MVPP21)
        mask = MVPP21_RXQ_POOL_LONG_MASK;
    else
        mask = MVPP22_RXQ_POOL_LONG_MASK;

    val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
    val &= ~mask;
    val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
    mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
                                     int lrxq, int short_pool)
{
    u32 val, mask;
    int prxq;

    /* Get queue physical ID */
    prxq = port->rxqs[lrxq]->id;

    if (port->priv->hw_version == MVPP21)
        mask = MVPP21_RXQ_POOL_SHORT_MASK;
    else
        mask = MVPP22_RXQ_POOL_SHORT_MASK;

    val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
    val &= ~mask;
    val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
    mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool,
                             struct page_pool *page_pool,
                             dma_addr_t *buf_dma_addr,
                             phys_addr_t *buf_phys_addr,
                             gfp_t gfp_mask)
{
    dma_addr_t dma_addr;
    struct page *page;
    void *data;

    data = mvpp2_frag_alloc(bm_pool, page_pool);
    if (!data)
        return NULL;

    if (page_pool) {
        page = (struct page *)data;
        dma_addr = page_pool_get_dma_addr(page);
        data = page_to_virt(page);
    } else {
        dma_addr = dma_map_single(port->dev->dev.parent, data,
                                  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
                                  DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
            mvpp2_frag_free(bm_pool, NULL, data);
            return NULL;
        }
    }
    *buf_dma_addr = dma_addr;
    *buf_phys_addr = virt_to_phys(data);

    return data;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
                                     dma_addr_t buf_dma_addr,
                                     phys_addr_t buf_phys_addr)
{
    unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
    unsigned long flags = 0;

    if (test_bit(thread, &port->priv->lock_map))
        spin_lock_irqsave(&port->bm_lock[thread], flags);

    if (port->priv->hw_version == MVPP22) {
        u32 val = 0;

        if (sizeof(dma_addr_t) == 8)
            val |= upper_32_bits(buf_dma_addr) &
                   MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

        if (sizeof(phys_addr_t) == 8)
            val |= (upper_32_bits(buf_phys_addr)
                    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
                   MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

        mvpp2_thread_write_relaxed(port->priv, thread,
                                   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
    }

    /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
     * returned in the "cookie" field of the RX
     * descriptor. Instead of storing the virtual address, we
     * store the physical address
     */
    mvpp2_thread_write_relaxed(port->priv, thread,
                               MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
    mvpp2_thread_write_relaxed(port->priv, thread,
                               MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

    if (test_bit(thread, &port->priv->lock_map))
        spin_unlock_irqrestore(&port->bm_lock[thread], flags);

    put_cpu();
}
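
/* Illustrative note (not from the original source): because the "cookie"
 * programmed here is the buffer's physical address, the free path can
 * recover the kernel virtual address with phys_to_virt(), as
 * mvpp2_bm_bufs_free() above does, instead of tracking a virtual pointer
 * per buffer.
 */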
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool, int buf_num)
{
    int i, buf_size, total_size;
    dma_addr_t dma_addr;
    phys_addr_t phys_addr;
    struct page_pool *pp = NULL;
    void *buf;

    if (port->priv->percpu_pools &&
        bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
        netdev_err(port->dev,
                   "attempted to use jumbo frames with per-cpu pools");
        return 0;
    }

    buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
    total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

    if (buf_num < 0 ||
        (buf_num + bm_pool->buf_num > bm_pool->size)) {
        netdev_err(port->dev,
                   "cannot allocate %d buffers for pool %d\n",
                   buf_num, bm_pool->id);
        return 0;
    }

    if (port->priv->percpu_pools)
        pp = port->priv->page_pool[bm_pool->id];
    for (i = 0; i < buf_num; i++) {
        buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
                              &phys_addr, GFP_KERNEL);
        if (!buf)
            break;

        mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
                          phys_addr);
    }

    /* Update BM driver with number of buffers added to pool */
    bm_pool->buf_num += i;

    netdev_dbg(port->dev,
               "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
               bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

    netdev_dbg(port->dev,
               "pool %d: %d of %d buffers added\n",
               bm_pool->id, i, buf_num);
    return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
    struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
    int num;

    if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
        (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
        netdev_err(port->dev, "Invalid pool %d\n", pool);
        return NULL;
    }

    /* Allocate buffers in case BM pool is used as long pool, but packet
     * size doesn't match MTU or BM pool hasn't been used yet
     */
    if (new_pool->pkt_size == 0) {
        int pkts_num;

        /* Set default buffer number or free all the buffers in case
         * the pool is not empty
         */
        pkts_num = new_pool->buf_num;
        if (pkts_num == 0) {
            if (port->priv->percpu_pools) {
                if (pool < port->nrxqs)
                    pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
                else
                    pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
            } else {
                pkts_num = mvpp2_pools[pool].buf_num;
            }
        } else {
            mvpp2_bm_bufs_free(port->dev->dev.parent,
                               port->priv, new_pool, pkts_num);
        }

        new_pool->pkt_size = pkt_size;
        new_pool->frag_size =
            SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
            MVPP2_SKB_SHINFO_SIZE;

        /* Allocate buffers for this pool */
        num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
        if (num != pkts_num) {
            WARN(1, "pool %d: %d of %d allocated\n",
                 new_pool->id, num, pkts_num);
            return NULL;
        }
    }

    mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
                              MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

    return new_pool;
}
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
                         unsigned int pool, int pkt_size)
{
    struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
    int num;

    if (pool > port->nrxqs * 2) {
        netdev_err(port->dev, "Invalid pool %d\n", pool);
        return NULL;
    }

    /* Allocate buffers in case BM pool is used as long pool, but packet
     * size doesn't match MTU or BM pool hasn't been used yet
     */
    if (new_pool->pkt_size == 0) {
        int pkts_num;

        /* Set default buffer number or free all the buffers in case
         * the pool is not empty
         */
        pkts_num = new_pool->buf_num;
        if (pkts_num == 0)
            pkts_num = mvpp2_pools[type].buf_num;
        else
            mvpp2_bm_bufs_free(port->dev->dev.parent,
                               port->priv, new_pool, pkts_num);

        new_pool->pkt_size = pkt_size;
        new_pool->frag_size =
            SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
            MVPP2_SKB_SHINFO_SIZE;

        /* Allocate buffers for this pool */
        num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
        if (num != pkts_num) {
            WARN(1, "pool %d: %d of %d allocated\n",
                 new_pool->id, num, pkts_num);
            return NULL;
        }
    }

    mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
                              MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

    return new_pool;
}
/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
    enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
    int rxq;

    /* If port pkt_size is higher than 1518B:
     * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
     * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
     */
    if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
        long_log_pool = MVPP2_BM_JUMBO;
        short_log_pool = MVPP2_BM_LONG;
    } else {
        long_log_pool = MVPP2_BM_LONG;
        short_log_pool = MVPP2_BM_SHORT;
    }

    if (!port->pool_long) {
        port->pool_long =
            mvpp2_bm_pool_use(port, long_log_pool,
                              mvpp2_pools[long_log_pool].pkt_size);
        if (!port->pool_long)
            return -ENOMEM;

        port->pool_long->port_map |= BIT(port->id);

        for (rxq = 0; rxq < port->nrxqs; rxq++)
            mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
    }

    if (!port->pool_short) {
        port->pool_short =
            mvpp2_bm_pool_use(port, short_log_pool,
                              mvpp2_pools[short_log_pool].pkt_size);
        if (!port->pool_short)
            return -ENOMEM;

        port->pool_short->port_map |= BIT(port->id);

        for (rxq = 0; rxq < port->nrxqs; rxq++)
            mvpp2_rxq_short_pool_set(port, rxq,
                                     port->pool_short->id);
    }

    return 0;
}
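
/* Worked example (illustrative): with the default MTU the port's pkt_size is
 * at most MVPP2_BM_LONG_PKT_SIZE, so the HW long/short pools map to the SW
 * long/short pools; after a jumbo MTU change, mvpp2_bm_update_mtu() below
 * re-runs this init so that HW long maps to the SW jumbo pool and HW short
 * to the SW long pool.
 */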
/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
    struct mvpp2_bm_pool *bm_pool;
    int i;

    for (i = 0; i < port->nrxqs; i++) {
        bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
                                           mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
        if (!bm_pool)
            return -ENOMEM;

        bm_pool->port_map |= BIT(port->id);
        mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
    }

    for (i = 0; i < port->nrxqs; i++) {
        bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
                                           mvpp2_pools[MVPP2_BM_LONG].pkt_size);
        if (!bm_pool)
            return -ENOMEM;

        bm_pool->port_map |= BIT(port->id);
        mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
    }

    port->pool_long = NULL;
    port->pool_short = NULL;

    return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
    if (port->priv->percpu_pools)
        return mvpp2_swf_bm_pool_init_percpu(port);

    return mvpp2_swf_bm_pool_init_shared(port);
}
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
                              enum mvpp2_bm_pool_log_num new_long_pool)
{
    const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

    /* Update L4 checksum when jumbo enable/disable on port.
     * Only port 0 supports hardware checksum offload due to
     * the Tx FIFO size limitation.
     * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
     * has 7 bits, so the maximum L3 offset is 127.
     */
    if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
        port->dev->features &= ~csums;
        port->dev->hw_features &= ~csums;
    } else {
        port->dev->features |= csums;
        port->dev->hw_features |= csums;
    }
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
    struct mvpp2_port *port = netdev_priv(dev);
    enum mvpp2_bm_pool_log_num new_long_pool;
    int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

    if (port->priv->percpu_pools)
        goto out_set;

    /* If port MTU is higher than 1518B:
     * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
     * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
     */
    if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
        new_long_pool = MVPP2_BM_JUMBO;
    else
        new_long_pool = MVPP2_BM_LONG;

    if (new_long_pool != port->pool_long->id) {
        /* Remove port from old short & long pool */
        port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
                                            port->pool_long->pkt_size);
        port->pool_long->port_map &= ~BIT(port->id);
        port->pool_long = NULL;

        port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
                                             port->pool_short->pkt_size);
        port->pool_short->port_map &= ~BIT(port->id);
        port->pool_short = NULL;

        port->pkt_size = pkt_size;

        /* Add port to new short & long pool */
        mvpp2_swf_bm_pool_init(port);

        mvpp2_set_hw_csum(port, new_long_pool);
    }

out_set:
    dev->mtu = mtu;
    dev->wanted_features = dev->features;

    netdev_update_features(dev);
    return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
    int i, sw_thread_mask = 0;

    for (i = 0; i < port->nqvecs; i++)
        sw_thread_mask |= port->qvecs[i].sw_thread_mask;

    mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
                MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
    int i, sw_thread_mask = 0;

    for (i = 0; i < port->nqvecs; i++)
        sw_thread_mask |= port->qvecs[i].sw_thread_mask;

    mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
                MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
    struct mvpp2_port *port = qvec->port;

    mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
                MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
    struct mvpp2_port *port = qvec->port;

    mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
                MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
    struct mvpp2_port *port = arg;

    /* If the thread isn't used, don't do anything */
    if (smp_processor_id() > port->priv->nthreads)
        return;

    mvpp2_thread_write(port->priv,
                       mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
                       MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
    struct mvpp2_port *port = arg;
    u32 val;

    /* If the thread isn't used, don't do anything */
    if (smp_processor_id() > port->priv->nthreads)
        return;

    val = MVPP2_CAUSE_MISC_SUM_MASK |
          MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
    if (port->has_tx_irqs)
        val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

    mvpp2_thread_write(port->priv,
                       mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
                       MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
    u32 val;
    int i;

    if (port->priv->hw_version != MVPP22)
        return;

    if (mask)
        val = 0;
    else
        val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

    for (i = 0; i < port->nqvecs; i++) {
        struct mvpp2_queue_vector *v = port->qvecs + i;

        if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
            continue;

        mvpp2_thread_write(port->priv, v->sw_thread_id,
                           MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
    }
}
/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
    return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
    return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
    return interface == PHY_INTERFACE_MODE_10GBASER ||
           interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
    u32 old, val;

    old = val = readl(ptr);
    val &= ~mask;
    val |= set;
    if (old != val)
        writel(val, ptr);
}
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
    struct mvpp2 *priv = port->priv;
    u32 val;

    regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
    val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
    regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

    regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
    if (port->gop_id == 2)
        val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
    else if (port->gop_id == 3)
        val |= GENCONF_CTRL0_PORT1_RGMII_MII;
    regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
    struct mvpp2 *priv = port->priv;
    u32 val;

    regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
    val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
           GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
    regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

    if (port->gop_id > 1) {
        regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
        if (port->gop_id == 2)
            val &= ~GENCONF_CTRL0_PORT0_RGMII;
        else if (port->gop_id == 3)
            val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
        regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
    }
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
    struct mvpp2 *priv = port->priv;
    void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
    void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
    u32 val;

    val = readl(xpcs + MVPP22_XPCS_CFG0);
    val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
             MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
    val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
    writel(val, xpcs + MVPP22_XPCS_CFG0);

    val = readl(mpcs + MVPP22_MPCS_CTRL);
    val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
    writel(val, mpcs + MVPP22_MPCS_CTRL);

    val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
    val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
    val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
    writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
{
    struct mvpp2 *priv = port->priv;
    u32 val;

    if (!priv->sysctrl_base)
        return 0;

    switch (port->phy_interface) {
    case PHY_INTERFACE_MODE_RGMII:
    case PHY_INTERFACE_MODE_RGMII_ID:
    case PHY_INTERFACE_MODE_RGMII_RXID:
    case PHY_INTERFACE_MODE_RGMII_TXID:
        if (!mvpp2_port_supports_rgmii(port))
            goto invalid_conf;
        mvpp22_gop_init_rgmii(port);
        break;
    case PHY_INTERFACE_MODE_SGMII:
    case PHY_INTERFACE_MODE_1000BASEX:
    case PHY_INTERFACE_MODE_2500BASEX:
        mvpp22_gop_init_sgmii(port);
        break;
    case PHY_INTERFACE_MODE_10GBASER:
        if (!mvpp2_port_supports_xlg(port))
            goto invalid_conf;
        mvpp22_gop_init_10gkr(port);
        break;
    default:
        goto unsupported_conf;
    }

    regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
    val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
           GENCONF_PORT_CTRL1_EN(port->gop_id);
    regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

    regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
    val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
    regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

    regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
    val |= GENCONF_SOFT_RESET1_GOP;
    regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
    return 0;

invalid_conf:
    netdev_err(port->dev, "Invalid port configuration\n");
    return -EINVAL;
}
static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
    u32 val;

    if (phy_interface_mode_is_rgmii(port->phy_interface) ||
        phy_interface_mode_is_8023z(port->phy_interface) ||
        port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
        /* Enable the GMAC link status irq for this port */
        val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
        val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
        writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
    }

    if (mvpp2_port_supports_xlg(port)) {
        /* Enable the XLG/GIG irqs for this port */
        val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
        if (mvpp2_is_xlg(port->phy_interface))
            val |= MVPP22_XLG_EXT_INT_MASK_XLG;
        else
            val |= MVPP22_XLG_EXT_INT_MASK_GIG;
        writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
    }
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
    u32 val;

    if (mvpp2_port_supports_xlg(port)) {
        val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
        val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
                 MVPP22_XLG_EXT_INT_MASK_GIG);
        writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
    }

    if (phy_interface_mode_is_rgmii(port->phy_interface) ||
        phy_interface_mode_is_8023z(port->phy_interface) ||
        port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
        val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
        val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
        writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
    }
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
    u32 val;

    if (port->phylink ||
        phy_interface_mode_is_rgmii(port->phy_interface) ||
        phy_interface_mode_is_8023z(port->phy_interface) ||
        port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
        val = readl(port->base + MVPP22_GMAC_INT_MASK);
        val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
        writel(val, port->base + MVPP22_GMAC_INT_MASK);
    }

    if (mvpp2_port_supports_xlg(port)) {
        val = readl(port->base + MVPP22_XLG_INT_MASK);
        val |= MVPP22_XLG_INT_MASK_LINK;
        writel(val, port->base + MVPP22_XLG_INT_MASK);
    }

    mvpp22_gop_unmask_irq(port);
}
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
    int ret;

    if (!port->comphy)
        return 0;

    ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
                           port->phy_interface);
    if (ret)
        return ret;

    return phy_power_on(port->comphy);
}
static void mvpp2_port_enable(struct mvpp2_port *port)
{
    u32 val;

    if (mvpp2_port_supports_xlg(port) &&
        mvpp2_is_xlg(port->phy_interface)) {
        val = readl(port->base + MVPP22_XLG_CTRL0_REG);
        val |= MVPP22_XLG_CTRL0_PORT_EN;
        val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
        writel(val, port->base + MVPP22_XLG_CTRL0_REG);
    } else {
        val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
        val |= MVPP2_GMAC_PORT_EN_MASK;
        val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
        writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
    }
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
    u32 val;

    if (mvpp2_port_supports_xlg(port) &&
        mvpp2_is_xlg(port->phy_interface)) {
        val = readl(port->base + MVPP22_XLG_CTRL0_REG);
        val &= ~MVPP22_XLG_CTRL0_PORT_EN;
        writel(val, port->base + MVPP22_XLG_CTRL0_REG);
    }

    val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
    val &= ~(MVPP2_GMAC_PORT_EN_MASK);
    writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
    u32 val;

    val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
          ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
    writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
                                    const struct phylink_link_state *state)
{
    u32 val;

    val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

    if (state->speed == 1000)
        val |= MVPP2_GMAC_GMII_LB_EN_MASK;
    else
        val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

    if (phy_interface_mode_is_8023z(port->phy_interface) ||
        port->phy_interface == PHY_INTERFACE_MODE_SGMII)
        val |= MVPP2_GMAC_PCS_LB_EN_MASK;
    else
        val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

    writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
enum {
    ETHTOOL_XDP_REDIRECT,
    ETHTOOL_XDP_PASS,
    ETHTOOL_XDP_DROP,
    ETHTOOL_XDP_TX,
    ETHTOOL_XDP_TX_ERR,
    ETHTOOL_XDP_XMIT,
    ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
    unsigned int offset;
    const char string[ETH_GSTRING_LEN];
    bool reg_is_64b;
};
static u64 mvpp2_read_count(struct mvpp2_port *port,
                            const struct mvpp2_ethtool_counter *counter)
{
    u64 val;

    val = readl(port->stats_base + counter->offset);
    if (counter->reg_is_64b)
        val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

    return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access; it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
    mvpp2_write(priv, MVPP2_CTRS_IDX, index);
    return mvpp2_read(priv, reg);
}
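
/* Illustrative sketch (not from the original source): reading one of the
 * per-queue counters defined below always goes through the index register
 * first, e.g.
 *
 *	u32 drops = mvpp2_read_index(priv, port->first_rxq + q,
 *				     MVPP2_RX_PKTS_BM_DROP_CTR);
 *
 * which mirrors how mvpp2_read_stats() walks the rxq/txq counter tables.
 */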
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
    { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
    { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
    { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
    { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
    { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
    { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
    { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
    { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
    { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
    { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
    { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
    { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
    { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
    { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
    { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
    { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
    { MVPP2_MIB_FC_SENT, "fc_sent" },
    { MVPP2_MIB_FC_RCVD, "fc_received" },
    { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
    { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
    { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
    { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
    { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
    { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
    { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
    { MVPP2_MIB_COLLISION, "collision" },
    { MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
    { MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
    { MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
    { MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
    { MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
    { MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
    { MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
    { MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
    { MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
    { MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
    { MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
    { MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
    { MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
    { MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
    { MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
    { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
    { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
    { ETHTOOL_XDP_PASS, "rx_xdp_pass", },
    { ETHTOOL_XDP_DROP, "rx_xdp_drop", },
    { ETHTOOL_XDP_TX, "rx_xdp_tx", },
    { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
    { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
    { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) \
    (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
     ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
     (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
     (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
     ARRAY_SIZE(mvpp2_ethtool_xdp))
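
/* Worked example (illustrative): counting the tables above gives 27 MIB
 * counters, 2 port counters, 9 per-txq counters, 4 per-rxq counters and 7
 * XDP counters, so a port with 8 TXQs and 4 RXQs exposes
 * 27 + 2 + 9 * 8 + 4 * 4 + 7 = 124 ethtool statistics.
 */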
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
                                      u8 *data)
{
    struct mvpp2_port *port = netdev_priv(netdev);
    int i, q;

    if (sset != ETH_SS_STATS)
        return;

    for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
        strscpy(data, mvpp2_ethtool_mib_regs[i].string,
                ETH_GSTRING_LEN);
        data += ETH_GSTRING_LEN;
    }

    for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
        strscpy(data, mvpp2_ethtool_port_regs[i].string,
                ETH_GSTRING_LEN);
        data += ETH_GSTRING_LEN;
    }

    for (q = 0; q < port->ntxqs; q++) {
        for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
            snprintf(data, ETH_GSTRING_LEN,
                     mvpp2_ethtool_txq_regs[i].string, q);
            data += ETH_GSTRING_LEN;
        }
    }

    for (q = 0; q < port->nrxqs; q++) {
        for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
            snprintf(data, ETH_GSTRING_LEN,
                     mvpp2_ethtool_rxq_regs[i].string,
                     q);
            data += ETH_GSTRING_LEN;
        }
    }

    for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
        strscpy(data, mvpp2_ethtool_xdp[i].string,
                ETH_GSTRING_LEN);
        data += ETH_GSTRING_LEN;
    }
}
static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{
    unsigned int start;
    unsigned int cpu;

    /* Gather XDP Statistics */
    for_each_possible_cpu(cpu) {
        struct mvpp2_pcpu_stats *cpu_stats;
        u64 xdp_redirect;
        u64 xdp_pass;
        u64 xdp_drop;
        u64 xdp_xmit;
        u64 xdp_xmit_err;
        u64 xdp_tx;
        u64 xdp_tx_err;

        cpu_stats = per_cpu_ptr(port->stats, cpu);
        do {
            start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
            xdp_redirect = cpu_stats->xdp_redirect;
            xdp_pass = cpu_stats->xdp_pass;
            xdp_drop = cpu_stats->xdp_drop;
            xdp_xmit = cpu_stats->xdp_xmit;
            xdp_xmit_err = cpu_stats->xdp_xmit_err;
            xdp_tx = cpu_stats->xdp_tx;
            xdp_tx_err = cpu_stats->xdp_tx_err;
        } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

        xdp_stats->xdp_redirect += xdp_redirect;
        xdp_stats->xdp_pass += xdp_pass;
        xdp_stats->xdp_drop += xdp_drop;
        xdp_stats->xdp_xmit += xdp_xmit;
        xdp_stats->xdp_xmit_err += xdp_xmit_err;
        xdp_stats->xdp_tx += xdp_tx;
        xdp_stats->xdp_tx_err += xdp_tx_err;
    }
}
static void mvpp2_read_stats(struct mvpp2_port *port)
{
    struct mvpp2_pcpu_stats xdp_stats = {};
    const struct mvpp2_ethtool_counter *s;
    u64 *pstats;
    int i, q;

    pstats = port->ethtool_stats;

    for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
        *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

    for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
        *pstats++ += mvpp2_read(port->priv,
                                mvpp2_ethtool_port_regs[i].offset +
                                4 * port->id);

    for (q = 0; q < port->ntxqs; q++)
        for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
            *pstats++ += mvpp2_read_index(port->priv,
                                          MVPP22_CTRS_TX_CTR(port->id, q),
                                          mvpp2_ethtool_txq_regs[i].offset);

    /* Rxqs are numbered from 0 from the user standpoint, but not from the
     * driver's. We need to add the port->first_rxq offset.
     */
    for (q = 0; q < port->nrxqs; q++)
        for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
            *pstats++ += mvpp2_read_index(port->priv,
                                          port->first_rxq + q,
                                          mvpp2_ethtool_rxq_regs[i].offset);

    /* Gather XDP Statistics */
    mvpp2_get_xdp_stats(port, &xdp_stats);

    for (i = 0, s = mvpp2_ethtool_xdp;
         s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
         s++, i++) {
        switch (s->offset) {
        case ETHTOOL_XDP_REDIRECT:
            *pstats++ = xdp_stats.xdp_redirect;
            break;
        case ETHTOOL_XDP_PASS:
            *pstats++ = xdp_stats.xdp_pass;
            break;
        case ETHTOOL_XDP_DROP:
            *pstats++ = xdp_stats.xdp_drop;
            break;
        case ETHTOOL_XDP_TX:
            *pstats++ = xdp_stats.xdp_tx;
            break;
        case ETHTOOL_XDP_TX_ERR:
            *pstats++ = xdp_stats.xdp_tx_err;
            break;
        case ETHTOOL_XDP_XMIT:
            *pstats++ = xdp_stats.xdp_xmit;
            break;
        case ETHTOOL_XDP_XMIT_ERR:
            *pstats++ = xdp_stats.xdp_xmit_err;
            break;
        }
    }
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
    struct delayed_work *del_work = to_delayed_work(work);
    struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
                                           stats_work);

    mutex_lock(&port->gather_stats_lock);

    mvpp2_read_stats(port);

    /* No need to read again the counters right after this function if it
     * was called asynchronously by the user (ie. use of ethtool).
     */
    cancel_delayed_work(&port->stats_work);
    queue_delayed_work(port->priv->stats_queue, &port->stats_work,
                       MVPP2_MIB_COUNTERS_STATS_DELAY);

    mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
                                    struct ethtool_stats *stats, u64 *data)
{
    struct mvpp2_port *port = netdev_priv(dev);

    /* Update statistics for the given port, then take the lock to avoid
     * concurrent accesses on the ethtool_stats structure during its copy.
     */
    mvpp2_gather_hw_statistics(&port->stats_work.work);

    mutex_lock(&port->gather_stats_lock);
    memcpy(data, port->ethtool_stats,
           sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
    mutex_unlock(&port->gather_stats_lock);
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
    struct mvpp2_port *port = netdev_priv(dev);

    if (sset == ETH_SS_STATS)
        return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

    return -EOPNOTSUPP;
}
static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
    u32 val;

    val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
          MVPP2_GMAC_PORT_RESET_MASK;
    writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

    if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
        val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
              ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
        writel(val, port->base + MVPP22_XLG_CTRL0_REG);
    }
}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
    struct mvpp2 *priv = port->priv;
    void __iomem *mpcs, *xpcs;
    u32 val;

    if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
        return;

    mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
    xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

    val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
    val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
    val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
    writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

    val = readl(xpcs + MVPP22_XPCS_CFG0);
    writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}

static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
    struct mvpp2 *priv = port->priv;
    void __iomem *mpcs, *xpcs;
    u32 val;

    if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
        return;

    mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
    xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

    switch (port->phy_interface) {
    case PHY_INTERFACE_MODE_10GBASER:
        val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
        val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
               MAC_CLK_RESET_SD_TX;
        val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
        writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
        break;
    case PHY_INTERFACE_MODE_XAUI:
    case PHY_INTERFACE_MODE_RXAUI:
        val = readl(xpcs + MVPP22_XPCS_CFG0);
        writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
        break;
    default:
        break;
    }
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
    u32 val;

    val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
    val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
    val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
            MVPP2_GMAC_MAX_RX_SIZE_OFFS);
    writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
    u32 val;

    val = readl(port->base + MVPP22_XLG_CTRL1_REG);
    val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
    val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
           MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
    writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
    int tx_port_num, val, queue, lrxq;

    if (port->priv->hw_version == MVPP21) {
        /* Update TX FIFO MIN Threshold */
        val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
        val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
        /* Min. TX threshold must be less than minimal packet length */
        val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
        writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
    }

    /* Disable Legacy WRR, Disable EJP, Release from reset */
    tx_port_num = mvpp2_egress_port(port);
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
                tx_port_num);
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

    /* Set TXQ scheduling to Round-Robin */
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

    /* Close bandwidth for all queues */
    for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
        mvpp2_write(port->priv,
                    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

    /* Set refill period to 1 usec, refill tokens
     * and bucket size to maximum
     */
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
                port->priv->tclk / USEC_PER_SEC);
    val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
    val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
    val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
    val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
    val = MVPP2_TXP_TOKEN_SIZE_MAX;
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

    /* Set MaximumLowLatencyPacketSize value to 256 */
    mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
                MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
                MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

    /* Enable Rx cache snoop */
    for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
        queue = port->rxqs[lrxq]->id;
        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
        val |= MVPP2_SNOOP_PKT_SIZE_MASK |
               MVPP2_SNOOP_BUF_HDR_MASK;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
    }

    /* At default, mask all interrupts to all present cpus */
    mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
    u32 val;
    int lrxq, queue;

    for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
        queue = port->rxqs[lrxq]->id;
        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
        val &= ~MVPP2_RXQ_DISABLE_MASK;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
    }
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
    u32 val;
    int lrxq, queue;

    for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
        queue = port->rxqs[lrxq]->id;
        val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
        val |= MVPP2_RXQ_DISABLE_MASK;
        mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
    }
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
    u32 qmap;
    int queue;
    int tx_port_num = mvpp2_egress_port(port);

    /* Enable all initialized TXs. */
    qmap = 0;
    for (queue = 0; queue < port->ntxqs; queue++) {
        struct mvpp2_tx_queue *txq = port->txqs[queue];

        if (txq->descs)
            qmap |= (1 << queue);
    }

    mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
    u32 reg_data;
    int delay;
    int tx_port_num = mvpp2_egress_port(port);

    /* Issue stop command for active channels only */
    mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
    reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
               MVPP2_TXP_SCHED_ENQ_MASK;
    if (reg_data != 0)
        mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
                    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

    /* Wait for all Tx activity to terminate. */
    delay = 0;
    do {
        if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
            netdev_warn(port->dev,
                        "Tx stop timed out, status=0x%08x\n",
                        reg_data);
            break;
        }
        mdelay(1);
        delay++;

        /* Check port TX Command register that all
         * Tx queues are stopped
         */
        reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
    } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
2043 /* Rx descriptors helper methods */
2045 /* Get number of Rx descriptors occupied by received packets */
2047 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2049 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2051 return val & MVPP2_RXQ_OCCUPIED_MASK;
2054 /* Update Rx queue status with the number of occupied and available
2055 * Rx descriptor slots.
2058 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2059 int used_count, int free_count)
2061 /* Decrement the number of used descriptors and increment count
2062 * increment the number of free descriptors.
2064 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2066 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2069 /* Get pointer to next RX descriptor to be processed by SW */
2070 static inline struct mvpp2_rx_desc *
2071 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2073 int rx_desc = rxq->next_desc_to_proc;
2075 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2076 prefetch(rxq->descs + rxq->next_desc_to_proc);
2077 return rxq->descs + rx_desc;
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
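/* Example: a headroom of 224 bytes would be programmed as 224 >> 5 = 7
 * units of 32 bytes; offsets that are not a multiple of 32 are silently
 * truncated by the shift. (Illustrative value; the real headroom is
 * MVPP2_SKB_HEADROOM from mvpp2.h.)
 */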
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		unsigned int thread =
			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
		u32 val = mvpp2_read_relaxed(port->priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(thread));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}
/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2 *priv = port->priv;
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, desc_count;
	unsigned int thread;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for (thread = 0; thread < port->priv->nthreads; thread++) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
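/* Example of the chunked reservation above, assuming a chunk size
 * (MVPP2_CPU_DESC_CHUNK) of 64: a thread that needs 3 descriptors with
 * none reserved requests max(64, 3) = 64, so subsequent small transmits
 * are served from the per-thread reservation without touching the
 * shared counters again. (Illustrative; see mvpp2.h for the actual
 * chunk size.)
 */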
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
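/* Example: for a TCP segment in an IPv4 packet whose L3 header starts
 * at offset 14 and has a 5-word (20-byte) header, a caller could build
 * the command with
 *
 *	u32 cmd = mvpp2_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP);
 *
 * which enables IPv4 and L4 checksum generation and leaves the L3_IP6
 * and L4_UDP selector bits cleared. (Hypothetical call shown for
 * illustration; mvpp2_skb_tx_csum() below derives the real arguments
 * from the skb.)
 */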
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-thread access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_thread_read_relaxed(port->priv,
					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() >= port->priv->nthreads)
		return;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_thread_read(port->priv,
				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
				  MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
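/* Example of the sizing above (illustrative arithmetic only, assuming
 * pkt_size counts bytes): a 1518-byte packet gives mtu = 1518 * 8 =
 * 12144, tripled to 36432 by the token-bucket workaround; the TXP and
 * every TXQ token size are then raised to at least that value.
 */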
/* Set the number of packets that will be received before an Rx
 * interrupt is generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}
/* For some reason in the LSP this is done on each CPU. Why? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
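/* Example (illustrative, assuming tclk = 250 MHz): a 100 usec
 * coalescing delay maps to
 *
 *	mvpp2_usec_to_cycles(100, 250000000)
 *		= (250000000 * 100) / USEC_PER_SEC = 25000 cycles,
 *
 * and mvpp2_cycles_to_usec(25000, 250000000) gives back 100 usec.
 */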
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}
/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
		    tx_buf->type != MVPP2_TYPE_XDP_TX)
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);
		else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
			 tx_buf->type == MVPP2_TYPE_XDP_NDO)
			xdp_return_frame(tx_buf->xdpf);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  unsigned int thread)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       unsigned int thread, struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
					     &aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(thread));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	struct mvpp2 *priv = port->priv;
	unsigned int thread;
	u32 rxq_dma;
	int err;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	if (priv->percpu_pools) {
		err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
		if (err < 0)
			goto err_free_dma;

		err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
		if (err < 0)
			goto err_unregister_rxq_short;

		/* Every RXQ has a pool for short and another for long packets */
		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
						 MEM_TYPE_PAGE_POOL,
						 priv->page_pool[rxq->logic_rxq]);
		if (err < 0)
			goto err_unregister_rxq_long;

		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
						 MEM_TYPE_PAGE_POOL,
						 priv->page_pool[rxq->logic_rxq +
								 port->nrxqs]);
		if (err < 0)
			goto err_unregister_mem_rxq_short;
	}

	return 0;

err_unregister_mem_rxq_short:
	xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
err_unregister_rxq_long:
	xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
err_unregister_rxq_short:
	xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
err_free_dma:
	dma_free_coherent(port->dev->dev.parent,
			  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
			  rxq->descs, rxq->descs_dma);
	return err;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;

	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
		xdp_rxq_info_unreg(&rxq->xdp_rxq_short);

	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
		xdp_rxq_info_unreg(&rxq->xdp_rxq_long);

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma         = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	unsigned int thread;
	int desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma         = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);

	/* Set Tx descriptors queue starting address and size */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	u32 val;

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_thread_read(port->priv, thread,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;

		/* Assign this queue to a CPU */
		if (queue < num_possible_cpus())
			netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   phy_interface_mode_is_8023z(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		goto handled;
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}
static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct net_device *dev;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int tx_todo, cause;

	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
	dev = port_pcpu->dev;

	if (!netif_running(dev))
		return HRTIMER_NORESTART;

	port_pcpu->timer_scheduled = false;
	port = netdev_priv(dev);

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause,
				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));

	/* Set the timer in case not all the packets were processed */
	if (tx_todo && !port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		hrtimer_forward_now(&port_pcpu->tx_done_timer,
				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);

		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
	char *err_str = NULL;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		err_str = "crc";
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		err_str = "overrun";
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		err_str = "resource";
		break;
	}
	if (err_str && net_ratelimit())
		netdev_err(port->dev,
			   "bad rx status %08x (%s error), size=%zu\n",
			   status, err_str, sz);
}
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   struct page_pool *page_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
			      &dma_addr, &phys_addr, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_tx_queue *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_queue *txq;
	struct netdev_queue *nq;

	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	nq = netdev_get_tx_queue(port->dev, txq_id);
	aggr_txq = &port->priv->aggr_txqs[thread];

	txq_pcpu->reserved_num -= nxmit;
	txq_pcpu->count += nxmit;
	aggr_txq->count += nxmit;

	/* Enable transmit */
	wmb();
	mvpp2_aggr_txq_pend_desc_add(port, nxmit);

	if (txq_pcpu->count >= txq_pcpu->stop_threshold)
		netif_tx_stop_queue(nq);

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);
}
static int
mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
		       struct xdp_frame *xdpf, bool dma_map)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
		     MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
	enum mvpp2_tx_buf_type buf_type;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_queue *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	struct mvpp2_tx_queue *txq;
	int ret = MVPP2_XDP_TX;
	dma_addr_t dma_addr;

	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
		ret = MVPP2_XDP_DROPPED;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);

	if (dma_map) {
		/* XDP_REDIRECT or AF_XDP */
		dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_txq_desc_put(txq);
			ret = MVPP2_XDP_DROPPED;
			goto out;
		}

		buf_type = MVPP2_TYPE_XDP_NDO;
	} else {
		/* XDP_TX */
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) +
			   sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		buf_type = MVPP2_TYPE_XDP_TX;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);

out:
	return ret;
}
static int
mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
{
	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
	struct xdp_frame *xdpf;
	u16 txq_id;
	int ret;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return MVPP2_XDP_DROPPED;

	/* The first half of the TX queues is used for XPS,
	 * the second half for XDP_TX
	 */
	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);

	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
	if (ret == MVPP2_XDP_TX) {
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += xdpf->len;
		stats->tx_packets++;
		stats->xdp_tx++;
		u64_stats_update_end(&stats->syncp);

		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
	} else {
		u64_stats_update_begin(&stats->syncp);
		stats->xdp_tx_err++;
		u64_stats_update_end(&stats->syncp);
	}

	return ret;
}
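/* Example of the queue split above (illustrative numbers): with
 * ntxqs = 8 on a four-thread system, thread 2 submits XDP_TX frames on
 * txq_id = 2 + 8 / 2 = 6, leaving queues 0-3 to the XPS-mapped regular
 * stack traffic.
 */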
static int
mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
	       struct xdp_frame **frames, u32 flags)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int i, nxmit_byte = 0, nxmit = num_frame;
	struct mvpp2_pcpu_stats *stats;
	u16 txq_id;
	u32 ret;

	if (unlikely(test_bit(0, &port->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* The first half of the TX queues is used for XPS,
	 * the second half for XDP_TX
	 */
	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);

	for (i = 0; i < num_frame; i++) {
		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
		if (ret == MVPP2_XDP_TX) {
			nxmit_byte += frames[i]->len;
		} else {
			xdp_return_frame_rx_napi(frames[i]);
			nxmit--;
		}
	}

	if (likely(nxmit > 0))
		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);

	stats = this_cpu_ptr(port->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_bytes += nxmit_byte;
	stats->tx_packets += nxmit;
	stats->xdp_xmit += nxmit;
	stats->xdp_xmit_err += num_frame - nxmit;
	u64_stats_update_end(&stats->syncp);

	return nxmit;
}
static u32
mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
	      struct bpf_prog *prog, struct xdp_buff *xdp,
	      struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
{
	unsigned int len, sync, err;
	struct page *page;
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail, the DMA sync for_device must cover the
	 * maximum length the CPU may have touched.
	 */
	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
		ret = MVPP2_XDP_PASS;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(port->dev, xdp, prog);
		if (unlikely(err)) {
			ret = MVPP2_XDP_DROPPED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		} else {
			ret = MVPP2_XDP_REDIR;
			stats->xdp_redirect++;
		}
		break;
	case XDP_TX:
		ret = mvpp2_xdp_xmit_back(port, xdp);
		if (ret != MVPP2_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(pp, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(port->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(pp, page, sync, true);
		ret = MVPP2_XDP_DROPPED;
		stats->xdp_drop++;
		break;
	}

	return ret;
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	struct mvpp2_pcpu_stats ps = {};
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int rx_received;
	int rx_done = 0;
	u32 xdp_ret = 0;

	rcu_read_lock();

	xdp_prog = READ_ONCE(port->xdp_prog);

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct page_pool *pp = NULL;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err, ret;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
			goto err_drop_frame;

		if (port->priv->percpu_pools) {
			pp = port->priv->page_pool[pool];
			dma_dir = page_pool_get_dma_dir(pp);
		} else {
			dma_dir = DMA_FROM_DEVICE;
		}

		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
					rx_bytes + MVPP2_MH_SIZE,
					dma_dir);

		/* Prefetch header */
		prefetch(data);

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		if (xdp_prog) {
			xdp.data_hard_start = data;
			xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
			xdp.data_end = xdp.data + rx_bytes;
			xdp.frame_sz = PAGE_SIZE;

			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
				xdp.rxq = &rxq->xdp_rxq_short;
			else
				xdp.rxq = &rxq->xdp_rxq_long;

			xdp_set_data_meta_invalid(&xdp);

			ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);

			if (ret) {
				xdp_ret |= ret;
				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
				if (err) {
					netdev_err(port->dev, "failed to refill BM pools\n");
					goto err_drop_frame;
				}

				ps.rx_packets++;
				ps.rx_bytes += rx_bytes;
				continue;
			}
		}

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pp, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			dev_kfree_skb_any(skb);
			goto err_drop_frame;
		}

		if (pp)
			page_pool_release_page(pp, virt_to_page(data));
		else
			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
					       bm_pool->buf_size, DMA_FROM_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);

		ps.rx_packets++;
		ps.rx_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
		continue;

err_drop_frame:
		dev->stats.rx_errors++;
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
	}

	rcu_read_unlock();

	if (xdp_ret & MVPP2_XDP_REDIR)
		xdp_do_flush_map();

	if (ps.rx_packets) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += ps.rx_packets;
		stats->rx_bytes += ps.rx_bytes;
		/* xdp */
		stats->xdp_redirect += ps.xdp_redirect;
		stats->xdp_pass += ps.xdp_pass;
		stats->xdp_drop += ps.xdp_drop;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
	return 0;
}
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int hdr_sz, i, len, descs = 0;
	struct tso_t tso;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	hdr_sz = tso_start(skb, &tso);

	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;

		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
/* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	unsigned long flags = 0;
	unsigned int thread;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->tx_lock[thread], flags);

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);

		if (!port_pcpu->timer_scheduled) {
			port_pcpu->timer_scheduled = true;
			hrtimer_start(&port_pcpu->tx_done_timer,
				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
				      HRTIMER_MODE_REL_PINNED_SOFT);
		}
	}

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->tx_lock[thread], flags);

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		rx_done = budget;

		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
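/* Example of decoding the cause register read in mvpp2_poll()
 * (illustrative value only): a raw value of 0x00030005 means Rx queues
 * 0 and 2 (bits 0 and 2) and Tx queues 0 and 1 (bits 16 and 17) have
 * work pending; MVPP2_CAUSE_MISC_SUM_MASK picks out the error summary
 * bits handled via mvpp2_cause_error().
 */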
static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	/* Set the GMAC & XLG MAC in reset */
	mvpp2_mac_reset_assert(port);

	/* Set the MPCS and XPCS in reset */
	mvpp22_pcs_reset_assert(port);

	/* comphy reconfiguration */
	mvpp22_comphy_init(port);

	/* gop reconfiguration */
	mvpp22_gop_init(port);

	mvpp22_pcs_reset_deassert(port);

	if (mvpp2_port_supports_xlg(port)) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mvpp2_is_xlg(port->phy_interface))
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all threads */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		phylink_start(port->phylink);
	} else {
		/* Phylink isn't used as of now for ACPI, so the MAC has to be
		 * configured manually when the interface is started. This will
		 * be removed as soon as the phylink ACPI support lands in.
		 */
		struct phylink_link_state state = {
			.interface = port->phy_interface,
		};
		mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
		mvpp2_mac_link_up(&port->phylink_config, NULL,
				  MLO_AN_INBAND, port->phy_interface,
				  SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
	}

	netif_tx_start_all_queues(port->dev);

	clear_bit(0, &port->state);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	set_bit(0, &port->state);

	/* Disable interrupts on all threads */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
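/* Example (illustrative numbers): a requested rx_pending of 100 is
 * rounded up to the next multiple of 16, i.e. 112, and reported via
 * netdev_info(); a tx_pending below MVPP2_MAX_SKB_DESCS is bumped to
 * that minimum (aligned to 32) so that a worst-case TSO burst always
 * fits in the ring.
 */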
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!qv->mask) {
				err = -ENOMEM;
				goto err;
			}

			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
		}

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			unsigned int cpu;

			for_each_present_cpu(cpu) {
				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
				    qv->sw_thread_id)
					cpumask_set_cpu(cpu, qv->mask);
			}

			irq_set_affinity_hint(qv->irq, qv->mask);
		}
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		free_irq(qv->irq, qv);
	}

	return err;
}
static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
static bool mvpp22_rss_is_supported(void)
{
	return queue_mode == MVPP2_QDIST_MULTI_MODE;
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Phylink isn't supported yet in ACPI mode */
	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version == MVPP22 && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* By default, the link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->link_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		err = -ENOENT;
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all threads */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	return 0;
}
4313 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4314 struct netdev_hw_addr_list *list)
4316 struct netdev_hw_addr *ha;
4319 netdev_hw_addr_list_for_each(ha, list) {
4320 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4328 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4330 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4331 mvpp2_prs_vid_enable_filtering(port);
4333 mvpp2_prs_vid_disable_filtering(port);
4335 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4336 MVPP2_PRS_L2_UNI_CAST, enable);
4338 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4339 MVPP2_PRS_L2_MULTI_CAST, enable);
4342 static void mvpp2_set_rx_mode(struct net_device *dev)
4344 struct mvpp2_port *port = netdev_priv(dev);
4346 /* Clear the whole UC and MC list */
4347 mvpp2_prs_mac_del_all(port);
4349 if (dev->flags & IFF_PROMISC) {
4350 mvpp2_set_rx_promisc(port, true);
4354 mvpp2_set_rx_promisc(port, false);
4356 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4357 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4358 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4359 MVPP2_PRS_L2_UNI_CAST, true);
4361 if (dev->flags & IFF_ALLMULTI) {
4362 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4363 MVPP2_PRS_L2_MULTI_CAST, true);
4367 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4368 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4369 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4370 MVPP2_PRS_L2_MULTI_CAST, true);
4373 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4375 const struct sockaddr *addr = p;
4378 if (!is_valid_ether_addr(addr->sa_data))
4379 return -EADDRNOTAVAIL;
4381 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4383 /* Reconfigure the parser to accept the original MAC address */
4384 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4385 netdev_err(dev, "failed to change MAC address\n");
4390 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4391 * then bring all ports up again.
4393 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4395 int numbufs = MVPP2_BM_POOLS_NUM, i;
4396 struct mvpp2_port *port = NULL;
4397 bool status[MVPP2_MAX_PORTS];
4399 for (i = 0; i < priv->port_count; i++) {
4400 port = priv->port_list[i];
4401 status[i] = netif_running(port->dev);
4403 mvpp2_stop(port->dev);
4406 /* nrxqs is the same for all ports */
4407 if (priv->percpu_pools)
4408 numbufs = port->nrxqs * 2;
4410 for (i = 0; i < numbufs; i++)
4411 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4413 devm_kfree(port->dev->dev.parent, priv->bm_pools);
4414 priv->percpu_pools = percpu;
4415 mvpp2_bm_init(port->dev->dev.parent, priv);
4417 for (i = 0; i < priv->port_count; i++) {
4418 port = priv->port_list[i];
4419 mvpp2_swf_bm_pool_init(port);
4421 mvpp2_open(port->dev);
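/* Illustrative sketch only (hypothetical helper, not part of the driver):
 * how many BM pools this function tears down and re-creates. With
 * per-CPU pools each Rx queue owns a short and a long pool, hence
 * nrxqs * 2; in shared mode the fixed MVPP2_BM_POOLS_NUM set is used.
 *
 *	static int mvpp2_bm_pools_in_use(struct mvpp2 *priv, int nrxqs)
 *	{
 *		return priv->percpu_pools ? nrxqs * 2 : MVPP2_BM_POOLS_NUM;
 *	}
 */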
4427 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
4429 struct mvpp2_port *port = netdev_priv(dev);
4430 bool running = netif_running(dev);
4431 struct mvpp2 *priv = port->priv;
4434 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
4435 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
4436 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
4437 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
4440 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
4441 if (port->xdp_prog) {
4442 netdev_err(dev, "Jumbo frames are not supported with XDP\n");
4445 if (priv->percpu_pools) {
4446 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
4447 mvpp2_bm_switch_buffers(priv, false);
4453 for (i = 0; i < priv->port_count; i++)
4454 if (priv->port_list[i] != port &&
4455 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
4456 MVPP2_BM_LONG_PKT_SIZE) {
4461 /* No port is using jumbo frames */
4463 dev_info(port->dev->dev.parent,
4464 "all ports have a low MTU, switching to per-cpu buffers");
4465 mvpp2_bm_switch_buffers(priv, true);
4470 mvpp2_stop_dev(port);
4472 err = mvpp2_bm_update_mtu(dev, mtu);
4474 netdev_err(dev, "failed to change MTU\n");
4475 /* Reconfigure BM to the original MTU */
4476 mvpp2_bm_update_mtu(dev, dev->mtu);
4478 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4482 mvpp2_start_dev(port);
4483 mvpp2_egress_enable(port);
4484 mvpp2_ingress_enable(port);
4490 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
4492 enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
4493 struct mvpp2 *priv = port->priv;
4496 if (!priv->percpu_pools)
4499 if (!priv->page_pool[0])
4502 for (i = 0; i < priv->port_count; i++) {
4503 port = priv->port_list[i];
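/* XDP_TX transmits straight out of Rx buffers, so any attached
 * program forces bidirectional DMA mapping for the pool pages.
 */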
4504 if (port->xdp_prog) {
4505 dma_dir = DMA_BIDIRECTIONAL;
4510 /* All pools are equal in terms of DMA direction */
4511 if (priv->page_pool[0]->p.dma_dir != dma_dir)
4512 err = mvpp2_bm_switch_buffers(priv, true);
4518 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4520 struct mvpp2_port *port = netdev_priv(dev);
4524 for_each_possible_cpu(cpu) {
4525 struct mvpp2_pcpu_stats *cpu_stats;
4531 cpu_stats = per_cpu_ptr(port->stats, cpu);
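/* Take a consistent snapshot under the u64_stats seqcount and
 * retry if a concurrent writer updated the counters meanwhile.
 */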
4533 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4534 rx_packets = cpu_stats->rx_packets;
4535 rx_bytes = cpu_stats->rx_bytes;
4536 tx_packets = cpu_stats->tx_packets;
4537 tx_bytes = cpu_stats->tx_bytes;
4538 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4540 stats->rx_packets += rx_packets;
4541 stats->rx_bytes += rx_bytes;
4542 stats->tx_packets += tx_packets;
4543 stats->tx_bytes += tx_bytes;
4546 stats->rx_errors = dev->stats.rx_errors;
4547 stats->rx_dropped = dev->stats.rx_dropped;
4548 stats->tx_dropped = dev->stats.tx_dropped;
4551 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4553 struct mvpp2_port *port = netdev_priv(dev);
4558 return phylink_mii_ioctl(port->phylink, ifr, cmd);
4561 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
4563 struct mvpp2_port *port = netdev_priv(dev);
4566 ret = mvpp2_prs_vid_entry_add(port, vid);
4568 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
4569 MVPP2_PRS_VLAN_FILT_MAX - 1);
4573 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
4575 struct mvpp2_port *port = netdev_priv(dev);
4577 mvpp2_prs_vid_entry_remove(port, vid);
4581 static int mvpp2_set_features(struct net_device *dev,
4582 netdev_features_t features)
4584 netdev_features_t changed = dev->features ^ features;
4585 struct mvpp2_port *port = netdev_priv(dev);
4587 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
4588 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
4589 mvpp2_prs_vid_enable_filtering(port);
4591 /* Invalidate all registered VID filters for this
4594 mvpp2_prs_vid_remove_all(port);
4596 mvpp2_prs_vid_disable_filtering(port);
4600 if (changed & NETIF_F_RXHASH) {
4601 if (features & NETIF_F_RXHASH)
4602 mvpp22_port_rss_enable(port);
4604 mvpp22_port_rss_disable(port);
4610 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
4612 struct bpf_prog *prog = bpf->prog, *old_prog;
4613 bool running = netif_running(port->dev);
4614 bool reset = !prog != !port->xdp_prog;
4616 if (port->dev->mtu > ETH_DATA_LEN) {
4617 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
4621 if (!port->priv->percpu_pools) {
4622 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
4626 if (port->ntxqs < num_possible_cpus() * 2) {
4627 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
4631 /* The device is up and a BPF program is being added or removed, so the Rx queues must be set up again */
4632 if (running && reset)
4633 mvpp2_stop(port->dev);
4635 old_prog = xchg(&port->xdp_prog, prog);
4637 bpf_prog_put(old_prog);
4639 /* The BPF program is merely replaced; the Rx queues and MTU are already set up */
4643 /* The device was up, so restore the link */
4645 mvpp2_open(port->dev);
4647 /* Check Page Pool DMA Direction */
4648 mvpp2_check_pagepool_dma(port);
4653 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4655 struct mvpp2_port *port = netdev_priv(dev);
4657 switch (xdp->command) {
4658 case XDP_SETUP_PROG:
4659 return mvpp2_xdp_setup(port, xdp);
4665 /* Ethtool methods */
4667 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
4669 struct mvpp2_port *port = netdev_priv(dev);
4674 return phylink_ethtool_nway_reset(port->phylink);
4677 /* Set interrupt coalescing for ethtool */
4678 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
4679 struct ethtool_coalesce *c)
4681 struct mvpp2_port *port = netdev_priv(dev);
4684 for (queue = 0; queue < port->nrxqs; queue++) {
4685 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4687 rxq->time_coal = c->rx_coalesce_usecs;
4688 rxq->pkts_coal = c->rx_max_coalesced_frames;
4689 mvpp2_rx_pkts_coal_set(port, rxq);
4690 mvpp2_rx_time_coal_set(port, rxq);
4693 if (port->has_tx_irqs) {
4694 port->tx_time_coal = c->tx_coalesce_usecs;
4695 mvpp2_tx_time_coal_set(port);
4698 for (queue = 0; queue < port->ntxqs; queue++) {
4699 struct mvpp2_tx_queue *txq = port->txqs[queue];
4701 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4703 if (port->has_tx_irqs)
4704 mvpp2_tx_pkts_coal_set(port, txq);
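/* Usage note: these hooks back ethtool's coalescing controls, e.g.
 * (interface name illustrative):
 *   ethtool -C eth0 rx-usecs 32 rx-frames 32 tx-frames 16
 */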
4710 /* Get interrupt coalescing for ethtool */
4711 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
4712 struct ethtool_coalesce *c)
4714 struct mvpp2_port *port = netdev_priv(dev);
4716 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
4717 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
4718 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
4719 c->tx_coalesce_usecs = port->tx_time_coal;
4723 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
4724 struct ethtool_drvinfo *drvinfo)
4726 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
4727 sizeof(drvinfo->driver));
4728 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
4729 sizeof(drvinfo->version));
4730 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4731 sizeof(drvinfo->bus_info));
4734 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
4735 struct ethtool_ringparam *ring)
4737 struct mvpp2_port *port = netdev_priv(dev);
4739 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
4740 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
4741 ring->rx_pending = port->rx_ring_size;
4742 ring->tx_pending = port->tx_ring_size;
4745 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
4746 struct ethtool_ringparam *ring)
4748 struct mvpp2_port *port = netdev_priv(dev);
4749 u16 prev_rx_ring_size = port->rx_ring_size;
4750 u16 prev_tx_ring_size = port->tx_ring_size;
4753 err = mvpp2_check_ringparam_valid(dev, ring);
4757 if (!netif_running(dev)) {
4758 port->rx_ring_size = ring->rx_pending;
4759 port->tx_ring_size = ring->tx_pending;
4763 /* The interface is running, so we have to force a
4764 * reallocation of the queues
4766 mvpp2_stop_dev(port);
4767 mvpp2_cleanup_rxqs(port);
4768 mvpp2_cleanup_txqs(port);
4770 port->rx_ring_size = ring->rx_pending;
4771 port->tx_ring_size = ring->tx_pending;
4773 err = mvpp2_setup_rxqs(port);
4775 /* Reallocate Rx queues with the original ring size */
4776 port->rx_ring_size = prev_rx_ring_size;
4777 ring->rx_pending = prev_rx_ring_size;
4778 err = mvpp2_setup_rxqs(port);
4782 err = mvpp2_setup_txqs(port);
4784 /* Reallocate Tx queues with the original ring size */
4785 port->tx_ring_size = prev_tx_ring_size;
4786 ring->tx_pending = prev_tx_ring_size;
4787 err = mvpp2_setup_txqs(port);
4789 goto err_clean_rxqs;
4792 mvpp2_start_dev(port);
4793 mvpp2_egress_enable(port);
4794 mvpp2_ingress_enable(port);
4799 mvpp2_cleanup_rxqs(port);
4801 netdev_err(dev, "failed to change ring parameters");
4805 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
4806 struct ethtool_pauseparam *pause)
4808 struct mvpp2_port *port = netdev_priv(dev);
4813 phylink_ethtool_get_pauseparam(port->phylink, pause);
4816 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
4817 struct ethtool_pauseparam *pause)
4819 struct mvpp2_port *port = netdev_priv(dev);
4824 return phylink_ethtool_set_pauseparam(port->phylink, pause);
4827 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
4828 struct ethtool_link_ksettings *cmd)
4830 struct mvpp2_port *port = netdev_priv(dev);
4835 return phylink_ethtool_ksettings_get(port->phylink, cmd);
4838 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
4839 const struct ethtool_link_ksettings *cmd)
4841 struct mvpp2_port *port = netdev_priv(dev);
4846 return phylink_ethtool_ksettings_set(port->phylink, cmd);
4849 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
4850 struct ethtool_rxnfc *info, u32 *rules)
4852 struct mvpp2_port *port = netdev_priv(dev);
4853 int ret = 0, i, loc = 0;
4855 if (!mvpp22_rss_is_supported())
4858 switch (info->cmd) {
4860 ret = mvpp2_ethtool_rxfh_get(port, info);
4862 case ETHTOOL_GRXRINGS:
4863 info->data = port->nrxqs;
4865 case ETHTOOL_GRXCLSRLCNT:
4866 info->rule_cnt = port->n_rfs_rules;
4868 case ETHTOOL_GRXCLSRULE:
4869 ret = mvpp2_ethtool_cls_rule_get(port, info);
4871 case ETHTOOL_GRXCLSRLALL:
4872 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
4873 if (port->rfs_rules[i])
4884 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
4885 struct ethtool_rxnfc *info)
4887 struct mvpp2_port *port = netdev_priv(dev);
4890 if (!mvpp22_rss_is_supported())
4893 switch (info->cmd) {
4895 ret = mvpp2_ethtool_rxfh_set(port, info);
4897 case ETHTOOL_SRXCLSRLINS:
4898 ret = mvpp2_ethtool_cls_rule_ins(port, info);
4900 case ETHTOOL_SRXCLSRLDEL:
4901 ret = mvpp2_ethtool_cls_rule_del(port, info);
4909 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
4911 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
4914 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4917 struct mvpp2_port *port = netdev_priv(dev);
4920 if (!mvpp22_rss_is_supported())
4924 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
4927 *hfunc = ETH_RSS_HASH_CRC32;
4932 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4933 const u8 *key, const u8 hfunc)
4935 struct mvpp2_port *port = netdev_priv(dev);
4938 if (!mvpp22_rss_is_supported())
4941 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4948 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
4953 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
4954 u8 *key, u8 *hfunc, u32 rss_context)
4956 struct mvpp2_port *port = netdev_priv(dev);
4959 if (!mvpp22_rss_is_supported())
4961 if (rss_context >= MVPP22_N_RSS_TABLES)
4965 *hfunc = ETH_RSS_HASH_CRC32;
4968 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
4973 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
4974 const u32 *indir, const u8 *key,
4975 const u8 hfunc, u32 *rss_context,
4978 struct mvpp2_port *port = netdev_priv(dev);
4981 if (!mvpp22_rss_is_supported())
4984 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4991 return mvpp22_port_rss_ctx_delete(port, *rss_context);
4993 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
4994 ret = mvpp22_port_rss_ctx_create(port, rss_context);
4999 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5003 static const struct net_device_ops mvpp2_netdev_ops = {
5004 .ndo_open = mvpp2_open,
5005 .ndo_stop = mvpp2_stop,
5006 .ndo_start_xmit = mvpp2_tx,
5007 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5008 .ndo_set_mac_address = mvpp2_set_mac_address,
5009 .ndo_change_mtu = mvpp2_change_mtu,
5010 .ndo_get_stats64 = mvpp2_get_stats64,
5011 .ndo_do_ioctl = mvpp2_ioctl,
5012 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5013 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5014 .ndo_set_features = mvpp2_set_features,
5015 .ndo_bpf = mvpp2_xdp,
5016 .ndo_xdp_xmit = mvpp2_xdp_xmit,
5019 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5020 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5021 ETHTOOL_COALESCE_MAX_FRAMES,
5022 .nway_reset = mvpp2_ethtool_nway_reset,
5023 .get_link = ethtool_op_get_link,
5024 .set_coalesce = mvpp2_ethtool_set_coalesce,
5025 .get_coalesce = mvpp2_ethtool_get_coalesce,
5026 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5027 .get_ringparam = mvpp2_ethtool_get_ringparam,
5028 .set_ringparam = mvpp2_ethtool_set_ringparam,
5029 .get_strings = mvpp2_ethtool_get_strings,
5030 .get_ethtool_stats = mvpp2_ethtool_get_stats,
5031 .get_sset_count = mvpp2_ethtool_get_sset_count,
5032 .get_pauseparam = mvpp2_ethtool_get_pause_param,
5033 .set_pauseparam = mvpp2_ethtool_set_pause_param,
5034 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5035 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
5036 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5037 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
5038 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
5039 .get_rxfh = mvpp2_ethtool_get_rxfh,
5040 .set_rxfh = mvpp2_ethtool_set_rxfh,
5041 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
5042 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
5045 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5046 * had a single IRQ defined per port.
5048 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5049 struct device_node *port_node)
5051 struct mvpp2_queue_vector *v = &port->qvecs[0];
5054 v->nrxqs = port->nrxqs;
5055 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5056 v->sw_thread_id = 0;
5057 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5059 v->irq = irq_of_parse_and_map(port_node, 0);
5062 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5070 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5071 struct device_node *port_node)
5073 struct mvpp2 *priv = port->priv;
5074 struct mvpp2_queue_vector *v;
5077 switch (queue_mode) {
5078 case MVPP2_QDIST_SINGLE_MODE:
5079 port->nqvecs = priv->nthreads + 1;
5081 case MVPP2_QDIST_MULTI_MODE:
5082 port->nqvecs = priv->nthreads;
5086 for (i = 0; i < port->nqvecs; i++) {
5089 v = port->qvecs + i;
5092 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5093 v->sw_thread_id = i;
5094 v->sw_thread_mask = BIT(i);
5096 if (port->flags & MVPP2_F_DT_COMPAT)
5097 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5099 snprintf(irqname, sizeof(irqname), "hif%d", i);
5101 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5104 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5105 i == (port->nqvecs - 1)) {
5107 v->nrxqs = port->nrxqs;
5108 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5110 if (port->flags & MVPP2_F_DT_COMPAT)
5111 strncpy(irqname, "rx-shared", sizeof(irqname));
5115 v->irq = of_irq_get_byname(port_node, irqname);
5117 v->irq = fwnode_irq_get(port->fwnode, i);
5123 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5130 for (i = 0; i < port->nqvecs; i++)
5131 irq_dispose_mapping(port->qvecs[i].irq);
5135 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5136 struct device_node *port_node)
5138 if (port->has_tx_irqs)
5139 return mvpp2_multi_queue_vectors_init(port, port_node);
5141 return mvpp2_simple_queue_vectors_init(port, port_node);
5144 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5148 for (i = 0; i < port->nqvecs; i++)
5149 irq_dispose_mapping(port->qvecs[i].irq);
5152 /* Configure Rx queue group interrupt for this port */
5153 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5155 struct mvpp2 *priv = port->priv;
5159 if (priv->hw_version == MVPP21) {
5160 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5165 /* Handle the more complicated PPv2.2 case */
5166 for (i = 0; i < port->nqvecs; i++) {
5167 struct mvpp2_queue_vector *qv = port->qvecs + i;
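/* Select this (port, sw thread) pair in the indirect group-index
 * register, then program the group's first Rx queue and queue count.
 */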
5172 val = qv->sw_thread_id;
5173 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5174 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5176 val = qv->first_rxq;
5177 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5178 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5182 /* Initialize port HW */
5183 static int mvpp2_port_init(struct mvpp2_port *port)
5185 struct device *dev = port->dev->dev.parent;
5186 struct mvpp2 *priv = port->priv;
5187 struct mvpp2_txq_pcpu *txq_pcpu;
5188 unsigned int thread;
5191 /* Checks for hardware constraints */
5192 if (port->first_rxq + port->nrxqs >
5193 MVPP2_MAX_PORTS * priv->max_port_rxqs)
5196 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5200 mvpp2_egress_disable(port);
5201 mvpp2_port_disable(port);
5203 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5205 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5210 /* Associate physical Tx queues with this port and initialize them.
5211 * The mapping is predefined.
5213 for (queue = 0; queue < port->ntxqs; queue++) {
5214 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5215 struct mvpp2_tx_queue *txq;
5217 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5220 goto err_free_percpu;
5223 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5226 goto err_free_percpu;
5229 txq->id = queue_phy_id;
5230 txq->log_id = queue;
5231 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5232 for (thread = 0; thread < priv->nthreads; thread++) {
5233 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5234 txq_pcpu->thread = thread;
5237 port->txqs[queue] = txq;
5240 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5244 goto err_free_percpu;
5247 /* Allocate and initialize Rx queues for this port */
5248 for (queue = 0; queue < port->nrxqs; queue++) {
5249 struct mvpp2_rx_queue *rxq;
5251 /* Map physical Rx queue to port's logical Rx queue */
5252 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5255 goto err_free_percpu;
5257 /* Map this Rx queue to a physical queue */
5258 rxq->id = port->first_rxq + queue;
5259 rxq->port = port->id;
5260 rxq->logic_rxq = queue;
5262 port->rxqs[queue] = rxq;
5265 mvpp2_rx_irqs_setup(port);
5267 /* Create Rx descriptor rings */
5268 for (queue = 0; queue < port->nrxqs; queue++) {
5269 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5271 rxq->size = port->rx_ring_size;
5272 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5273 rxq->time_coal = MVPP2_RX_COAL_USEC;
5276 mvpp2_ingress_disable(port);
5278 /* Port default configuration */
5279 mvpp2_defaults_set(port);
5281 /* Port's classifier configuration */
5282 mvpp2_cls_oversize_rxq_set(port);
5283 mvpp2_cls_port_config(port);
5285 if (mvpp22_rss_is_supported())
5286 mvpp22_port_rss_init(port);
5288 /* Provide an initial Rx packet size */
5289 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5291 /* Initialize buffer pools for software forwarding (swf) */
5292 err = mvpp2_swf_bm_pool_init(port);
5294 goto err_free_percpu;
5296 /* Clear all port stats */
5297 mvpp2_read_stats(port);
5298 memset(port->ethtool_stats, 0,
5299 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
5304 for (queue = 0; queue < port->ntxqs; queue++) {
5305 if (!port->txqs[queue])
5307 free_percpu(port->txqs[queue]->pcpu);
5312 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
5313 unsigned long *flags)
5315 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
5319 for (i = 0; i < 5; i++)
5320 if (of_property_match_string(port_node, "interrupt-names",
5324 *flags |= MVPP2_F_DT_COMPAT;
5328 /* Checks if the port dt description has the required Tx interrupts:
4329 * - PPv2.1: there are no such interrupts.
4330 * - PPv2.2:
4331 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
4332 *   - The new ones have: "hifX" with X in [0..8]
4334 * All those variants are supported to keep backward compatibility.
5336 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
5337 struct device_node *port_node,
5338 unsigned long *flags)
5347 if (priv->hw_version == MVPP21)
5350 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
5353 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5354 snprintf(name, 5, "hif%d", i);
5355 if (of_property_match_string(port_node, "interrupt-names",
5363 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
5364 struct fwnode_handle *fwnode,
5367 struct mvpp2_port *port = netdev_priv(dev);
5368 char hw_mac_addr[ETH_ALEN] = {0};
5369 char fw_mac_addr[ETH_ALEN];
5371 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
5372 *mac_from = "firmware node";
5373 ether_addr_copy(dev->dev_addr, fw_mac_addr);
5377 if (priv->hw_version == MVPP21) {
5378 mvpp21_get_mac_address(port, hw_mac_addr);
5379 if (is_valid_ether_addr(hw_mac_addr)) {
5380 *mac_from = "hardware";
5381 ether_addr_copy(dev->dev_addr, hw_mac_addr);
5386 *mac_from = "random";
5387 eth_hw_addr_random(dev);
5390 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
5392 return container_of(config, struct mvpp2_port, phylink_config);
5395 static void mvpp2_phylink_validate(struct phylink_config *config,
5396 unsigned long *supported,
5397 struct phylink_link_state *state)
5399 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5400 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5402 /* Invalid combinations */
5403 switch (state->interface) {
5404 case PHY_INTERFACE_MODE_10GBASER:
5405 case PHY_INTERFACE_MODE_XAUI:
5406 if (!mvpp2_port_supports_xlg(port))
5409 case PHY_INTERFACE_MODE_RGMII:
5410 case PHY_INTERFACE_MODE_RGMII_ID:
5411 case PHY_INTERFACE_MODE_RGMII_RXID:
5412 case PHY_INTERFACE_MODE_RGMII_TXID:
5413 if (!mvpp2_port_supports_rgmii(port))
5420 phylink_set(mask, Autoneg);
5421 phylink_set_port_modes(mask);
5422 phylink_set(mask, Pause);
5423 phylink_set(mask, Asym_Pause);
5425 switch (state->interface) {
5426 case PHY_INTERFACE_MODE_10GBASER:
5427 case PHY_INTERFACE_MODE_XAUI:
5428 case PHY_INTERFACE_MODE_NA:
5429 if (mvpp2_port_supports_xlg(port)) {
5430 phylink_set(mask, 10000baseT_Full);
5431 phylink_set(mask, 10000baseCR_Full);
5432 phylink_set(mask, 10000baseSR_Full);
5433 phylink_set(mask, 10000baseLR_Full);
5434 phylink_set(mask, 10000baseLRM_Full);
5435 phylink_set(mask, 10000baseER_Full);
5436 phylink_set(mask, 10000baseKR_Full);
5438 if (state->interface != PHY_INTERFACE_MODE_NA)
5441 case PHY_INTERFACE_MODE_RGMII:
5442 case PHY_INTERFACE_MODE_RGMII_ID:
5443 case PHY_INTERFACE_MODE_RGMII_RXID:
5444 case PHY_INTERFACE_MODE_RGMII_TXID:
5445 case PHY_INTERFACE_MODE_SGMII:
5446 phylink_set(mask, 10baseT_Half);
5447 phylink_set(mask, 10baseT_Full);
5448 phylink_set(mask, 100baseT_Half);
5449 phylink_set(mask, 100baseT_Full);
5450 phylink_set(mask, 1000baseT_Full);
5451 phylink_set(mask, 1000baseX_Full);
5452 if (state->interface != PHY_INTERFACE_MODE_NA)
5455 case PHY_INTERFACE_MODE_1000BASEX:
5456 case PHY_INTERFACE_MODE_2500BASEX:
5458 state->interface != PHY_INTERFACE_MODE_2500BASEX) {
5459 phylink_set(mask, 1000baseT_Full);
5460 phylink_set(mask, 1000baseX_Full);
5463 state->interface == PHY_INTERFACE_MODE_2500BASEX) {
5464 phylink_set(mask, 2500baseT_Full);
5465 phylink_set(mask, 2500baseX_Full);
5472 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
5473 bitmap_and(state->advertising, state->advertising, mask,
5474 __ETHTOOL_LINK_MODE_MASK_NBITS);
5476 phylink_helper_basex_speed(state);
5480 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
5483 static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
5484 struct phylink_link_state *state)
5488 state->speed = SPEED_10000;
5490 state->an_complete = 1;
5492 val = readl(port->base + MVPP22_XLG_STATUS);
5493 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
5496 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5497 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
5498 state->pause |= MLO_PAUSE_TX;
5499 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
5500 state->pause |= MLO_PAUSE_RX;
5503 static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
5504 struct phylink_link_state *state)
5508 val = readl(port->base + MVPP2_GMAC_STATUS0);
5510 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
5511 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
5512 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
5514 switch (port->phy_interface) {
5515 case PHY_INTERFACE_MODE_1000BASEX:
5516 state->speed = SPEED_1000;
5518 case PHY_INTERFACE_MODE_2500BASEX:
5519 state->speed = SPEED_2500;
5522 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
5523 state->speed = SPEED_1000;
5524 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
5525 state->speed = SPEED_100;
5527 state->speed = SPEED_10;
5531 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
5532 state->pause |= MLO_PAUSE_RX;
5533 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
5534 state->pause |= MLO_PAUSE_TX;
5537 static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
5538 struct phylink_link_state *state)
5540 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5542 if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
5543 u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
5544 mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
5546 if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
5547 mvpp22_xlg_pcs_get_state(port, state);
5552 mvpp2_gmac_pcs_get_state(port, state);
5555 static void mvpp2_mac_an_restart(struct phylink_config *config)
5557 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5558 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
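/* Pulse the restart bit high then low to kick off a new in-band
 * auto-negotiation cycle.
 */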
5560 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
5561 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5562 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
5563 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5566 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
5567 const struct phylink_link_state *state)
5571 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5572 MVPP22_XLG_CTRL0_MAC_RESET_DIS,
5573 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5574 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
5575 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
5576 MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
5577 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
5578 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
5580 /* Wait for reset to deassert */
5582 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5583 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
5586 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
5587 const struct phylink_link_state *state)
5590 u32 old_ctrl0, ctrl0;
5591 u32 old_ctrl2, ctrl2;
5592 u32 old_ctrl4, ctrl4;
5594 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5595 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5596 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5597 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
5599 an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
5600 MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5601 MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
5602 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
5603 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
5604 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
5605 MVPP2_GMAC_PCS_ENABLE_MASK);
5607 /* Configure port type */
5608 if (phy_interface_mode_is_8023z(state->interface)) {
5609 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
5610 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5611 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5612 MVPP22_CTRL4_DP_CLK_SEL |
5613 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5614 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5615 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
5616 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5617 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5618 MVPP22_CTRL4_DP_CLK_SEL |
5619 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5620 } else if (phy_interface_mode_is_rgmii(state->interface)) {
5621 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
5622 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5623 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5624 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5627 /* Configure advertisement bits */
5628 if (phylink_test(state->advertising, Pause))
5629 an |= MVPP2_GMAC_FC_ADV_EN;
5630 if (phylink_test(state->advertising, Asym_Pause))
5631 an |= MVPP2_GMAC_FC_ADV_ASM_EN;
5633 /* Configure negotiation style */
5634 if (!phylink_autoneg_inband(mode)) {
5635 /* Phy or fixed speed - no in-band AN, nothing to do, leave the
5636 * configured speed, duplex and flow control as-is.
5638 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5639 /* SGMII in-band mode receives the speed and duplex from
5640 * the PHY. Flow control information is not received. */
5641 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
5642 MVPP2_GMAC_FORCE_LINK_PASS |
5643 MVPP2_GMAC_CONFIG_MII_SPEED |
5644 MVPP2_GMAC_CONFIG_GMII_SPEED |
5645 MVPP2_GMAC_CONFIG_FULL_DUPLEX);
5646 an |= MVPP2_GMAC_IN_BAND_AUTONEG |
5647 MVPP2_GMAC_AN_SPEED_EN |
5648 MVPP2_GMAC_AN_DUPLEX_EN;
5649 } else if (phy_interface_mode_is_8023z(state->interface)) {
5650 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
5651 * they negotiate duplex: they are always operating with a fixed
5652 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
5653 * speed and full duplex here.
5655 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
5656 an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
5657 MVPP2_GMAC_FORCE_LINK_PASS |
5658 MVPP2_GMAC_CONFIG_MII_SPEED |
5659 MVPP2_GMAC_CONFIG_GMII_SPEED |
5660 MVPP2_GMAC_CONFIG_FULL_DUPLEX);
5661 an |= MVPP2_GMAC_IN_BAND_AUTONEG |
5662 MVPP2_GMAC_CONFIG_GMII_SPEED |
5663 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5665 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
5666 an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5669 /* Some fields of the auto-negotiation register require the port to be down when
5670 * their value is updated.
5672 #define MVPP2_GMAC_AN_PORT_DOWN_MASK \
5673 (MVPP2_GMAC_IN_BAND_AUTONEG | \
5674 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
5675 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
5676 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
5677 MVPP2_GMAC_AN_DUPLEX_EN)
5679 if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
5680 (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
5681 (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
5682 /* Force link down */
5683 old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5684 old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
5685 writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5687 /* Set the GMAC in a reset state - do this in a way that
5688 * ensures we clear it below.
5690 old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
5691 writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5694 if (old_ctrl0 != ctrl0)
5695 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
5696 if (old_ctrl2 != ctrl2)
5697 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5698 if (old_ctrl4 != ctrl4)
5699 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
5701 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5703 if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
5704 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5705 MVPP2_GMAC_PORT_RESET_MASK)
5710 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
5711 const struct phylink_link_state *state)
5713 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5714 bool change_interface = port->phy_interface != state->interface;
5716 /* Check for invalid configuration */
5717 if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
5718 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
5722 /* Make sure the port is disabled when reconfiguring the mode */
5723 mvpp2_port_disable(port);
5725 if (port->priv->hw_version == MVPP22 && change_interface) {
5726 mvpp22_gop_mask_irq(port);
5728 port->phy_interface = state->interface;
5730 /* Reconfigure the serdes lanes */
5731 phy_power_off(port->comphy);
5732 mvpp22_mode_reconfigure(port);
5735 /* MAC (re)configuration */
5736 if (mvpp2_is_xlg(state->interface))
5737 mvpp2_xlg_config(port, mode, state);
5738 else if (phy_interface_mode_is_rgmii(state->interface) ||
5739 phy_interface_mode_is_8023z(state->interface) ||
5740 state->interface == PHY_INTERFACE_MODE_SGMII)
5741 mvpp2_gmac_config(port, mode, state);
5743 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
5744 mvpp2_port_loopback_set(port, state);
5746 if (port->priv->hw_version == MVPP22 && change_interface)
5747 mvpp22_gop_unmask_irq(port);
5749 mvpp2_port_enable(port);
5752 static void mvpp2_mac_link_up(struct phylink_config *config,
5753 struct phy_device *phy,
5754 unsigned int mode, phy_interface_t interface,
5755 int speed, int duplex,
5756 bool tx_pause, bool rx_pause)
5758 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5761 if (mvpp2_is_xlg(interface)) {
5762 if (!phylink_autoneg_inband(mode)) {
5763 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5765 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
5767 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5769 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5770 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
5771 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
5772 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
5773 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
5776 if (!phylink_autoneg_inband(mode)) {
5777 val = MVPP2_GMAC_FORCE_LINK_PASS;
5779 if (speed == SPEED_1000 || speed == SPEED_2500)
5780 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5781 else if (speed == SPEED_100)
5782 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5784 if (duplex == DUPLEX_FULL)
5785 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5787 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
5788 MVPP2_GMAC_FORCE_LINK_DOWN |
5789 MVPP2_GMAC_FORCE_LINK_PASS |
5790 MVPP2_GMAC_CONFIG_MII_SPEED |
5791 MVPP2_GMAC_CONFIG_GMII_SPEED |
5792 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
5795 /* We can always update the flow control enable bits;
5796 * these will only be effective if flow control AN
5797 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
5801 val |= MVPP22_CTRL4_TX_FC_EN;
5803 val |= MVPP22_CTRL4_RX_FC_EN;
5805 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
5806 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
5810 mvpp2_port_enable(port);
5812 mvpp2_egress_enable(port);
5813 mvpp2_ingress_enable(port);
5814 netif_tx_wake_all_queues(port->dev);
5817 static void mvpp2_mac_link_down(struct phylink_config *config,
5818 unsigned int mode, phy_interface_t interface)
5820 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5823 if (!phylink_autoneg_inband(mode)) {
5824 if (mvpp2_is_xlg(interface)) {
5825 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5826 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5827 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5828 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5830 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5831 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5832 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5833 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5837 netif_tx_stop_all_queues(port->dev);
5838 mvpp2_egress_disable(port);
5839 mvpp2_ingress_disable(port);
5841 mvpp2_port_disable(port);
5844 static const struct phylink_mac_ops mvpp2_phylink_ops = {
5845 .validate = mvpp2_phylink_validate,
5846 .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
5847 .mac_an_restart = mvpp2_mac_an_restart,
5848 .mac_config = mvpp2_mac_config,
5849 .mac_link_up = mvpp2_mac_link_up,
5850 .mac_link_down = mvpp2_mac_link_down,
5853 /* Ports initialization */
5854 static int mvpp2_port_probe(struct platform_device *pdev,
5855 struct fwnode_handle *port_fwnode,
5858 struct phy *comphy = NULL;
5859 struct mvpp2_port *port;
5860 struct mvpp2_port_pcpu *port_pcpu;
5861 struct device_node *port_node = to_of_node(port_fwnode);
5862 netdev_features_t features;
5863 struct net_device *dev;
5864 struct phylink *phylink;
5865 char *mac_from = "";
5866 unsigned int ntxqs, nrxqs, thread;
5867 unsigned long flags = 0;
5873 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
5874 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
5876 "not enough IRQs to support multi queue mode\n");
5880 ntxqs = MVPP2_MAX_TXQ;
5881 nrxqs = mvpp2_get_nrxqs(priv);
5883 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
5887 phy_mode = fwnode_get_phy_mode(port_fwnode);
5889 dev_err(&pdev->dev, "incorrect phy mode\n");
5891 goto err_free_netdev;
5895 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
5896 * Existing usage of 10GBASE-KR is not correct; no backplane
5897 * negotiation is done, and this driver does not actually support it.
5900 if (phy_mode == PHY_INTERFACE_MODE_10GKR)
5901 phy_mode = PHY_INTERFACE_MODE_10GBASER;
5904 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
5905 if (IS_ERR(comphy)) {
5906 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
5907 err = -EPROBE_DEFER;
5908 goto err_free_netdev;
5914 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
5916 dev_err(&pdev->dev, "missing port-id value\n");
5917 goto err_free_netdev;
5920 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
5921 dev->watchdog_timeo = 5 * HZ;
5922 dev->netdev_ops = &mvpp2_netdev_ops;
5923 dev->ethtool_ops = &mvpp2_eth_tool_ops;
5925 port = netdev_priv(dev);
5927 port->fwnode = port_fwnode;
5928 port->has_phy = !!of_find_property(port_node, "phy", NULL);
5929 port->ntxqs = ntxqs;
5930 port->nrxqs = nrxqs;
5932 port->has_tx_irqs = has_tx_irqs;
5933 port->flags = flags;
5935 err = mvpp2_queue_vectors_init(port, port_node);
5937 goto err_free_netdev;
5940 port->link_irq = of_irq_get_byname(port_node, "link");
5942 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
5943 if (port->link_irq == -EPROBE_DEFER) {
5944 err = -EPROBE_DEFER;
5945 goto err_deinit_qvecs;
5947 if (port->link_irq <= 0)
5948 /* the link irq is optional */
5951 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
5952 port->flags |= MVPP2_F_LOOPBACK;
5955 if (priv->hw_version == MVPP21)
5956 port->first_rxq = port->id * port->nrxqs;
5958 port->first_rxq = port->id * priv->max_port_rxqs;
5960 port->of_node = port_node;
5961 port->phy_interface = phy_mode;
5962 port->comphy = comphy;
5964 if (priv->hw_version == MVPP21) {
5965 port->base = devm_platform_ioremap_resource(pdev, 2 + id);
5966 if (IS_ERR(port->base)) {
5967 err = PTR_ERR(port->base);
5971 port->stats_base = port->priv->lms_base +
5972 MVPP21_MIB_COUNTERS_OFFSET +
5973 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
5975 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
5978 dev_err(&pdev->dev, "missing gop-port-id value\n");
5979 goto err_deinit_qvecs;
5982 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
5983 port->stats_base = port->priv->iface_base +
5984 MVPP22_MIB_COUNTERS_OFFSET +
5985 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
5988 /* Allocate per-CPU and ethtool stats */
5989 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
5995 port->ethtool_stats = devm_kcalloc(&pdev->dev,
5996 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
5997 sizeof(u64), GFP_KERNEL);
5998 if (!port->ethtool_stats) {
6000 goto err_free_stats;
6003 mutex_init(&port->gather_stats_lock);
6004 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6006 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6008 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6009 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6010 SET_NETDEV_DEV(dev, &pdev->dev);
6012 err = mvpp2_port_init(port);
6014 dev_err(&pdev->dev, "failed to init port %d\n", id);
6015 goto err_free_stats;
6018 mvpp2_port_periodic_xon_disable(port);
6020 mvpp2_mac_reset_assert(port);
6021 mvpp22_pcs_reset_assert(port);
6023 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6026 goto err_free_txq_pcpu;
6029 if (!port->has_tx_irqs) {
6030 for (thread = 0; thread < priv->nthreads; thread++) {
6031 port_pcpu = per_cpu_ptr(port->pcpu, thread);
6033 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6034 HRTIMER_MODE_REL_PINNED_SOFT);
6035 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6036 port_pcpu->timer_scheduled = false;
6037 port_pcpu->dev = dev;
6041 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6043 dev->features = features | NETIF_F_RXCSUM;
6044 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6045 NETIF_F_HW_VLAN_CTAG_FILTER;
6047 if (mvpp22_rss_is_supported()) {
6048 dev->hw_features |= NETIF_F_RXHASH;
6049 dev->features |= NETIF_F_NTUPLE;
6052 if (!port->priv->percpu_pools)
6053 mvpp2_set_hw_csum(port, port->pool_long->id);
6055 dev->vlan_features |= features;
6056 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
6057 dev->priv_flags |= IFF_UNICAST_FLT;
6059 /* MTU range: 68 - 9704 */
6060 dev->min_mtu = ETH_MIN_MTU;
6062 /* 9704 == 9728 - 20 (= 9708) rounded down to a multiple of 8 */
6062 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6063 dev->dev.of_node = port_node;
6065 /* Phylink isn't used with ACPI as of now */
6067 port->phylink_config.dev = &dev->dev;
6068 port->phylink_config.type = PHYLINK_NETDEV;
6070 phylink = phylink_create(&port->phylink_config, port_fwnode,
6071 phy_mode, &mvpp2_phylink_ops);
6072 if (IS_ERR(phylink)) {
6073 err = PTR_ERR(phylink);
6074 goto err_free_port_pcpu;
6076 port->phylink = phylink;
6078 port->phylink = NULL;
6081 /* Cycle the comphy to power it down, saving 270mW per port;
6082 * don't worry about an error powering it up. When the comphy
6083 * driver does this, we can remove this code.
6086 err = mvpp22_comphy_init(port);
6088 phy_power_off(port->comphy);
6091 err = register_netdev(dev);
6093 dev_err(&pdev->dev, "failed to register netdev\n");
6096 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6098 priv->port_list[priv->port_count++] = port;
6104 phylink_destroy(port->phylink);
6106 free_percpu(port->pcpu);
6108 for (i = 0; i < port->ntxqs; i++)
6109 free_percpu(port->txqs[i]->pcpu);
6111 free_percpu(port->stats);
6114 irq_dispose_mapping(port->link_irq);
6116 mvpp2_queue_vectors_deinit(port);
6122 /* Ports removal routine */
6123 static void mvpp2_port_remove(struct mvpp2_port *port)
6127 unregister_netdev(port->dev);
6129 phylink_destroy(port->phylink);
6130 free_percpu(port->pcpu);
6131 free_percpu(port->stats);
6132 for (i = 0; i < port->ntxqs; i++)
6133 free_percpu(port->txqs[i]->pcpu);
6134 mvpp2_queue_vectors_deinit(port);
6136 irq_dispose_mapping(port->link_irq);
6137 free_netdev(port->dev);
6140 /* Initialize decoding windows */
6141 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6147 for (i = 0; i < 6; i++) {
6148 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6149 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6152 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
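/* Program one decoding window per DRAM chip-select */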
6157 for (i = 0; i < dram->num_cs; i++) {
6158 const struct mbus_dram_window *cs = dram->cs + i;
6160 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6161 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6162 dram->mbus_dram_target_id);
6164 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6165 (cs->size - 1) & 0xffff0000);
6167 win_enable |= (1 << i);
6170 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6173 /* Initialize Rx FIFOs */
6174 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6178 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6179 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6180 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
6181 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6182 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
6185 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6186 MVPP2_RX_FIFO_PORT_MIN_PKT);
6187 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6190 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
6194 /* The FIFO size parameters are set depending on the maximum speed a
6195 * given port can handle:
6196 * - Port 0: 10Gbps
6197 * - Port 1: 2.5Gbps
6198 * - Ports 2 and 3: 1Gbps
6201 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
6202 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
6203 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
6204 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
6206 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
6207 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
6208 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
6209 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
6211 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
6212 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6213 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
6214 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6215 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
6218 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6219 MVPP2_RX_FIFO_PORT_MIN_PKT);
6220 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6223 /* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2, and 10G
6224 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
6225 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
6227 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
6229 int port, size, thrs;
6231 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6233 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
6234 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
6236 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
6237 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
6239 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
6240 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
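/* Budget check: 10kB (port 0) + 3 * 3kB (ports 1-3) = 19kB, the whole
 * Tx FIFO available on PPv2.2.
 */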
6244 static void mvpp2_axi_init(struct mvpp2 *priv)
6246 u32 val, rdval, wrval;
6248 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
6250 /* AXI Bridge Configuration */
6252 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
6253 << MVPP22_AXI_ATTR_CACHE_OFFS;
6254 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6255 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6257 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
6258 << MVPP22_AXI_ATTR_CACHE_OFFS;
6259 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6260 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6263 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
6264 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
6267 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
6268 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
6269 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
6270 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
6273 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
6274 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
6276 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
6277 << MVPP22_AXI_CODE_CACHE_OFFS;
6278 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
6279 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6280 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
6281 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
6283 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
6284 << MVPP22_AXI_CODE_CACHE_OFFS;
6285 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6286 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6288 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
6290 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
6291 << MVPP22_AXI_CODE_CACHE_OFFS;
6292 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6293 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6295 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
6298 /* Initialize network controller common part HW */
6299 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6301 const struct mbus_dram_target_info *dram_target_info;
6305 /* MBUS windows configuration */
6306 dram_target_info = mv_mbus_dram_info();
6307 if (dram_target_info)
6308 mvpp2_conf_mbus_windows(dram_target_info, priv);
6310 if (priv->hw_version == MVPP22)
6311 mvpp2_axi_init(priv);
6313 /* Disable HW PHY polling */
6314 if (priv->hw_version == MVPP21) {
6315 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6316 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6317 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6319 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6320 val &= ~MVPP22_SMI_POLLING_EN;
6321 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6324 /* Allocate and initialize aggregated TXQs */
6325 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
6326 sizeof(*priv->aggr_txqs),
6328 if (!priv->aggr_txqs)
6331 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6332 priv->aggr_txqs[i].id = i;
6333 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6334 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
6340 if (priv->hw_version == MVPP21) {
6341 mvpp2_rx_fifo_init(priv);
6343 mvpp22_rx_fifo_init(priv);
6344 mvpp22_tx_fifo_init(priv);
6347 if (priv->hw_version == MVPP21)
6348 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6349 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6351 /* Allow cache snooping when transmitting packets */
6352 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6354 /* Buffer Manager initialization */
6355 err = mvpp2_bm_init(&pdev->dev, priv);
6359 /* Parser default initialization */
6360 err = mvpp2_prs_default_init(pdev, priv);
6364 /* Classifier default initialization */
6365 mvpp2_cls_init(priv);
6370 static int mvpp2_probe(struct platform_device *pdev)
6372 const struct acpi_device_id *acpi_id;
6373 struct fwnode_handle *fwnode = pdev->dev.fwnode;
6374 struct fwnode_handle *port_fwnode;
6376 struct resource *res;
6381 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
6385 if (has_acpi_companion(&pdev->dev)) {
6386 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
6390 priv->hw_version = (unsigned long)acpi_id->driver_data;
6393 (unsigned long)of_device_get_match_data(&pdev->dev);
6396 /* Multi-queue mode isn't supported on PPv2.1; fall back to single
6397 * mode */
6399 if (priv->hw_version == MVPP21)
6400 queue_mode = MVPP2_QDIST_SINGLE_MODE;
6402 base = devm_platform_ioremap_resource(pdev, 0);
6404 return PTR_ERR(base);
6406 if (priv->hw_version == MVPP21) {
6407 priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
6408 if (IS_ERR(priv->lms_base))
6409 return PTR_ERR(priv->lms_base);
6411 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6412 if (has_acpi_companion(&pdev->dev)) {
6413 /* If the MDIO memory region is declared in
6414 * the ACPI tables, it may already appear as 'in use'
6415 * in the OS. Because it is overlapped by the second
6416 * region of the network controller, make
6417 * sure it is released before requesting it again.
6418 * The mvpp2 driver takes care to avoid
6419 * concurrent access to this memory region.
6421 release_resource(res);
6423 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6424 if (IS_ERR(priv->iface_base))
6425 return PTR_ERR(priv->iface_base);
6428 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
6429 priv->sysctrl_base =
6430 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
6431 "marvell,system-controller");
6432 if (IS_ERR(priv->sysctrl_base))
6433 /* The system controller regmap is optional for dt
6434 * compatibility reasons. When not provided, the
6435 * configuration of the GoP relies on the
6436 * firmware/bootloader.
6438 priv->sysctrl_base = NULL;
6441 if (priv->hw_version == MVPP22 &&
6442 mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
6443 priv->percpu_pools = 1;
6445 mvpp2_setup_bm_pool();
6448 priv->nthreads = min_t(unsigned int, num_present_cpus(),
6451 shared = num_present_cpus() - priv->nthreads;
6453 bitmap_fill(&priv->lock_map,
6454 min_t(int, shared, MVPP2_MAX_THREADS));
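/* Each software thread gets its own register address space (window) */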
6456 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6459 addr_space_sz = (priv->hw_version == MVPP21 ?
6460 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
6461 priv->swth_base[i] = base + i * addr_space_sz;
6464 if (priv->hw_version == MVPP21)
6465 priv->max_port_rxqs = 8;
6467 priv->max_port_rxqs = 32;
6469 if (dev_of_node(&pdev->dev)) {
6470 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6471 if (IS_ERR(priv->pp_clk))
6472 return PTR_ERR(priv->pp_clk);
6473 err = clk_prepare_enable(priv->pp_clk);
6477 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6478 if (IS_ERR(priv->gop_clk)) {
6479 err = PTR_ERR(priv->gop_clk);
6482 err = clk_prepare_enable(priv->gop_clk);
6486 if (priv->hw_version == MVPP22) {
6487 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
6488 if (IS_ERR(priv->mg_clk)) {
6489 err = PTR_ERR(priv->mg_clk);
6493 err = clk_prepare_enable(priv->mg_clk);
6497 priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
6498 if (IS_ERR(priv->mg_core_clk)) {
6499 priv->mg_core_clk = NULL;
6501 err = clk_prepare_enable(priv->mg_core_clk);
6507 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
6508 if (IS_ERR(priv->axi_clk)) {
6509 err = PTR_ERR(priv->axi_clk);
6510 if (err == -EPROBE_DEFER)
6511 goto err_mg_core_clk;
6512 priv->axi_clk = NULL;
6514 err = clk_prepare_enable(priv->axi_clk);
6516 goto err_mg_core_clk;
6519 /* Get system's tclk rate */
6520 priv->tclk = clk_get_rate(priv->pp_clk);
6521 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
6523 dev_err(&pdev->dev, "missing clock-frequency value\n");
6527 if (priv->hw_version == MVPP22) {
6528 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
6531 /* Sadly, the BM pools all share the same register to
6532 * store the high 32 bits of their address. So they
6533 * must all have the same high 32 bits, which forces
6534 * us to restrict coherent memory to DMA_BIT_MASK(32).
6536 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6541 /* Initialize network controller */
6542 err = mvpp2_init(pdev, priv);
6544 dev_err(&pdev->dev, "failed to initialize controller\n");
6548 /* Initialize ports */
6549 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
6550 err = mvpp2_port_probe(pdev, port_fwnode, priv);
6552 goto err_port_probe;
6555 if (priv->port_count == 0) {
6556 dev_err(&pdev->dev, "no ports enabled\n");
6561 /* Statistics must be gathered regularly because some of them (like
6562 * packet counters) are 32-bit registers and could overflow quite
6563 * quickly. For instance, a 10Gb link used at full bandwidth wraps the
6564 * 32-bit octet counter in under four seconds (2^32 bytes at 1.25GB/s
6565 * is about 3.4s), so use a workqueue to accumulate 64-bit counters.
6567 snprintf(priv->queue_name, sizeof(priv->queue_name),
6568 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
6569 priv->port_count > 1 ? "+" : "");
6570 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
6571 if (!priv->stats_queue) {
6573 goto err_port_probe;
6576 mvpp2_dbgfs_init(priv, pdev->name);
6578 platform_set_drvdata(pdev, priv);
6583 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
6584 if (priv->port_list[i])
6585 mvpp2_port_remove(priv->port_list[i]);
6589 clk_disable_unprepare(priv->axi_clk);
6592 if (priv->hw_version == MVPP22)
6593 clk_disable_unprepare(priv->mg_core_clk);
6595 if (priv->hw_version == MVPP22)
6596 clk_disable_unprepare(priv->mg_clk);
6598 clk_disable_unprepare(priv->gop_clk);
6600 clk_disable_unprepare(priv->pp_clk);
6604 static int mvpp2_remove(struct platform_device *pdev)
6606 struct mvpp2 *priv = platform_get_drvdata(pdev);
6607 struct fwnode_handle *fwnode = pdev->dev.fwnode;
6608 int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
6609 struct fwnode_handle *port_fwnode;
6611 mvpp2_dbgfs_cleanup(priv);
6613 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
6614 if (priv->port_list[i]) {
6615 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
6616 mvpp2_port_remove(priv->port_list[i]);
6621 destroy_workqueue(priv->stats_queue);
6623 if (priv->percpu_pools)
6624 poolnum = mvpp2_get_nrxqs(priv) * 2;
6626 for (i = 0; i < poolnum; i++) {
6627 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6629 mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
6632 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6633 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6635 dma_free_coherent(&pdev->dev,
6636 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6638 aggr_txq->descs_dma);
6641 if (is_acpi_node(port_fwnode))
6644 clk_disable_unprepare(priv->axi_clk);
6645 clk_disable_unprepare(priv->mg_core_clk);
6646 clk_disable_unprepare(priv->mg_clk);
6647 clk_disable_unprepare(priv->pp_clk);
6648 clk_disable_unprepare(priv->gop_clk);
6653 static const struct of_device_id mvpp2_match[] = {
6655 .compatible = "marvell,armada-375-pp2",
6656 .data = (void *)MVPP21,
6659 .compatible = "marvell,armada-7k-pp22",
6660 .data = (void *)MVPP22,
6664 MODULE_DEVICE_TABLE(of, mvpp2_match);
6666 static const struct acpi_device_id mvpp2_acpi_match[] = {
6667 { "MRVL0110", MVPP22 },
6670 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
6672 static struct platform_driver mvpp2_driver = {
6673 .probe = mvpp2_probe,
6674 .remove = mvpp2_remove,
6676 .name = MVPP2_DRIVER_NAME,
6677 .of_match_table = mvpp2_match,
6678 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
6682 module_platform_driver(mvpp2_driver);
6684 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6685 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
6686 MODULE_LICENSE("GPL v2");