// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}
static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}
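/*
 * Illustrative sketch (not part of the driver): how the CPU-to-thread
 * mapping above behaves. With nthreads == 4 (a made-up value, not read
 * from hardware), CPUs 0..3 map to threads 0..3 and CPUs 4..7 wrap
 * around to 0..3 again, so several CPUs may share one register window.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int nthreads = 4;	/* assumed for the example */
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> thread %u\n", cpu, cpu % nthreads);
	return 0;
}
#endif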
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}
/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
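/*
 * Illustrative sketch (not part of the driver): the indirect access
 * pattern described in the comment block above. A "window" register
 * such as MVPP2_RXQ_DESC_ADDR_REG is only meaningful after the queue
 * number has been written to MVPP2_RXQ_NUM_REG through the *same*
 * thread window, so both accesses must use one thread id. The helper
 * below is hypothetical and only shows the pairing; it is not driver
 * API.
 */
#if 0
static u32 example_rxq_desc_addr_read(struct mvpp2 *priv,
				      unsigned int thread, int rxq_id)
{
	/* Select the RX queue first, then read its related register,
	 * both through the same per-thread window.
	 */
	mvpp2_thread_write(priv, thread, MVPP2_RXQ_NUM_REG, rxq_id);
	return mvpp2_thread_read(priv, thread, MVPP2_RXQ_DESC_ADDR_REG);
}
#endif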
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of maximum RXQ */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
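/*
 * Illustrative sketch (not part of the driver): the rounding used
 * above. (n + 3) & ~0x3 rounds n up to the next multiple of 4, because
 * adding 3 carries any non-zero remainder past the next boundary and
 * the mask then clears the two low bits.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 8; n++)
		printf("%u -> %u\n", n, (n + 3) & ~0x3u);
	/* prints 1..4 -> 4 and 5..8 -> 8 */
	return 0;
}
#endif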
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
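/*
 * Illustrative sketch (not part of the driver): the physical TXQ
 * numbering above, using example stand-in values for the mvpp2.h
 * constants (the real MVPP2_MAX_TCONT / MVPP2_MAX_TXQ values live in
 * mvpp2.h). Each egress port owns a contiguous block of MVPP2_MAX_TXQ
 * physical queues.
 */
#if 0
#include <stdio.h>

/* Example values only; check mvpp2.h for the real ones */
#define EX_MAX_TCONT	6
#define EX_MAX_TXQ	8

int main(void)
{
	int port = 1, txq = 2;

	/* (6 + 1) * 8 + 2 == 58 under these example values */
	printf("phys txq = %d\n", (EX_MAX_TCONT + port) * EX_MAX_TXQ + txq);
	return 0;
}
#endif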
/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
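/*
 * Illustrative sketch (not part of the driver): the size constraints
 * enforced above. A pool of 1024 buffer pointers passes the
 * multiple-of-16 check and needs 2 * 8 * 1024 bytes of coherent memory
 * on PPv2.2 (two 64-bit words per pointer), while e.g. 1000 would be
 * rejected with -EINVAL.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int size = 1024;

	if (size % 16)	/* mirrors the IS_ALIGNED(size, 16) check */
		return 1;
	printf("PPv2.1: %zu bytes, PPv2.2: %zu bytes\n",
	       2 * sizeof(unsigned int) * size,
	       2 * sizeof(unsigned long long) * size);
	return 0;
}
#endif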
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
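/*
 * Illustrative sketch (not part of the driver): how the wide PPv2.2
 * buffer address is reassembled above from a 32-bit low word and the
 * high bits held in MVPP22_BM_ADDR_HIGH_ALLOC. The values below are
 * made up.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long lo = 0x12345678;	/* low 32 bits from the BM */
	unsigned long long hi = 0xab;		/* high bits from the HIGH reg */

	printf("full addr = 0x%llx\n", lo | (hi << 32));
	return 0;
}
#endif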
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		poolnum = mvpp2_get_nrxqs(priv) * 2;
		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}
static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
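/*
 * Illustrative sketch (not part of the driver): the single "high bits"
 * release word written above carries the upper 32 bits of both the DMA
 * address and the physical address, one per bitfield. The 8-bit field
 * layout below is hypothetical; the real masks and shift come from
 * mvpp2.h.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* example layout: DMA high bits in [7:0], phys high bits in [15:8] */
	unsigned int dma_hi = 0x01, phys_hi = 0x02;
	unsigned int val = (dma_hi & 0xff) | ((phys_hi & 0xff) << 8);

	printf("high-bits release word = 0x%04x\n", val);
	return 0;
}
#endif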
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}
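/*
 * Illustrative sketch (not part of the driver): the long/short pool
 * selection performed by the shared variant above, written as a
 * standalone decision keyed on the port packet size. 1518 stands in
 * for MVPP2_BM_LONG_PKT_SIZE, per the comment in
 * mvpp2_swf_bm_pool_init_shared().
 */
#if 0
#include <stdio.h>

int main(void)
{
	int pkt_size = 1600;		/* example port packet size */
	int long_pkt_size = 1518;	/* stand-in for MVPP2_BM_LONG_PKT_SIZE */

	if (pkt_size > long_pkt_size)
		printf("HW long <- SW jumbo, HW short <- SW long\n");
	else
		printf("HW long <- SW long, HW short <- SW short\n");
	return 0;
}
#endif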
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current thread's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}
/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};
static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}
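/*
 * Illustrative sketch (not part of the driver): how a 64-bit MIB
 * counter is assembled above from two 32-bit reads, low word at
 * "offset" and high word at "offset + 4". The values below are made
 * up.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long lo = 0xdeadbeef;	/* readl(base + offset) */
	unsigned long long hi = 0x2;		/* readl(base + offset + 4) */

	printf("counter = 0x%llx\n", lo + (hi << 32));
	return 0;
}
#endif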
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access; it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between, and TSO skbs are split with header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};
static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
						 ARRAY_SIZE(mvpp2_ethtool_xdp))
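/*
 * Worked example (not part of the driver): with the array sizes defined
 * above (27 MIB, 2 port, 9 per-txq, 4 per-rxq and 7 XDP entries), a
 * port with e.g. 8 TXQs and 4 RXQs exposes
 * 27 + 2 + 9 * 8 + 4 * 4 + 7 = 124 ethtool statistics.
 */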
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
		strscpy(data, mvpp2_ethtool_xdp[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}
static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{
	unsigned int start;
	unsigned int cpu;

	/* Gather XDP Statistics */
	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 xdp_redirect;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_xmit_err;
		u64 xdp_tx;
		u64 xdp_tx_err;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			xdp_redirect = cpu_stats->xdp_redirect;
			xdp_pass = cpu_stats->xdp_pass;
			xdp_drop = cpu_stats->xdp_drop;
			xdp_xmit = cpu_stats->xdp_xmit;
			xdp_xmit_err = cpu_stats->xdp_xmit_err;
			xdp_tx = cpu_stats->xdp_tx;
			xdp_tx_err = cpu_stats->xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		xdp_stats->xdp_redirect += xdp_redirect;
		xdp_stats->xdp_pass += xdp_pass;
		xdp_stats->xdp_drop += xdp_drop;
		xdp_stats->xdp_xmit += xdp_xmit;
		xdp_stats->xdp_xmit_err += xdp_xmit_err;
		xdp_stats->xdp_tx += xdp_tx;
		xdp_stats->xdp_tx_err += xdp_tx_err;
	}
}
static void mvpp2_read_stats(struct mvpp2_port *port)
{
	struct mvpp2_pcpu_stats xdp_stats = {};
	const struct mvpp2_ethtool_counter *s;
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);

	/* Gather XDP Statistics */
	mvpp2_get_xdp_stats(port, &xdp_stats);

	for (i = 0, s = mvpp2_ethtool_xdp;
	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
	     s++, i++) {
		switch (s->offset) {
		case ETHTOOL_XDP_REDIRECT:
			*pstats++ = xdp_stats.xdp_redirect;
			break;
		case ETHTOOL_XDP_PASS:
			*pstats++ = xdp_stats.xdp_pass;
			break;
		case ETHTOOL_XDP_DROP:
			*pstats++ = xdp_stats.xdp_drop;
			break;
		case ETHTOOL_XDP_TX:
			*pstats++ = xdp_stats.xdp_tx;
			break;
		case ETHTOOL_XDP_TX_ERR:
			*pstats++ = xdp_stats.xdp_tx_err;
			break;
		case ETHTOOL_XDP_XMIT:
			*pstats++ = xdp_stats.xdp_xmit;
			break;
		case ETHTOOL_XDP_XMIT_ERR:
			*pstats++ = xdp_stats.xdp_xmit_err;
			break;
		}
	}
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}
static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}
static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GBASER:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
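/*
 * Illustrative sketch (not part of the driver): the ring advance done
 * by MVPP2_QUEUE_NEXT_DESC() above, assuming the macro wraps at the
 * end of the ring the way the open-coded version below does.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int size = 4, idx = 3;
	int next = (idx < size - 1) ? idx + 1 : 0;	/* wrap to 0 */

	printf("next_desc_to_proc: %d -> %d\n", idx, next);
	return 0;
}
#endif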
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
2118 /* Check if there are enough free descriptors in aggregated txq.
2119 * If not, update the number of occupied descriptors and repeat the check.
2121 * Called only from mvpp2_tx(), so migration is disabled, using
2122 * smp_processor_id() is OK.
2124 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2125 struct mvpp2_tx_queue *aggr_txq, int num)
2127 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2128 /* Update number of occupied aggregated Tx descriptors */
2129 unsigned int thread =
2130 mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2131 u32 val = mvpp2_read_relaxed(port->priv,
2132 MVPP2_AGGR_TXQ_STATUS_REG(thread));
2134 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2136 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2137 return -ENOMEM;
2140 return 0;
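/* Example (hypothetical numbers): with MVPP2_AGGR_TXQ_SIZE == 256, a
 * stale count of 250 and num == 10 fails the first test; if the HW
 * pending counter read back shows only 240 outstanding descriptors,
 * the retry succeeds and 0 is returned. Callers such as mvpp2_tx()
 * treat a nonzero return as "no room" and drop the frame.
 */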
2142 /* Reserved Tx descriptors allocation request
2144 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2145 * only by mvpp2_tx(), so migration is disabled, using
2146 * smp_processor_id() is OK.
2148 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2149 struct mvpp2_tx_queue *txq, int num)
2151 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2152 struct mvpp2 *priv = port->priv;
2155 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2156 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2158 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2160 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
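/* Example of the request/result handshake (hypothetical numbers):
 * asking for a chunk of 64 descriptors on txq 3 writes
 * (3 << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | 64 to the request register, then
 * reads back how many descriptors the HW actually granted, which may be
 * fewer than requested:
 *
 *	granted = mvpp2_txq_alloc_reserved_desc(port, txq, 64);
 */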
2163 /* Check if there are enough reserved descriptors for transmission.
2164 * If not, request chunk of reserved descriptors and check again.
2166 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2167 struct mvpp2_tx_queue *txq,
2168 struct mvpp2_txq_pcpu *txq_pcpu,
2171 int req, desc_count;
2172 unsigned int thread;
2174 if (txq_pcpu->reserved_num >= num)
2175 return 0;
2177 /* Not enough descriptors reserved! Update the reserved descriptor
2178 * count and check again.
2181 desc_count = 0;
2182 /* Compute total of used descriptors */
2183 for (thread = 0; thread < port->priv->nthreads; thread++) {
2184 struct mvpp2_txq_pcpu *txq_pcpu_aux;
2186 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2187 desc_count += txq_pcpu_aux->count;
2188 desc_count += txq_pcpu_aux->reserved_num;
2191 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2194 if ((desc_count + req) >
2195 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2196 return -ENOMEM;
2198 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2200 /* OK, the descriptor could have been updated: check again. */
2201 if (txq_pcpu->reserved_num < num)
2202 return -ENOMEM;
2204 return 0;
2206 /* Release the last allocated Tx descriptor. Useful to handle DMA
2207 * mapping failures in the Tx path.
2209 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2211 if (txq->next_desc_to_proc == 0)
2212 txq->next_desc_to_proc = txq->last_desc - 1;
2214 txq->next_desc_to_proc--;
2217 /* Set Tx descriptors fields relevant for CSUM calculation */
2218 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2219 int ip_hdr_len, int l4_proto)
2223 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2224 * G_L4_chk, L4_type required only for checksum calculation
2226 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2227 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2228 command |= MVPP2_TXD_IP_CSUM_DISABLE;
2230 if (l3_proto == htons(ETH_P_IP)) {
2231 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
2232 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
2233 } else {
2234 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
2235 }
2237 if (l4_proto == IPPROTO_TCP) {
2238 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
2239 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2240 } else if (l4_proto == IPPROTO_UDP) {
2241 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
2242 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2243 } else {
2244 command |= MVPP2_TXD_L4_CSUM_NOT;
2245 }
2247 return command;
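/* Worked example: for a TCP-over-IPv4 frame with l3_offs == 14 (an
 * untagged Ethernet header) and ip_hdr_len == 5 (a 20-byte IP header
 * expressed in 32-bit words), the returned command enables both the
 * IPv4 and the TCP checksum generators:
 *
 *	cmd = mvpp2_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP);
 */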
2250 /* Get number of sent descriptors and decrement counter.
2251 * The number of sent descriptors is returned.
2254 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2255 * (migration disabled) and from the TX completion tasklet (migration
2256 * disabled) so using smp_processor_id() is OK.
2258 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2259 struct mvpp2_tx_queue *txq)
2263 /* Reading status reg resets transmitted descriptor counter */
2264 val = mvpp2_thread_read_relaxed(port->priv,
2265 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2266 MVPP2_TXQ_SENT_REG(txq->id));
2268 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2269 MVPP2_TRANSMITTED_COUNT_OFFSET;
2272 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2273 * disabled, therefore using smp_processor_id() is OK.
2275 static void mvpp2_txq_sent_counter_clear(void *arg)
2277 struct mvpp2_port *port = arg;
2280 /* If the thread isn't used, don't do anything */
2281 if (smp_processor_id() >= port->priv->nthreads)
2282 return;
2284 for (queue = 0; queue < port->ntxqs; queue++) {
2285 int id = port->txqs[queue]->id;
2287 mvpp2_thread_read(port->priv,
2288 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2289 MVPP2_TXQ_SENT_REG(id));
2293 /* Set max sizes for Tx queues */
2294 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2297 int txq, tx_port_num;
2299 mtu = port->pkt_size * 8;
2300 if (mtu > MVPP2_TXP_MTU_MAX)
2301 mtu = MVPP2_TXP_MTU_MAX;
2303 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2304 mtu = 3 * mtu;
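/* Worked example: pkt_size == 1518 gives mtu = 1518 * 8 == 12144,
 * tripled to 36432 by the workaround; a value above MVPP2_TXP_MTU_MAX
 * would already have been clamped before this point.
 */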
2306 /* Indirect access to registers */
2307 tx_port_num = mvpp2_egress_port(port);
2308 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2311 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2312 val &= ~MVPP2_TXP_MTU_MAX;
2314 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2316 /* TXP token size and all TXQ token sizes must be larger than the MTU */
2317 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2318 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2321 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2323 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2326 for (txq = 0; txq < port->ntxqs; txq++) {
2327 val = mvpp2_read(port->priv,
2328 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2329 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2333 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2335 mvpp2_write(port->priv,
2336 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2342 /* Set the number of packets that will be received before Rx interrupt
2343 * will be generated by HW.
2345 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2346 struct mvpp2_rx_queue *rxq)
2348 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2350 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2351 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2353 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2354 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2360 /* For some reason in the LSP this is done on each CPU. Why? */
2361 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2362 struct mvpp2_tx_queue *txq)
2364 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2367 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2368 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2370 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2371 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2372 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2377 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2379 u64 tmp = (u64)clk_hz * usec;
2381 do_div(tmp, USEC_PER_SEC);
2383 return tmp > U32_MAX ? U32_MAX : tmp;
2386 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2388 u64 tmp = (u64)cycles * USEC_PER_SEC;
2390 do_div(tmp, clk_hz);
2392 return tmp > U32_MAX ? U32_MAX : tmp;
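/* Worked example (assuming a hypothetical 250 MHz tclk): 100 usec
 * converts to 250000000 * 100 / USEC_PER_SEC == 25000 cycles, and
 * 25000 cycles convert back to exactly 100 usec, so the two helpers
 * round-trip whenever no clamping occurs.
 */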
2395 /* Set the time delay in usec before Rx interrupt */
2396 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2397 struct mvpp2_rx_queue *rxq)
2399 unsigned long freq = port->priv->tclk;
2400 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2402 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2404 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2406 /* re-evaluate to get actual register value */
2407 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2410 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2413 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2415 unsigned long freq = port->priv->tclk;
2416 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2418 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2419 port->tx_time_coal =
2420 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2422 /* re-evaluate to get actual register value */
2423 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2426 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
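/* Note on the clamp-then-recompute pattern used by both the Rx and Tx
 * variants above: if the user asks for more usec than the HW threshold
 * field can hold, the stored coalescing value is first rewritten to the
 * largest representable usec value and then reconverted, so ethtool
 * later reports what the HW is really doing rather than what was
 * requested.
 */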
2429 /* Free Tx queue skbuffs */
2430 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2431 struct mvpp2_tx_queue *txq,
2432 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2436 for (i = 0; i < num; i++) {
2437 struct mvpp2_txq_pcpu_buf *tx_buf =
2438 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2440 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2441 tx_buf->type != MVPP2_TYPE_XDP_TX)
2442 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2443 tx_buf->size, DMA_TO_DEVICE);
2444 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2445 dev_kfree_skb_any(tx_buf->skb);
2446 else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2447 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2448 xdp_return_frame(tx_buf->xdpf);
2450 mvpp2_txq_inc_get(txq_pcpu);
2454 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2457 int queue = fls(cause) - 1;
2459 return port->rxqs[queue];
2462 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2465 int queue = fls(cause) - 1;
2467 return port->txqs[queue];
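/* Example of the fls() decode used by both helpers above: a cause
 * bitmap of 0b0100 has fls() == 3, so the highest pending queue is
 * index 2; the caller clears that bit and loops until the bitmap is
 * empty.
 */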
2470 /* Handle end of transmission */
2471 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2472 struct mvpp2_txq_pcpu *txq_pcpu)
2474 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2477 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2478 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
2480 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2483 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2485 txq_pcpu->count -= tx_done;
2487 if (netif_tx_queue_stopped(nq))
2488 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2489 netif_tx_wake_queue(nq);
2492 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2493 unsigned int thread)
2495 struct mvpp2_tx_queue *txq;
2496 struct mvpp2_txq_pcpu *txq_pcpu;
2497 unsigned int tx_todo = 0;
2500 txq = mvpp2_get_tx_queue(port, cause);
2504 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2506 if (txq_pcpu->count) {
2507 mvpp2_txq_done(port, txq, txq_pcpu);
2508 tx_todo += txq_pcpu->count;
2511 cause &= ~(1 << txq->log_id);
2516 /* Rx/Tx queue initialization/cleanup methods */
2518 /* Allocate and initialize descriptors for aggr TXQ */
2519 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2520 struct mvpp2_tx_queue *aggr_txq,
2521 unsigned int thread, struct mvpp2 *priv)
2525 /* Allocate memory for TX descriptors */
2526 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2527 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2528 &aggr_txq->descs_dma, GFP_KERNEL);
2529 if (!aggr_txq->descs)
2532 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2534 /* WA: the aggregated TXQ is not reset by HW, so resume from the index it currently holds */
2535 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2536 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2538 /* Set Tx descriptors queue starting address indirect
2541 if (priv->hw_version == MVPP21)
2542 txq_dma = aggr_txq->descs_dma;
2544 txq_dma = aggr_txq->descs_dma >>
2545 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2547 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2548 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2549 MVPP2_AGGR_TXQ_SIZE);
2554 /* Create a specified Rx queue */
2555 static int mvpp2_rxq_init(struct mvpp2_port *port,
2556 struct mvpp2_rx_queue *rxq)
2558 struct mvpp2 *priv = port->priv;
2559 unsigned int thread;
2563 rxq->size = port->rx_ring_size;
2565 /* Allocate memory for RX descriptors */
2566 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2567 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2568 &rxq->descs_dma, GFP_KERNEL);
2572 rxq->last_desc = rxq->size - 1;
2574 /* Zero occupied and non-occupied counters - direct access */
2575 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2577 /* Set Rx descriptors queue starting address - indirect access */
2578 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2579 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2580 if (port->priv->hw_version == MVPP21)
2581 rxq_dma = rxq->descs_dma;
2583 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2584 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2585 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2586 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2590 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2592 /* Set coalescing pkts and time */
2593 mvpp2_rx_pkts_coal_set(port, rxq);
2594 mvpp2_rx_time_coal_set(port, rxq);
2596 /* Add number of descriptors ready for receiving packets */
2597 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2599 if (priv->percpu_pools) {
2600 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
2604 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
2606 goto err_unregister_rxq_short;
2608 /* Every RXQ has a pool for short and another for long packets */
2609 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2611 priv->page_pool[rxq->logic_rxq]);
2613 goto err_unregister_rxq_long;
2615 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2617 priv->page_pool[rxq->logic_rxq +
2620 goto err_unregister_mem_rxq_short;
2625 err_unregister_mem_rxq_short:
2626 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
2627 err_unregister_rxq_long:
2628 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2629 err_unregister_rxq_short:
2630 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2632 dma_free_coherent(port->dev->dev.parent,
2633 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2634 rxq->descs, rxq->descs_dma);
2638 /* Push packets received by the RXQ to BM pool */
2639 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
2640 struct mvpp2_rx_queue *rxq)
2644 rx_received = mvpp2_rxq_received(port, rxq->id);
2648 for (i = 0; i < rx_received; i++) {
2649 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2650 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2653 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2654 MVPP2_RXD_BM_POOL_ID_OFFS;
2656 mvpp2_bm_pool_put(port, pool,
2657 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
2658 mvpp2_rxdesc_cookie_get(port, rx_desc));
2660 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
2663 /* Cleanup Rx queue */
2664 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
2665 struct mvpp2_rx_queue *rxq)
2667 unsigned int thread;
2669 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
2670 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2672 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
2673 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2675 mvpp2_rxq_drop_pkts(port, rxq);
2678 dma_free_coherent(port->dev->dev.parent,
2679 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2685 rxq->next_desc_to_proc = 0;
2688 /* Clear Rx descriptors queue starting address and size;
2689 * free descriptor number
2691 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2692 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2693 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2694 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
2695 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
2699 /* Create and initialize a Tx queue */
2700 static int mvpp2_txq_init(struct mvpp2_port *port,
2701 struct mvpp2_tx_queue *txq)
2704 unsigned int thread;
2705 int desc, desc_per_txq, tx_port_num;
2706 struct mvpp2_txq_pcpu *txq_pcpu;
2708 txq->size = port->tx_ring_size;
2710 /* Allocate memory for Tx descriptors */
2711 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
2712 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2713 &txq->descs_dma, GFP_KERNEL);
2717 txq->last_desc = txq->size - 1;
2719 /* Set Tx descriptors queue starting address - indirect access */
2720 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2721 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2722 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
2724 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
2725 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
2726 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
2727 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
2728 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
2729 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
2730 val &= ~MVPP2_TXQ_PENDING_MASK;
2731 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
2733 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
2734 * for each existing TXQ.
2735 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT
2736 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS
2738 desc_per_txq = 16;
2739 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
2740 (txq->log_id * desc_per_txq);
2742 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
2743 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
2744 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
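/* Worked example (assuming MVPP2_MAX_TXQ == 8, which is not defined in
 * this file): for port 1, txq log_id 2 and desc_per_txq == 16, the
 * prefetch window starts at desc = 1 * 8 * 16 + 2 * 16 == 160, with a
 * 16-descriptor buffer and a threshold of 8.
 */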
2747 /* WRR / EJP configuration - indirect access */
2748 tx_port_num = mvpp2_egress_port(port);
2749 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2751 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
2752 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
2753 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
2754 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
2755 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
2757 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
2758 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
2761 for (thread = 0; thread < port->priv->nthreads; thread++) {
2762 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2763 txq_pcpu->size = txq->size;
2764 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
2765 sizeof(*txq_pcpu->buffs),
2767 if (!txq_pcpu->buffs)
2770 txq_pcpu->count = 0;
2771 txq_pcpu->reserved_num = 0;
2772 txq_pcpu->txq_put_index = 0;
2773 txq_pcpu->txq_get_index = 0;
2774 txq_pcpu->tso_headers = NULL;
2776 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
2777 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
2779 txq_pcpu->tso_headers =
2780 dma_alloc_coherent(port->dev->dev.parent,
2781 txq_pcpu->size * TSO_HEADER_SIZE,
2782 &txq_pcpu->tso_headers_dma,
2784 if (!txq_pcpu->tso_headers)
2791 /* Free allocated TXQ resources */
2792 static void mvpp2_txq_deinit(struct mvpp2_port *port,
2793 struct mvpp2_tx_queue *txq)
2795 struct mvpp2_txq_pcpu *txq_pcpu;
2796 unsigned int thread;
2798 for (thread = 0; thread < port->priv->nthreads; thread++) {
2799 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2800 kfree(txq_pcpu->buffs);
2802 if (txq_pcpu->tso_headers)
2803 dma_free_coherent(port->dev->dev.parent,
2804 txq_pcpu->size * TSO_HEADER_SIZE,
2805 txq_pcpu->tso_headers,
2806 txq_pcpu->tso_headers_dma);
2808 txq_pcpu->tso_headers = NULL;
2812 dma_free_coherent(port->dev->dev.parent,
2813 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2814 txq->descs, txq->descs_dma);
2818 txq->next_desc_to_proc = 0;
2821 /* Set minimum bandwidth for disabled TXQs */
2822 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2824 /* Set Tx descriptors queue starting address and size */
2825 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2826 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2827 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
2828 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
2832 /* Cleanup Tx ports */
2833 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
2835 struct mvpp2_txq_pcpu *txq_pcpu;
2837 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2840 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2841 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
2842 val |= MVPP2_TXQ_DRAIN_EN_MASK;
2843 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2845 /* The napi queue has been stopped so wait for all packets
2846 * to be transmitted.
2850 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2851 netdev_warn(port->dev,
2852 "port %d: cleaning queue %d timed out\n",
2853 port->id, txq->log_id);
2859 pending = mvpp2_thread_read(port->priv, thread,
2860 MVPP2_TXQ_PENDING_REG);
2861 pending &= MVPP2_TXQ_PENDING_MASK;
2864 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2865 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2868 for (thread = 0; thread < port->priv->nthreads; thread++) {
2869 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2871 /* Release all packets */
2872 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
2875 txq_pcpu->count = 0;
2876 txq_pcpu->txq_put_index = 0;
2877 txq_pcpu->txq_get_index = 0;
2881 /* Cleanup all Tx queues */
2882 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
2884 struct mvpp2_tx_queue *txq;
2888 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
2890 /* Reset Tx ports and delete Tx queues */
2891 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
2892 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2894 for (queue = 0; queue < port->ntxqs; queue++) {
2895 txq = port->txqs[queue];
2896 mvpp2_txq_clean(port, txq);
2897 mvpp2_txq_deinit(port, txq);
2900 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2902 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
2903 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2906 /* Cleanup all Rx queues */
2907 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
2911 for (queue = 0; queue < port->nrxqs; queue++)
2912 mvpp2_rxq_deinit(port, port->rxqs[queue]);
2915 /* Init all Rx queues for port */
2916 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
2920 for (queue = 0; queue < port->nrxqs; queue++) {
2921 err = mvpp2_rxq_init(port, port->rxqs[queue]);
2928 mvpp2_cleanup_rxqs(port);
2932 /* Init all tx queues for port */
2933 static int mvpp2_setup_txqs(struct mvpp2_port *port)
2935 struct mvpp2_tx_queue *txq;
2938 for (queue = 0; queue < port->ntxqs; queue++) {
2939 txq = port->txqs[queue];
2940 err = mvpp2_txq_init(port, txq);
2944 /* Assign this queue to a CPU */
2945 if (queue < num_possible_cpus())
2946 netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
2949 if (port->has_tx_irqs) {
2950 mvpp2_tx_time_coal_set(port);
2951 for (queue = 0; queue < port->ntxqs; queue++) {
2952 txq = port->txqs[queue];
2953 mvpp2_tx_pkts_coal_set(port, txq);
2957 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2961 mvpp2_cleanup_txqs(port);
2965 /* The callback for per-port interrupt */
2966 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
2968 struct mvpp2_queue_vector *qv = dev_id;
2970 mvpp2_qvec_interrupt_disable(qv);
2972 napi_schedule(&qv->napi);
2977 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
2979 struct net_device *dev = port->dev;
2981 if (port->phylink) {
2982 phylink_mac_change(port->phylink, link);
2986 if (!netif_running(dev))
2990 mvpp2_interrupts_enable(port);
2992 mvpp2_egress_enable(port);
2993 mvpp2_ingress_enable(port);
2994 netif_carrier_on(dev);
2995 netif_tx_wake_all_queues(dev);
2997 netif_tx_stop_all_queues(dev);
2998 netif_carrier_off(dev);
2999 mvpp2_ingress_disable(port);
3000 mvpp2_egress_disable(port);
3002 mvpp2_interrupts_disable(port);
3006 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3011 val = readl(port->base + MVPP22_XLG_INT_STAT);
3012 if (val & MVPP22_XLG_INT_STAT_LINK) {
3013 val = readl(port->base + MVPP22_XLG_STATUS);
3014 if (val & MVPP22_XLG_STATUS_LINK_UP)
3016 mvpp2_isr_handle_link(port, link);
3020 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3025 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3026 phy_interface_mode_is_8023z(port->phy_interface) ||
3027 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3028 val = readl(port->base + MVPP22_GMAC_INT_STAT);
3029 if (val & MVPP22_GMAC_INT_STAT_LINK) {
3030 val = readl(port->base + MVPP2_GMAC_STATUS0);
3031 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
3033 mvpp2_isr_handle_link(port, link);
3038 /* Per-port interrupt for link status changes */
3039 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3041 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3044 mvpp22_gop_mask_irq(port);
3046 if (mvpp2_port_supports_xlg(port) &&
3047 mvpp2_is_xlg(port->phy_interface)) {
3048 /* Check the external status register */
3049 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3050 if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3051 mvpp2_isr_handle_xlg(port);
3053 /* If it's not the XLG, we must be using the GMAC.
3054 * Check the summary status.
3056 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3057 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3058 mvpp2_isr_handle_gmac_internal(port);
3061 mvpp22_gop_unmask_irq(port);
3065 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3067 struct net_device *dev;
3068 struct mvpp2_port *port;
3069 struct mvpp2_port_pcpu *port_pcpu;
3070 unsigned int tx_todo, cause;
3072 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3073 dev = port_pcpu->dev;
3075 if (!netif_running(dev))
3076 return HRTIMER_NORESTART;
3078 port_pcpu->timer_scheduled = false;
3079 port = netdev_priv(dev);
3081 /* Process all the Tx queues */
3082 cause = (1 << port->ntxqs) - 1;
3083 tx_todo = mvpp2_tx_done(port, cause,
3084 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3086 /* Set the timer in case not all the packets were processed */
3087 if (tx_todo && !port_pcpu->timer_scheduled) {
3088 port_pcpu->timer_scheduled = true;
3089 hrtimer_forward_now(&port_pcpu->tx_done_timer,
3090 MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3092 return HRTIMER_RESTART;
3094 return HRTIMER_NORESTART;
3097 /* Main RX/TX processing routines */
3099 /* Display more error info */
3100 static void mvpp2_rx_error(struct mvpp2_port *port,
3101 struct mvpp2_rx_desc *rx_desc)
3103 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3104 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3105 char *err_str = NULL;
3107 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3108 case MVPP2_RXD_ERR_CRC:
3111 case MVPP2_RXD_ERR_OVERRUN:
3112 err_str = "overrun";
3114 case MVPP2_RXD_ERR_RESOURCE:
3115 err_str = "resource";
3118 if (err_str && net_ratelimit())
3119 netdev_err(port->dev,
3120 "bad rx status %08x (%s error), size=%zu\n",
3121 status, err_str, sz);
3124 /* Handle RX checksum offload */
3125 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
3126 struct sk_buff *skb)
3128 if (((status & MVPP2_RXD_L3_IP4) &&
3129 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3130 (status & MVPP2_RXD_L3_IP6))
3131 if (((status & MVPP2_RXD_L4_UDP) ||
3132 (status & MVPP2_RXD_L4_TCP)) &&
3133 (status & MVPP2_RXD_L4_CSUM_OK)) {
3135 skb->ip_summed = CHECKSUM_UNNECESSARY;
3139 skb->ip_summed = CHECKSUM_NONE;
3142 /* Allocate a new skb and add it to BM pool */
3143 static int mvpp2_rx_refill(struct mvpp2_port *port,
3144 struct mvpp2_bm_pool *bm_pool,
3145 struct page_pool *page_pool, int pool)
3147 dma_addr_t dma_addr;
3148 phys_addr_t phys_addr;
3151 buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3152 &dma_addr, &phys_addr, GFP_ATOMIC);
3156 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3161 /* Handle tx checksum */
3162 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3164 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3167 __be16 l3_proto = vlan_get_protocol(skb);
3169 if (l3_proto == htons(ETH_P_IP)) {
3170 struct iphdr *ip4h = ip_hdr(skb);
3172 /* Calculate IPv4 checksum and L4 checksum */
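/* Note: ihl is the IPv4 header length in 32-bit words (5 for a
 * 20-byte header); mvpp2_txq_desc_csum() passes it through unchanged.
 */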
3173 ip_hdr_len = ip4h->ihl;
3174 l4_proto = ip4h->protocol;
3175 } else if (l3_proto == htons(ETH_P_IPV6)) {
3176 struct ipv6hdr *ip6h = ipv6_hdr(skb);
3178 /* Read l4_protocol from one of the IPv6 extension headers */
3179 if (skb_network_header_len(skb) > 0)
3180 ip_hdr_len = (skb_network_header_len(skb) >> 2);
3181 l4_proto = ip6h->nexthdr;
3183 return MVPP2_TXD_L4_CSUM_NOT;
3186 return mvpp2_txq_desc_csum(skb_network_offset(skb),
3187 l3_proto, ip_hdr_len, l4_proto);
3190 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3193 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3195 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3196 struct mvpp2_tx_queue *aggr_txq;
3197 struct mvpp2_txq_pcpu *txq_pcpu;
3198 struct mvpp2_tx_queue *txq;
3199 struct netdev_queue *nq;
3201 txq = port->txqs[txq_id];
3202 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3203 nq = netdev_get_tx_queue(port->dev, txq_id);
3204 aggr_txq = &port->priv->aggr_txqs[thread];
3206 txq_pcpu->reserved_num -= nxmit;
3207 txq_pcpu->count += nxmit;
3208 aggr_txq->count += nxmit;
3210 /* Enable transmit */
3212 mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3214 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3215 netif_tx_stop_queue(nq);
3217 /* Finalize TX processing */
3218 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3219 mvpp2_txq_done(port, txq, txq_pcpu);
3223 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3224 struct xdp_frame *xdpf, bool dma_map)
3226 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3227 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3228 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3229 enum mvpp2_tx_buf_type buf_type;
3230 struct mvpp2_txq_pcpu *txq_pcpu;
3231 struct mvpp2_tx_queue *aggr_txq;
3232 struct mvpp2_tx_desc *tx_desc;
3233 struct mvpp2_tx_queue *txq;
3234 int ret = MVPP2_XDP_TX;
3235 dma_addr_t dma_addr;
3237 txq = port->txqs[txq_id];
3238 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3239 aggr_txq = &port->priv->aggr_txqs[thread];
3241 /* Check number of available descriptors */
3242 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3243 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3244 ret = MVPP2_XDP_DROPPED;
3248 /* Get a descriptor for the first part of the packet */
3249 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3250 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3251 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3254 /* XDP_REDIRECT or AF_XDP */
3255 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3256 xdpf->len, DMA_TO_DEVICE);
3258 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3259 mvpp2_txq_desc_put(txq);
3260 ret = MVPP2_XDP_DROPPED;
3264 buf_type = MVPP2_TYPE_XDP_NDO;
3267 struct page *page = virt_to_page(xdpf->data);
3269 dma_addr = page_pool_get_dma_addr(page) +
3270 sizeof(*xdpf) + xdpf->headroom;
3271 dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3272 xdpf->len, DMA_BIDIRECTIONAL);
3274 buf_type = MVPP2_TYPE_XDP_TX;
3277 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3279 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3280 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3287 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3289 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3290 struct xdp_frame *xdpf;
3294 xdpf = xdp_convert_buff_to_frame(xdp);
3295 if (unlikely(!xdpf))
3296 return MVPP2_XDP_DROPPED;
3298 /* The first half of the TX queues is used for XPS,
3299 * the second half for XDP_TX
3301 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
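/* Example (hypothetical sizing): with ntxqs == 8, XDP_TX from the CPU
 * mapped to thread 1 lands on txq_id == 1 + 4 == 5, keeping queues 0-3
 * for the regular XPS path.
 */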
3303 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3304 if (ret == MVPP2_XDP_TX) {
3305 u64_stats_update_begin(&stats->syncp);
3306 stats->tx_bytes += xdpf->len;
3307 stats->tx_packets++;
3309 u64_stats_update_end(&stats->syncp);
3311 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3313 u64_stats_update_begin(&stats->syncp);
3314 stats->xdp_tx_err++;
3315 u64_stats_update_end(&stats->syncp);
3322 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3323 struct xdp_frame **frames, u32 flags)
3325 struct mvpp2_port *port = netdev_priv(dev);
3326 int i, nxmit_byte = 0, nxmit = num_frame;
3327 struct mvpp2_pcpu_stats *stats;
3331 if (unlikely(test_bit(0, &port->state)))
3334 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3337 /* The first half of the TX queues is used for XPS,
3338 * the second half for XDP_TX
3340 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3342 for (i = 0; i < num_frame; i++) {
3343 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3344 if (ret == MVPP2_XDP_TX) {
3345 nxmit_byte += frames[i]->len;
3347 xdp_return_frame_rx_napi(frames[i]);
3352 if (likely(nxmit > 0))
3353 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3355 stats = this_cpu_ptr(port->stats);
3356 u64_stats_update_begin(&stats->syncp);
3357 stats->tx_bytes += nxmit_byte;
3358 stats->tx_packets += nxmit;
3359 stats->xdp_xmit += nxmit;
3360 stats->xdp_xmit_err += num_frame - nxmit;
3361 u64_stats_update_end(&stats->syncp);
3367 mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3368 struct bpf_prog *prog, struct xdp_buff *xdp,
3369 struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
3371 unsigned int len, sync, err;
3375 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3376 act = bpf_prog_run_xdp(prog, xdp);
3378 /* Due to xdp_adjust_tail, the DMA sync for_device must cover the max length the CPU touched */
3379 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3380 sync = max(sync, len);
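/* The recycled page must be synced for the larger of the pre- and
 * post-run lengths: an XDP program may have shrunk the frame with
 * bpf_xdp_adjust_tail(), but the CPU still dirtied cachelines up to
 * the original data_end.
 */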
3385 ret = MVPP2_XDP_PASS;
3388 err = xdp_do_redirect(port->dev, xdp, prog);
3389 if (unlikely(err)) {
3390 ret = MVPP2_XDP_DROPPED;
3391 page = virt_to_head_page(xdp->data);
3392 page_pool_put_page(pp, page, sync, true);
3394 ret = MVPP2_XDP_REDIR;
3395 stats->xdp_redirect++;
3399 ret = mvpp2_xdp_xmit_back(port, xdp);
3400 if (ret != MVPP2_XDP_TX) {
3401 page = virt_to_head_page(xdp->data);
3402 page_pool_put_page(pp, page, sync, true);
3406 bpf_warn_invalid_xdp_action(act);
3409 trace_xdp_exception(port->dev, prog, act);
3412 page = virt_to_head_page(xdp->data);
3413 page_pool_put_page(pp, page, sync, true);
3414 ret = MVPP2_XDP_DROPPED;
3422 /* Main rx processing */
3423 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3424 int rx_todo, struct mvpp2_rx_queue *rxq)
3426 struct net_device *dev = port->dev;
3427 struct mvpp2_pcpu_stats ps = {};
3428 enum dma_data_direction dma_dir;
3429 struct bpf_prog *xdp_prog;
3430 struct xdp_buff xdp;
3437 xdp_prog = READ_ONCE(port->xdp_prog);
3439 /* Get number of received packets and clamp the to-do */
3440 rx_received = mvpp2_rxq_received(port, rxq->id);
3441 if (rx_todo > rx_received)
3442 rx_todo = rx_received;
3444 while (rx_done < rx_todo) {
3445 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3446 struct mvpp2_bm_pool *bm_pool;
3447 struct page_pool *pp = NULL;
3448 struct sk_buff *skb;
3449 unsigned int frag_size;
3450 dma_addr_t dma_addr;
3451 phys_addr_t phys_addr;
3453 int pool, rx_bytes, err, ret;
3457 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3458 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3459 rx_bytes -= MVPP2_MH_SIZE;
3460 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3461 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3462 data = (void *)phys_to_virt(phys_addr);
3464 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3465 MVPP2_RXD_BM_POOL_ID_OFFS;
3466 bm_pool = &port->priv->bm_pools[pool];
3468 /* In case of an error, release the requested buffer pointer
3469 * to the Buffer Manager. This request process is controlled
3470 * by the hardware, and the information about the buffer is
3471 * contained in the RX descriptor.
3473 if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3474 goto err_drop_frame;
3476 if (port->priv->percpu_pools) {
3477 pp = port->priv->page_pool[pool];
3478 dma_dir = page_pool_get_dma_dir(pp);
3480 dma_dir = DMA_FROM_DEVICE;
3483 dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3484 rx_bytes + MVPP2_MH_SIZE,
3487 /* Prefetch header */
3490 if (bm_pool->frag_size > PAGE_SIZE)
3493 frag_size = bm_pool->frag_size;
3496 xdp.data_hard_start = data;
3497 xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
3498 xdp.data_end = xdp.data + rx_bytes;
3499 xdp.frame_sz = PAGE_SIZE;
3501 if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3502 xdp.rxq = &rxq->xdp_rxq_short;
3504 xdp.rxq = &rxq->xdp_rxq_long;
3506 xdp_set_data_meta_invalid(&xdp);
3508 ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
3512 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3514 netdev_err(port->dev, "failed to refill BM pools\n");
3515 goto err_drop_frame;
3519 ps.rx_bytes += rx_bytes;
3524 skb = build_skb(data, frag_size);
3526 netdev_warn(port->dev, "skb build failed\n");
3527 goto err_drop_frame;
3530 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3532 netdev_err(port->dev, "failed to refill BM pools\n");
3533 dev_kfree_skb_any(skb);
3534 goto err_drop_frame;
3538 page_pool_release_page(pp, virt_to_page(data));
3540 dma_unmap_single_attrs(dev->dev.parent, dma_addr,
3541 bm_pool->buf_size, DMA_FROM_DEVICE,
3542 DMA_ATTR_SKIP_CPU_SYNC);
3545 ps.rx_bytes += rx_bytes;
3547 skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3548 skb_put(skb, rx_bytes);
3549 skb->protocol = eth_type_trans(skb, dev);
3550 mvpp2_rx_csum(port, rx_status, skb);
3552 napi_gro_receive(napi, skb);
3556 dev->stats.rx_errors++;
3557 mvpp2_rx_error(port, rx_desc);
3558 /* Return the buffer to the pool */
3559 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3564 if (xdp_ret & MVPP2_XDP_REDIR)
3567 if (ps.rx_packets) {
3568 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3570 u64_stats_update_begin(&stats->syncp);
3571 stats->rx_packets += ps.rx_packets;
3572 stats->rx_bytes += ps.rx_bytes;
3574 stats->xdp_redirect += ps.xdp_redirect;
3575 stats->xdp_pass += ps.xdp_pass;
3576 stats->xdp_drop += ps.xdp_drop;
3577 u64_stats_update_end(&stats->syncp);
3580 /* Update Rx queue management counters */
3582 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
3588 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3589 struct mvpp2_tx_desc *desc)
3591 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3592 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3594 dma_addr_t buf_dma_addr =
3595 mvpp2_txdesc_dma_addr_get(port, desc);
3597 mvpp2_txdesc_size_get(port, desc);
3598 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
3599 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
3600 buf_sz, DMA_TO_DEVICE);
3601 mvpp2_txq_desc_put(txq);
3604 /* Handle tx fragmentation processing */
3605 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
3606 struct mvpp2_tx_queue *aggr_txq,
3607 struct mvpp2_tx_queue *txq)
3609 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3610 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3611 struct mvpp2_tx_desc *tx_desc;
3613 dma_addr_t buf_dma_addr;
3615 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3616 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3617 void *addr = skb_frag_address(frag);
3619 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3620 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3621 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
3623 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
3624 skb_frag_size(frag),
3626 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
3627 mvpp2_txq_desc_put(txq);
3631 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3633 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
3634 /* Last descriptor */
3635 mvpp2_txdesc_cmd_set(port, tx_desc,
3637 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3639 /* Descriptor in the middle: Not First, Not Last */
3640 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3641 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3647 /* Release all descriptors that were used to map fragments of
3648 * this packet, as well as the corresponding DMA mappings
3650 for (i = i - 1; i >= 0; i--) {
3651 tx_desc = txq->descs + i;
3652 tx_desc_unmap_put(port, txq, tx_desc);
3658 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
3659 struct net_device *dev,
3660 struct mvpp2_tx_queue *txq,
3661 struct mvpp2_tx_queue *aggr_txq,
3662 struct mvpp2_txq_pcpu *txq_pcpu,
3665 struct mvpp2_port *port = netdev_priv(dev);
3666 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3669 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3670 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
3672 addr = txq_pcpu->tso_headers_dma +
3673 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3674 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
3676 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
3678 MVPP2_TXD_PADDING_DISABLE);
3679 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3682 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
3683 struct net_device *dev, struct tso_t *tso,
3684 struct mvpp2_tx_queue *txq,
3685 struct mvpp2_tx_queue *aggr_txq,
3686 struct mvpp2_txq_pcpu *txq_pcpu,
3687 int sz, bool left, bool last)
3689 struct mvpp2_port *port = netdev_priv(dev);
3690 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3691 dma_addr_t buf_dma_addr;
3693 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3694 mvpp2_txdesc_size_set(port, tx_desc, sz);
3696 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
3698 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3699 mvpp2_txq_desc_put(txq);
3703 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3706 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
3708 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3712 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3715 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3719 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
3720 struct mvpp2_tx_queue *txq,
3721 struct mvpp2_tx_queue *aggr_txq,
3722 struct mvpp2_txq_pcpu *txq_pcpu)
3724 struct mvpp2_port *port = netdev_priv(dev);
3725 int hdr_sz, i, len, descs = 0;
3728 /* Check number of available descriptors */
3729 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
3730 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
3731 tso_count_descs(skb)))
3734 hdr_sz = tso_start(skb, &tso);
3736 len = skb->len - hdr_sz;
3738 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
3739 char *hdr = txq_pcpu->tso_headers +
3740 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3745 tso_build_hdr(skb, hdr, &tso, left, len == 0);
3746 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
3749 int sz = min_t(int, tso.size, left);
3753 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
3754 txq_pcpu, sz, left, len == 0))
3756 tso_build_data(skb, &tso, sz);
3763 for (i = descs - 1; i >= 0; i--) {
3764 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
3765 tx_desc_unmap_put(port, txq, tx_desc);
3770 /* Main tx processing */
3771 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
3773 struct mvpp2_port *port = netdev_priv(dev);
3774 struct mvpp2_tx_queue *txq, *aggr_txq;
3775 struct mvpp2_txq_pcpu *txq_pcpu;
3776 struct mvpp2_tx_desc *tx_desc;
3777 dma_addr_t buf_dma_addr;
3778 unsigned long flags = 0;
3779 unsigned int thread;
3784 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3786 txq_id = skb_get_queue_mapping(skb);
3787 txq = port->txqs[txq_id];
3788 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3789 aggr_txq = &port->priv->aggr_txqs[thread];
3791 if (test_bit(thread, &port->priv->lock_map))
3792 spin_lock_irqsave(&port->tx_lock[thread], flags);
3794 if (skb_is_gso(skb)) {
3795 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
3798 frags = skb_shinfo(skb)->nr_frags + 1;
3800 /* Check number of available descriptors */
3801 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
3802 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
3807 /* Get a descriptor for the first part of the packet */
3808 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3809 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3810 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
3812 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
3813 skb_headlen(skb), DMA_TO_DEVICE);
3814 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3815 mvpp2_txq_desc_put(txq);
3820 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3822 tx_cmd = mvpp2_skb_tx_csum(port, skb);
3825 /* First and Last descriptor */
3826 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3827 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3828 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3830 /* First but not Last */
3831 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
3832 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3833 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3835 /* Continue with other skb fragments */
3836 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
3837 tx_desc_unmap_put(port, txq, tx_desc);
3844 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
3845 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
3847 txq_pcpu->reserved_num -= frags;
3848 txq_pcpu->count += frags;
3849 aggr_txq->count += frags;
3851 /* Enable transmit */
3853 mvpp2_aggr_txq_pend_desc_add(port, frags);
3855 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3856 netif_tx_stop_queue(nq);
3858 u64_stats_update_begin(&stats->syncp);
3859 stats->tx_packets++;
3860 stats->tx_bytes += skb->len;
3861 u64_stats_update_end(&stats->syncp);
3863 dev->stats.tx_dropped++;
3864 dev_kfree_skb_any(skb);
3867 /* Finalize TX processing */
3868 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3869 mvpp2_txq_done(port, txq, txq_pcpu);
3871 /* Set the timer in case not all frags were processed */
3872 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
3873 txq_pcpu->count > 0) {
3874 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
3876 if (!port_pcpu->timer_scheduled) {
3877 port_pcpu->timer_scheduled = true;
3878 hrtimer_start(&port_pcpu->tx_done_timer,
3879 MVPP2_TXDONE_HRTIMER_PERIOD_NS,
3880 HRTIMER_MODE_REL_PINNED_SOFT);
3884 if (test_bit(thread, &port->priv->lock_map))
3885 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
3887 return NETDEV_TX_OK;
3890 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
3892 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
3893 netdev_err(dev, "FCS error\n");
3894 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
3895 netdev_err(dev, "rx fifo overrun error\n");
3896 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
3897 netdev_err(dev, "tx fifo underrun error\n");
3900 static int mvpp2_poll(struct napi_struct *napi, int budget)
3902 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
3904 struct mvpp2_port *port = netdev_priv(napi->dev);
3905 struct mvpp2_queue_vector *qv;
3906 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3908 qv = container_of(napi, struct mvpp2_queue_vector, napi);
3910 /* Rx/Tx cause register
3912 * Bits 0-15: each bit indicates received packets on the Rx queue
3913 * (bit 0 is for Rx queue 0).
3915 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
3916 * (bit 16 is for Tx queue 0).
3918 * Each CPU has its own Rx/Tx cause register
3920 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
3921 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
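/* Example decode (hypothetical value): cause_rx_tx == 0x00030001 means
 * Tx queues 0 and 1 (bits 16-17) and Rx queue 0 (bit 0) have work
 * pending; the misc bits are handled first and cleared below.
 */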
3923 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3925 mvpp2_cause_error(port->dev, cause_misc);
3927 /* Clear the cause register */
3928 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
3929 mvpp2_thread_write(port->priv, thread,
3930 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
3931 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
3934 if (port->has_tx_irqs) {
3935 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3937 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
3938 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
3942 /* Process RX packets */
3943 cause_rx = cause_rx_tx &
3944 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
3945 cause_rx <<= qv->first_rxq;
3946 cause_rx |= qv->pending_cause_rx;
3947 while (cause_rx && budget > 0) {
3949 struct mvpp2_rx_queue *rxq;
3951 rxq = mvpp2_get_rx_queue(port, cause_rx);
3955 count = mvpp2_rx(port, napi, budget, rxq);
3959 /* Clear the bit associated with this Rx queue
3960 * so that the next iteration will continue from
3961 * the next Rx queue.
3963 cause_rx &= ~(1 << rxq->logic_rxq);
3969 napi_complete_done(napi, rx_done);
3971 mvpp2_qvec_interrupt_enable(qv);
3973 qv->pending_cause_rx = cause_rx;
3977 static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
3981 /* Set the GMAC & XLG MAC in reset */
3982 mvpp2_mac_reset_assert(port);
3984 /* Set the MPCS and XPCS in reset */
3985 mvpp22_pcs_reset_assert(port);
3987 /* comphy reconfiguration */
3988 mvpp22_comphy_init(port);
3990 /* gop reconfiguration */
3991 mvpp22_gop_init(port);
3993 mvpp22_pcs_reset_deassert(port);
3995 if (mvpp2_port_supports_xlg(port)) {
3996 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
3997 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3999 if (mvpp2_is_xlg(port->phy_interface))
4000 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4002 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4004 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4007 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
4008 mvpp2_xlg_max_rx_size_set(port);
4010 mvpp2_gmac_max_rx_size_set(port);
4013 /* Set hw internals when starting port */
4014 static void mvpp2_start_dev(struct mvpp2_port *port)
4018 mvpp2_txp_max_tx_size_set(port);
4020 for (i = 0; i < port->nqvecs; i++)
4021 napi_enable(&port->qvecs[i].napi);
4023 /* Enable interrupts on all threads */
4024 mvpp2_interrupts_enable(port);
4026 if (port->priv->hw_version == MVPP22)
4027 mvpp22_mode_reconfigure(port);
4029 if (port->phylink) {
4030 phylink_start(port->phylink);
4032 mvpp2_acpi_start(port);
4035 netif_tx_start_all_queues(port->dev);
4037 clear_bit(0, &port->state);
4040 /* Set hw internals when stopping port */
4041 static void mvpp2_stop_dev(struct mvpp2_port *port)
4045 set_bit(0, &port->state);
4047 /* Disable interrupts on all threads */
4048 mvpp2_interrupts_disable(port);
4050 for (i = 0; i < port->nqvecs; i++)
4051 napi_disable(&port->qvecs[i].napi);
4054 phylink_stop(port->phylink);
4055 phy_power_off(port->comphy);
4058 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4059 struct ethtool_ringparam *ring)
4061 u16 new_rx_pending = ring->rx_pending;
4062 u16 new_tx_pending = ring->tx_pending;
4064 if (ring->rx_pending == 0 || ring->tx_pending == 0)
4067 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4068 new_rx_pending = MVPP2_MAX_RXD_MAX;
4069 else if (!IS_ALIGNED(ring->rx_pending, 16))
4070 new_rx_pending = ALIGN(ring->rx_pending, 16);
4072 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4073 new_tx_pending = MVPP2_MAX_TXD_MAX;
4074 else if (!IS_ALIGNED(ring->tx_pending, 32))
4075 new_tx_pending = ALIGN(ring->tx_pending, 32);
4077 /* The Tx ring size cannot be smaller than the minimum number of
4078 * descriptors needed for TSO.
4080 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4081 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
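/* Worked example: a request of rx_pending == 1000 is rounded up to
 * ALIGN(1000, 16) == 1008, and tx_pending == 1000 up to
 * ALIGN(1000, 32) == 1024; both adjustments are reported via
 * netdev_info() below.
 */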
4083 if (ring->rx_pending != new_rx_pending) {
4084 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4085 ring->rx_pending, new_rx_pending);
4086 ring->rx_pending = new_rx_pending;
4089 if (ring->tx_pending != new_tx_pending) {
4090 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4091 ring->tx_pending, new_tx_pending);
4092 ring->tx_pending = new_tx_pending;
4098 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4100 u32 mac_addr_l, mac_addr_m, mac_addr_h;
4102 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4103 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4104 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4105 addr[0] = (mac_addr_h >> 24) & 0xFF;
4106 addr[1] = (mac_addr_h >> 16) & 0xFF;
4107 addr[2] = (mac_addr_h >> 8) & 0xFF;
4108 addr[3] = mac_addr_h & 0xFF;
4109 addr[4] = mac_addr_m & 0xFF;
4110 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4113 static int mvpp2_irqs_init(struct mvpp2_port *port)
4117 for (i = 0; i < port->nqvecs; i++) {
4118 struct mvpp2_queue_vector *qv = port->qvecs + i;
4120 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4121 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4127 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4130 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4134 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4137 for_each_present_cpu(cpu) {
4138 if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4140 cpumask_set_cpu(cpu, qv->mask);
4143 irq_set_affinity_hint(qv->irq, qv->mask);
4149 for (i = 0; i < port->nqvecs; i++) {
4150 struct mvpp2_queue_vector *qv = port->qvecs + i;
4152 irq_set_affinity_hint(qv->irq, NULL);
4155 free_irq(qv->irq, qv);
4161 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4165 for (i = 0; i < port->nqvecs; i++) {
4166 struct mvpp2_queue_vector *qv = port->qvecs + i;
4168 irq_set_affinity_hint(qv->irq, NULL);
4171 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4172 free_irq(qv->irq, qv);
4176 static bool mvpp22_rss_is_supported(void)
4178 return queue_mode == MVPP2_QDIST_MULTI_MODE;
4181 static int mvpp2_open(struct net_device *dev)
4183 struct mvpp2_port *port = netdev_priv(dev);
4184 struct mvpp2 *priv = port->priv;
4185 unsigned char mac_bcast[ETH_ALEN] = {
4186 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4190 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4192 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4195 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4197 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4200 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4202 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4205 err = mvpp2_prs_def_flow(port);
4207 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4211 /* Allocate the Rx/Tx queues */
4212 err = mvpp2_setup_rxqs(port);
4214 netdev_err(port->dev, "cannot allocate Rx queues\n");
4218 err = mvpp2_setup_txqs(port);
4220 netdev_err(port->dev, "cannot allocate Tx queues\n");
4221 goto err_cleanup_rxqs;
4224 err = mvpp2_irqs_init(port);
4226 netdev_err(port->dev, "cannot init IRQs\n");
4227 goto err_cleanup_txqs;
4230 /* Phylink isn't supported yet in ACPI mode */
4231 if (port->of_node) {
4232 err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
4234 netdev_err(port->dev, "could not attach PHY (%d)\n",
4242 if (priv->hw_version == MVPP22 && port->port_irq) {
4243 err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4246 netdev_err(port->dev,
4247 "cannot request port link/ptp IRQ %d\n",
4252 mvpp22_gop_setup_irq(port);
4254 /* By default the link is down */
4255 netif_carrier_off(port->dev);
4263 netdev_err(port->dev,
4264 "invalid configuration: no dt or link IRQ\n");
4268 /* Unmask interrupts on all CPUs */
4269 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4270 mvpp2_shared_interrupt_mask_unmask(port, false);
4272 mvpp2_start_dev(port);
4274 /* Start hardware statistics gathering */
4275 queue_delayed_work(priv->stats_queue, &port->stats_work,
4276 MVPP2_MIB_COUNTERS_STATS_DELAY);
4281 mvpp2_irqs_deinit(port);
4283 mvpp2_cleanup_txqs(port);
4285 mvpp2_cleanup_rxqs(port);
4289 static int mvpp2_stop(struct net_device *dev)
4291 struct mvpp2_port *port = netdev_priv(dev);
4292 struct mvpp2_port_pcpu *port_pcpu;
4293 unsigned int thread;
4295 mvpp2_stop_dev(port);
4297 /* Mask interrupts on all threads */
4298 on_each_cpu(mvpp2_interrupts_mask, port, 1);
4299 mvpp2_shared_interrupt_mask_unmask(port, true);
4302 phylink_disconnect_phy(port->phylink);
4304 free_irq(port->port_irq, port);
4306 mvpp2_irqs_deinit(port);
4307 if (!port->has_tx_irqs) {
4308 for (thread = 0; thread < port->priv->nthreads; thread++) {
4309 port_pcpu = per_cpu_ptr(port->pcpu, thread);
4311 hrtimer_cancel(&port_pcpu->tx_done_timer);
4312 port_pcpu->timer_scheduled = false;
4315 mvpp2_cleanup_rxqs(port);
4316 mvpp2_cleanup_txqs(port);
4318 cancel_delayed_work_sync(&port->stats_work);
4320 mvpp2_mac_reset_assert(port);
4321 mvpp22_pcs_reset_assert(port);
4326 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4327 struct netdev_hw_addr_list *list)
4329 struct netdev_hw_addr *ha;
4332 netdev_hw_addr_list_for_each(ha, list) {
4333 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4341 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4343 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4344 mvpp2_prs_vid_enable_filtering(port);
4346 mvpp2_prs_vid_disable_filtering(port);
4348 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4349 MVPP2_PRS_L2_UNI_CAST, enable);
4351 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4352 MVPP2_PRS_L2_MULTI_CAST, enable);
4355 static void mvpp2_set_rx_mode(struct net_device *dev)
4357 struct mvpp2_port *port = netdev_priv(dev);
4359 /* Clear the whole UC and MC list */
4360 mvpp2_prs_mac_del_all(port);
4362 if (dev->flags & IFF_PROMISC) {
4363 mvpp2_set_rx_promisc(port, true);
4367 mvpp2_set_rx_promisc(port, false);
4369 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4370 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4371 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4372 MVPP2_PRS_L2_UNI_CAST, true);
4374 if (dev->flags & IFF_ALLMULTI) {
4375 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4376 MVPP2_PRS_L2_MULTI_CAST, true);
4380 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4381 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4382 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4383 MVPP2_PRS_L2_MULTI_CAST, true);
4386 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4388 const struct sockaddr *addr = p;
4391 if (!is_valid_ether_addr(addr->sa_data))
4392 return -EADDRNOTAVAIL;
4394 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4396 /* Reconfigure the parser to accept the original MAC address */
4397 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4398 netdev_err(dev, "failed to change MAC address\n");
4403 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4404 * then bring all the ports back up.
4406 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4408 int numbufs = MVPP2_BM_POOLS_NUM, i;
4409 struct mvpp2_port *port = NULL;
4410 bool status[MVPP2_MAX_PORTS];
4412 for (i = 0; i < priv->port_count; i++) {
4413 port = priv->port_list[i];
4414 status[i] = netif_running(port->dev);
4416 mvpp2_stop(port->dev);
4419 /* nrxqs is the same for all ports */
4420 if (priv->percpu_pools)
4421 numbufs = port->nrxqs * 2;
4423 for (i = 0; i < numbufs; i++)
4424 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4426 devm_kfree(port->dev->dev.parent, priv->bm_pools);
4427 priv->percpu_pools = percpu;
4428 mvpp2_bm_init(port->dev->dev.parent, priv);
4430 for (i = 0; i < priv->port_count; i++) {
4431 port = priv->port_list[i];
4432 mvpp2_swf_bm_pool_init(port);
4434 mvpp2_open(port->dev);
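/* [Illustrative sketch, not part of the driver] The status[] array filled at
 * the top of mvpp2_bm_switch_buffers() records which ports were running; the
 * stop/open calls above are presumably guarded by it (the "if" lines are
 * elided from this listing). The assumed restore loop:
 */
static void example_reopen_ports(struct mvpp2 *priv, const bool *status)
{
	int i;

	for (i = 0; i < priv->port_count; i++) {
		struct mvpp2_port *port = priv->port_list[i];

		mvpp2_swf_bm_pool_init(port);
		if (status[i])
			mvpp2_open(port->dev);
	}
}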
4440 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
4442 struct mvpp2_port *port = netdev_priv(dev);
4443 bool running = netif_running(dev);
4444 struct mvpp2 *priv = port->priv;
4447 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
4448 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
4449 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
4450 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
4453 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
4454 if (port->xdp_prog) {
4455 netdev_err(dev, "Jumbo frames are not supported with XDP\n");
4458 if (priv->percpu_pools) {
4459 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
4460 mvpp2_bm_switch_buffers(priv, false);
4466 for (i = 0; i < priv->port_count; i++)
4467 if (priv->port_list[i] != port &&
4468 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
4469 MVPP2_BM_LONG_PKT_SIZE) {
4474 /* No port is using jumbo frames */
4476 dev_info(port->dev->dev.parent,
4477 "all ports have a low MTU, switching to per-cpu buffers");
4478 mvpp2_bm_switch_buffers(priv, true);
4483 mvpp2_stop_dev(port);
4485 err = mvpp2_bm_update_mtu(dev, mtu);
4487 netdev_err(dev, "failed to change MTU\n");
4488 /* Reconfigure BM to the original MTU */
4489 mvpp2_bm_update_mtu(dev, dev->mtu);
4491 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4495 mvpp2_start_dev(port);
4496 mvpp2_egress_enable(port);
4497 mvpp2_ingress_enable(port);
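/* [Illustrative sketch, not part of the driver] The MTU rounding above
 * relies on the standard kernel ALIGN()/IS_ALIGNED() helpers: ALIGN(x, 8)
 * rounds x up to the next multiple of 8 (e.g. ALIGN(1514, 8) == 1520), so
 * the result always passes the IS_ALIGNED() check. A hypothetical one-liner:
 */
static inline int example_round_pkt_size(int pkt_size)
{
	return IS_ALIGNED(pkt_size, 8) ? pkt_size : ALIGN(pkt_size, 8);
}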
4503 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
4505 enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
4506 struct mvpp2 *priv = port->priv;
4509 if (!priv->percpu_pools)
4512 if (!priv->page_pool[0])
4515 for (i = 0; i < priv->port_count; i++) {
4516 port = priv->port_list[i];
4517 if (port->xdp_prog) {
4518 dma_dir = DMA_BIDIRECTIONAL;
4523 /* All pools are equal in terms of DMA direction */
4524 if (priv->page_pool[0]->p.dma_dir != dma_dir)
4525 err = mvpp2_bm_switch_buffers(priv, true);
4531 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4533 struct mvpp2_port *port = netdev_priv(dev);
4537 for_each_possible_cpu(cpu) {
4538 struct mvpp2_pcpu_stats *cpu_stats;
4544 cpu_stats = per_cpu_ptr(port->stats, cpu);
4546 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4547 rx_packets = cpu_stats->rx_packets;
4548 rx_bytes = cpu_stats->rx_bytes;
4549 tx_packets = cpu_stats->tx_packets;
4550 tx_bytes = cpu_stats->tx_bytes;
4551 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4553 stats->rx_packets += rx_packets;
4554 stats->rx_bytes += rx_bytes;
4555 stats->tx_packets += tx_packets;
4556 stats->tx_bytes += tx_bytes;
4559 stats->rx_errors = dev->stats.rx_errors;
4560 stats->rx_dropped = dev->stats.rx_dropped;
4561 stats->tx_dropped = dev->stats.tx_dropped;
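/* [Illustrative sketch, not part of the driver] The fetch_begin/fetch_retry
 * loop above pairs with u64_stats_update_begin()/u64_stats_update_end() on
 * the writer side; a hypothetical per-cpu update in a receive path would
 * look like:
 */
static void example_rx_stats_update(struct mvpp2_pcpu_stats *stats, u32 len)
{
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}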
4564 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4566 struct mvpp2_port *port = netdev_priv(dev);
4571 return phylink_mii_ioctl(port->phylink, ifr, cmd);
4574 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
4576 struct mvpp2_port *port = netdev_priv(dev);
4579 ret = mvpp2_prs_vid_entry_add(port, vid);
4581 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
4582 MVPP2_PRS_VLAN_FILT_MAX - 1);
4586 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
4588 struct mvpp2_port *port = netdev_priv(dev);
4590 mvpp2_prs_vid_entry_remove(port, vid);
4594 static int mvpp2_set_features(struct net_device *dev,
4595 netdev_features_t features)
4597 netdev_features_t changed = dev->features ^ features;
4598 struct mvpp2_port *port = netdev_priv(dev);
4600 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
4601 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
4602 mvpp2_prs_vid_enable_filtering(port);
4604 /* Invalidate all registered VID filters for this
4605 * port
4606 */
4607 mvpp2_prs_vid_remove_all(port);
4609 mvpp2_prs_vid_disable_filtering(port);
4613 if (changed & NETIF_F_RXHASH) {
4614 if (features & NETIF_F_RXHASH)
4615 mvpp22_port_rss_enable(port);
4617 mvpp22_port_rss_disable(port);
4623 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
4625 struct bpf_prog *prog = bpf->prog, *old_prog;
4626 bool running = netif_running(port->dev);
4627 bool reset = !prog != !port->xdp_prog;
4629 if (port->dev->mtu > ETH_DATA_LEN) {
4630 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
4634 if (!port->priv->percpu_pools) {
4635 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
4639 if (port->ntxqs < num_possible_cpus() * 2) {
4640 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
4644 /* Device is up and bpf is added/removed, so we must set up the RX queues */
4645 if (running && reset)
4646 mvpp2_stop(port->dev);
4648 old_prog = xchg(&port->xdp_prog, prog);
4650 bpf_prog_put(old_prog);
4652 /* bpf is just replaced, RXQ and MTU are already set up */
4656 /* device was up, restore the link */
4658 mvpp2_open(port->dev);
4660 /* Check Page Pool DMA Direction */
4661 mvpp2_check_pagepool_dma(port);
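/* [Illustrative sketch, not part of the driver] The "reset" test in
 * mvpp2_xdp_setup() is a boolean XOR: it is true only when XDP goes from
 * detached to attached or vice versa, not when one program merely replaces
 * another. An equivalent, more explicit form of the same predicate:
 */
static bool example_xdp_needs_reset(const struct bpf_prog *new_prog,
				    const struct bpf_prog *old_prog)
{
	return (new_prog == NULL) != (old_prog == NULL);
}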
4666 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4668 struct mvpp2_port *port = netdev_priv(dev);
4670 switch (xdp->command) {
4671 case XDP_SETUP_PROG:
4672 return mvpp2_xdp_setup(port, xdp);
4678 /* Ethtool methods */
4680 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
4682 struct mvpp2_port *port = netdev_priv(dev);
4687 return phylink_ethtool_nway_reset(port->phylink);
4690 /* Set interrupt coalescing for ethtool */
4691 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
4692 struct ethtool_coalesce *c)
4694 struct mvpp2_port *port = netdev_priv(dev);
4697 for (queue = 0; queue < port->nrxqs; queue++) {
4698 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4700 rxq->time_coal = c->rx_coalesce_usecs;
4701 rxq->pkts_coal = c->rx_max_coalesced_frames;
4702 mvpp2_rx_pkts_coal_set(port, rxq);
4703 mvpp2_rx_time_coal_set(port, rxq);
4706 if (port->has_tx_irqs) {
4707 port->tx_time_coal = c->tx_coalesce_usecs;
4708 mvpp2_tx_time_coal_set(port);
4711 for (queue = 0; queue < port->ntxqs; queue++) {
4712 struct mvpp2_tx_queue *txq = port->txqs[queue];
4714 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4716 if (port->has_tx_irqs)
4717 mvpp2_tx_pkts_coal_set(port, txq);
4723 /* Get interrupt coalescing for ethtool */
4724 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
4725 struct ethtool_coalesce *c)
4727 struct mvpp2_port *port = netdev_priv(dev);
4729 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
4730 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
4731 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
4732 c->tx_coalesce_usecs = port->tx_time_coal;
4736 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
4737 struct ethtool_drvinfo *drvinfo)
4739 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
4740 sizeof(drvinfo->driver));
4741 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
4742 sizeof(drvinfo->version));
4743 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4744 sizeof(drvinfo->bus_info));
4747 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
4748 struct ethtool_ringparam *ring)
4750 struct mvpp2_port *port = netdev_priv(dev);
4752 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
4753 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
4754 ring->rx_pending = port->rx_ring_size;
4755 ring->tx_pending = port->tx_ring_size;
4758 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
4759 struct ethtool_ringparam *ring)
4761 struct mvpp2_port *port = netdev_priv(dev);
4762 u16 prev_rx_ring_size = port->rx_ring_size;
4763 u16 prev_tx_ring_size = port->tx_ring_size;
4766 err = mvpp2_check_ringparam_valid(dev, ring);
4770 if (!netif_running(dev)) {
4771 port->rx_ring_size = ring->rx_pending;
4772 port->tx_ring_size = ring->tx_pending;
4776 /* The interface is running, so we have to force a
4777 * reallocation of the queues
4779 mvpp2_stop_dev(port);
4780 mvpp2_cleanup_rxqs(port);
4781 mvpp2_cleanup_txqs(port);
4783 port->rx_ring_size = ring->rx_pending;
4784 port->tx_ring_size = ring->tx_pending;
4786 err = mvpp2_setup_rxqs(port);
4788 /* Reallocate Rx queues with the original ring size */
4789 port->rx_ring_size = prev_rx_ring_size;
4790 ring->rx_pending = prev_rx_ring_size;
4791 err = mvpp2_setup_rxqs(port);
4795 err = mvpp2_setup_txqs(port);
4797 /* Reallocate Tx queues with the original ring size */
4798 port->tx_ring_size = prev_tx_ring_size;
4799 ring->tx_pending = prev_tx_ring_size;
4800 err = mvpp2_setup_txqs(port);
4802 goto err_clean_rxqs;
4805 mvpp2_start_dev(port);
4806 mvpp2_egress_enable(port);
4807 mvpp2_ingress_enable(port);
4812 mvpp2_cleanup_rxqs(port);
4814 netdev_err(dev, "failed to change ring parameters");
4818 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
4819 struct ethtool_pauseparam *pause)
4821 struct mvpp2_port *port = netdev_priv(dev);
4826 phylink_ethtool_get_pauseparam(port->phylink, pause);
4829 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
4830 struct ethtool_pauseparam *pause)
4832 struct mvpp2_port *port = netdev_priv(dev);
4837 return phylink_ethtool_set_pauseparam(port->phylink, pause);
4840 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
4841 struct ethtool_link_ksettings *cmd)
4843 struct mvpp2_port *port = netdev_priv(dev);
4848 return phylink_ethtool_ksettings_get(port->phylink, cmd);
4851 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
4852 const struct ethtool_link_ksettings *cmd)
4854 struct mvpp2_port *port = netdev_priv(dev);
4859 return phylink_ethtool_ksettings_set(port->phylink, cmd);
4862 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
4863 struct ethtool_rxnfc *info, u32 *rules)
4865 struct mvpp2_port *port = netdev_priv(dev);
4866 int ret = 0, i, loc = 0;
4868 if (!mvpp22_rss_is_supported())
4871 switch (info->cmd) {
4873 ret = mvpp2_ethtool_rxfh_get(port, info);
4875 case ETHTOOL_GRXRINGS:
4876 info->data = port->nrxqs;
4878 case ETHTOOL_GRXCLSRLCNT:
4879 info->rule_cnt = port->n_rfs_rules;
4881 case ETHTOOL_GRXCLSRULE:
4882 ret = mvpp2_ethtool_cls_rule_get(port, info);
4884 case ETHTOOL_GRXCLSRLALL:
4885 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
4886 if (port->rfs_rules[i])
4897 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
4898 struct ethtool_rxnfc *info)
4900 struct mvpp2_port *port = netdev_priv(dev);
4903 if (!mvpp22_rss_is_supported())
4906 switch (info->cmd) {
4908 ret = mvpp2_ethtool_rxfh_set(port, info);
4910 case ETHTOOL_SRXCLSRLINS:
4911 ret = mvpp2_ethtool_cls_rule_ins(port, info);
4913 case ETHTOOL_SRXCLSRLDEL:
4914 ret = mvpp2_ethtool_cls_rule_del(port, info);
4922 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
4924 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
4927 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4930 struct mvpp2_port *port = netdev_priv(dev);
4933 if (!mvpp22_rss_is_supported())
4937 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
4940 *hfunc = ETH_RSS_HASH_CRC32;
4945 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4946 const u8 *key, const u8 hfunc)
4948 struct mvpp2_port *port = netdev_priv(dev);
4951 if (!mvpp22_rss_is_supported())
4954 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4961 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
4966 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
4967 u8 *key, u8 *hfunc, u32 rss_context)
4969 struct mvpp2_port *port = netdev_priv(dev);
4972 if (!mvpp22_rss_is_supported())
4974 if (rss_context >= MVPP22_N_RSS_TABLES)
4978 *hfunc = ETH_RSS_HASH_CRC32;
4981 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
4986 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
4987 const u32 *indir, const u8 *key,
4988 const u8 hfunc, u32 *rss_context,
4991 struct mvpp2_port *port = netdev_priv(dev);
4994 if (!mvpp22_rss_is_supported())
4997 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5004 return mvpp22_port_rss_ctx_delete(port, *rss_context);
5006 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5007 ret = mvpp22_port_rss_ctx_create(port, rss_context);
5012 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5016 static const struct net_device_ops mvpp2_netdev_ops = {
5017 .ndo_open = mvpp2_open,
5018 .ndo_stop = mvpp2_stop,
5019 .ndo_start_xmit = mvpp2_tx,
5020 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5021 .ndo_set_mac_address = mvpp2_set_mac_address,
5022 .ndo_change_mtu = mvpp2_change_mtu,
5023 .ndo_get_stats64 = mvpp2_get_stats64,
5024 .ndo_do_ioctl = mvpp2_ioctl,
5025 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5026 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5027 .ndo_set_features = mvpp2_set_features,
5028 .ndo_bpf = mvpp2_xdp,
5029 .ndo_xdp_xmit = mvpp2_xdp_xmit,
5032 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5033 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5034 ETHTOOL_COALESCE_MAX_FRAMES,
5035 .nway_reset = mvpp2_ethtool_nway_reset,
5036 .get_link = ethtool_op_get_link,
5037 .set_coalesce = mvpp2_ethtool_set_coalesce,
5038 .get_coalesce = mvpp2_ethtool_get_coalesce,
5039 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5040 .get_ringparam = mvpp2_ethtool_get_ringparam,
5041 .set_ringparam = mvpp2_ethtool_set_ringparam,
5042 .get_strings = mvpp2_ethtool_get_strings,
5043 .get_ethtool_stats = mvpp2_ethtool_get_stats,
5044 .get_sset_count = mvpp2_ethtool_get_sset_count,
5045 .get_pauseparam = mvpp2_ethtool_get_pause_param,
5046 .set_pauseparam = mvpp2_ethtool_set_pause_param,
5047 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5048 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
5049 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5050 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
5051 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
5052 .get_rxfh = mvpp2_ethtool_get_rxfh,
5053 .set_rxfh = mvpp2_ethtool_set_rxfh,
5054 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
5055 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
5058 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5059 * had a single IRQ defined per-port.
5061 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5062 struct device_node *port_node)
5064 struct mvpp2_queue_vector *v = &port->qvecs[0];
5067 v->nrxqs = port->nrxqs;
5068 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5069 v->sw_thread_id = 0;
5070 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5072 v->irq = irq_of_parse_and_map(port_node, 0);
5075 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5083 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5084 struct device_node *port_node)
5086 struct mvpp2 *priv = port->priv;
5087 struct mvpp2_queue_vector *v;
5090 switch (queue_mode) {
5091 case MVPP2_QDIST_SINGLE_MODE:
5092 port->nqvecs = priv->nthreads + 1;
5094 case MVPP2_QDIST_MULTI_MODE:
5095 port->nqvecs = priv->nthreads;
5099 for (i = 0; i < port->nqvecs; i++) {
5102 v = port->qvecs + i;
5105 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5106 v->sw_thread_id = i;
5107 v->sw_thread_mask = BIT(i);
5109 if (port->flags & MVPP2_F_DT_COMPAT)
5110 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5112 snprintf(irqname, sizeof(irqname), "hif%d", i);
5114 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5117 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5118 i == (port->nqvecs - 1)) {
5120 v->nrxqs = port->nrxqs;
5121 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5123 if (port->flags & MVPP2_F_DT_COMPAT)
5124 strncpy(irqname, "rx-shared", sizeof(irqname));
5128 v->irq = of_irq_get_byname(port_node, irqname);
5130 v->irq = fwnode_irq_get(port->fwnode, i);
5136 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5143 for (i = 0; i < port->nqvecs; i++)
5144 irq_dispose_mapping(port->qvecs[i].irq);
5148 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5149 struct device_node *port_node)
5151 if (port->has_tx_irqs)
5152 return mvpp2_multi_queue_vectors_init(port, port_node);
5154 return mvpp2_simple_queue_vectors_init(port, port_node);
5157 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5161 for (i = 0; i < port->nqvecs; i++)
5162 irq_dispose_mapping(port->qvecs[i].irq);
5165 /* Configure Rx queue group interrupt for this port */
5166 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5168 struct mvpp2 *priv = port->priv;
5172 if (priv->hw_version == MVPP21) {
5173 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5178 /* Handle the more complicated PPv2.2 case */
5179 for (i = 0; i < port->nqvecs; i++) {
5180 struct mvpp2_queue_vector *qv = port->qvecs + i;
5185 val = qv->sw_thread_id;
5186 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5187 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5189 val = qv->first_rxq;
5190 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5191 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5195 /* Initialize port HW */
5196 static int mvpp2_port_init(struct mvpp2_port *port)
5198 struct device *dev = port->dev->dev.parent;
5199 struct mvpp2 *priv = port->priv;
5200 struct mvpp2_txq_pcpu *txq_pcpu;
5201 unsigned int thread;
5204 /* Checks for hardware constraints */
5205 if (port->first_rxq + port->nrxqs >
5206 MVPP2_MAX_PORTS * priv->max_port_rxqs)
5209 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5213 mvpp2_egress_disable(port);
5214 mvpp2_port_disable(port);
5216 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5218 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5223 /* Associate physical Tx queues with this port and initialize them.
5224 * The mapping is predefined.
5226 for (queue = 0; queue < port->ntxqs; queue++) {
5227 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5228 struct mvpp2_tx_queue *txq;
5230 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5233 goto err_free_percpu;
5236 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5239 goto err_free_percpu;
5242 txq->id = queue_phy_id;
5243 txq->log_id = queue;
5244 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5245 for (thread = 0; thread < priv->nthreads; thread++) {
5246 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5247 txq_pcpu->thread = thread;
5250 port->txqs[queue] = txq;
5253 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5257 goto err_free_percpu;
5260 /* Allocate and initialize Rx queue for this port */
5261 for (queue = 0; queue < port->nrxqs; queue++) {
5262 struct mvpp2_rx_queue *rxq;
5264 /* Map physical Rx queue to port's logical Rx queue */
5265 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5268 goto err_free_percpu;
5270 /* Map this Rx queue to a physical queue */
5271 rxq->id = port->first_rxq + queue;
5272 rxq->port = port->id;
5273 rxq->logic_rxq = queue;
5275 port->rxqs[queue] = rxq;
5278 mvpp2_rx_irqs_setup(port);
5280 /* Create Rx descriptor rings */
5281 for (queue = 0; queue < port->nrxqs; queue++) {
5282 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5284 rxq->size = port->rx_ring_size;
5285 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5286 rxq->time_coal = MVPP2_RX_COAL_USEC;
5289 mvpp2_ingress_disable(port);
5291 /* Port default configuration */
5292 mvpp2_defaults_set(port);
5294 /* Port's classifier configuration */
5295 mvpp2_cls_oversize_rxq_set(port);
5296 mvpp2_cls_port_config(port);
5298 if (mvpp22_rss_is_supported())
5299 mvpp22_port_rss_init(port);
5301 /* Provide an initial Rx packet size */
5302 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5304 /* Initialize pools for swf */
5305 err = mvpp2_swf_bm_pool_init(port);
5307 goto err_free_percpu;
5309 /* Clear all port stats */
5310 mvpp2_read_stats(port);
5311 memset(port->ethtool_stats, 0,
5312 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
5317 for (queue = 0; queue < port->ntxqs; queue++) {
5318 if (!port->txqs[queue])
5320 free_percpu(port->txqs[queue]->pcpu);
5325 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
5326 unsigned long *flags)
5328 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
5332 for (i = 0; i < 5; i++)
5333 if (of_property_match_string(port_node, "interrupt-names",
5337 *flags |= MVPP2_F_DT_COMPAT;
5341 /* Checks if the port dt description has the required Tx interrupts:
5342 * - PPv2.1: there are no such interrupts.
5343 * - PPv2.2:
5344 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
5345 *   - The new ones have: "hifX" with X in [0..8]
5347 * All those variants are supported to keep backward compatibility.
5349 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
5350 struct device_node *port_node,
5351 unsigned long *flags)
5360 if (priv->hw_version == MVPP21)
5363 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
5366 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5367 snprintf(name, 5, "hif%d", i);
5368 if (of_property_match_string(port_node, "interrupt-names",
5376 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
5377 struct fwnode_handle *fwnode,
5380 struct mvpp2_port *port = netdev_priv(dev);
5381 char hw_mac_addr[ETH_ALEN] = {0};
5382 char fw_mac_addr[ETH_ALEN];
5384 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
5385 *mac_from = "firmware node";
5386 ether_addr_copy(dev->dev_addr, fw_mac_addr);
5390 if (priv->hw_version == MVPP21) {
5391 mvpp21_get_mac_address(port, hw_mac_addr);
5392 if (is_valid_ether_addr(hw_mac_addr)) {
5393 *mac_from = "hardware";
5394 ether_addr_copy(dev->dev_addr, hw_mac_addr);
5399 *mac_from = "random";
5400 eth_hw_addr_random(dev);
5403 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
5405 return container_of(config, struct mvpp2_port, phylink_config);
5408 static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
5410 return container_of(pcs, struct mvpp2_port, phylink_pcs);
5413 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
5414 struct phylink_link_state *state)
5416 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5419 state->speed = SPEED_10000;
5421 state->an_complete = 1;
5423 val = readl(port->base + MVPP22_XLG_STATUS);
5424 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
5427 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5428 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
5429 state->pause |= MLO_PAUSE_TX;
5430 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
5431 state->pause |= MLO_PAUSE_RX;
5434 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
5436 phy_interface_t interface,
5437 const unsigned long *advertising,
5438 bool permit_pause_to_mac)
5443 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
5444 .pcs_get_state = mvpp2_xlg_pcs_get_state,
5445 .pcs_config = mvpp2_xlg_pcs_config,
5448 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
5449 struct phylink_link_state *state)
5451 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5454 val = readl(port->base + MVPP2_GMAC_STATUS0);
5456 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
5457 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
5458 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
5460 switch (port->phy_interface) {
5461 case PHY_INTERFACE_MODE_1000BASEX:
5462 state->speed = SPEED_1000;
5464 case PHY_INTERFACE_MODE_2500BASEX:
5465 state->speed = SPEED_2500;
5468 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
5469 state->speed = SPEED_1000;
5470 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
5471 state->speed = SPEED_100;
5473 state->speed = SPEED_10;
5477 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
5478 state->pause |= MLO_PAUSE_RX;
5479 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
5480 state->pause |= MLO_PAUSE_TX;
5483 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
5484 phy_interface_t interface,
5485 const unsigned long *advertising,
5486 bool permit_pause_to_mac)
5488 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5489 u32 mask, val, an, old_an, changed;
5491 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
5492 MVPP2_GMAC_IN_BAND_AUTONEG |
5493 MVPP2_GMAC_AN_SPEED_EN |
5494 MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5495 MVPP2_GMAC_AN_DUPLEX_EN;
5497 if (phylink_autoneg_inband(mode)) {
5498 mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
5499 MVPP2_GMAC_CONFIG_GMII_SPEED |
5500 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5501 val = MVPP2_GMAC_IN_BAND_AUTONEG;
5503 if (interface == PHY_INTERFACE_MODE_SGMII) {
5504 /* SGMII mode receives the speed and duplex from PHY */
5505 val |= MVPP2_GMAC_AN_SPEED_EN |
5506 MVPP2_GMAC_AN_DUPLEX_EN;
5508 /* 802.3z mode has fixed speed and duplex */
5509 val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
5510 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5512 /* The FLOW_CTRL_AUTONEG bit selects whether the GMAC pause modes are
5513 * resolved automatically by the hardware or controlled manually via
5514 * the bits in MVPP22_GMAC_CTRL_4_REG.
5516 if (permit_pause_to_mac)
5517 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5519 /* Configure advertisement bits */
5520 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
5521 if (phylink_test(advertising, Pause))
5522 val |= MVPP2_GMAC_FC_ADV_EN;
5523 if (phylink_test(advertising, Asym_Pause))
5524 val |= MVPP2_GMAC_FC_ADV_ASM_EN;
5530 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5531 an = (an & ~mask) | val;
5532 changed = an ^ old_an;
5534 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5536 /* We are only interested in the advertisement bits changing */
5537 return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
5540 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
5542 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5543 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5545 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
5546 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5547 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
5548 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5551 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
5552 .pcs_get_state = mvpp2_gmac_pcs_get_state,
5553 .pcs_config = mvpp2_gmac_pcs_config,
5554 .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
5557 static void mvpp2_phylink_validate(struct phylink_config *config,
5558 unsigned long *supported,
5559 struct phylink_link_state *state)
5561 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5562 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5564 /* Invalid combinations */
5565 switch (state->interface) {
5566 case PHY_INTERFACE_MODE_10GBASER:
5567 case PHY_INTERFACE_MODE_XAUI:
5568 if (!mvpp2_port_supports_xlg(port))
5571 case PHY_INTERFACE_MODE_RGMII:
5572 case PHY_INTERFACE_MODE_RGMII_ID:
5573 case PHY_INTERFACE_MODE_RGMII_RXID:
5574 case PHY_INTERFACE_MODE_RGMII_TXID:
5575 if (!mvpp2_port_supports_rgmii(port))
5582 phylink_set(mask, Autoneg);
5583 phylink_set_port_modes(mask);
5584 phylink_set(mask, Pause);
5585 phylink_set(mask, Asym_Pause);
5587 switch (state->interface) {
5588 case PHY_INTERFACE_MODE_10GBASER:
5589 case PHY_INTERFACE_MODE_XAUI:
5590 case PHY_INTERFACE_MODE_NA:
5591 if (mvpp2_port_supports_xlg(port)) {
5592 phylink_set(mask, 10000baseT_Full);
5593 phylink_set(mask, 10000baseCR_Full);
5594 phylink_set(mask, 10000baseSR_Full);
5595 phylink_set(mask, 10000baseLR_Full);
5596 phylink_set(mask, 10000baseLRM_Full);
5597 phylink_set(mask, 10000baseER_Full);
5598 phylink_set(mask, 10000baseKR_Full);
5600 if (state->interface != PHY_INTERFACE_MODE_NA)
5603 case PHY_INTERFACE_MODE_RGMII:
5604 case PHY_INTERFACE_MODE_RGMII_ID:
5605 case PHY_INTERFACE_MODE_RGMII_RXID:
5606 case PHY_INTERFACE_MODE_RGMII_TXID:
5607 case PHY_INTERFACE_MODE_SGMII:
5608 phylink_set(mask, 10baseT_Half);
5609 phylink_set(mask, 10baseT_Full);
5610 phylink_set(mask, 100baseT_Half);
5611 phylink_set(mask, 100baseT_Full);
5612 phylink_set(mask, 1000baseT_Full);
5613 phylink_set(mask, 1000baseX_Full);
5614 if (state->interface != PHY_INTERFACE_MODE_NA)
5617 case PHY_INTERFACE_MODE_1000BASEX:
5618 case PHY_INTERFACE_MODE_2500BASEX:
5620 state->interface != PHY_INTERFACE_MODE_2500BASEX) {
5621 phylink_set(mask, 1000baseT_Full);
5622 phylink_set(mask, 1000baseX_Full);
5625 state->interface == PHY_INTERFACE_MODE_2500BASEX) {
5626 phylink_set(mask, 2500baseT_Full);
5627 phylink_set(mask, 2500baseX_Full);
5634 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
5635 bitmap_and(state->advertising, state->advertising, mask,
5636 __ETHTOOL_LINK_MODE_MASK_NBITS);
5638 phylink_helper_basex_speed(state);
5642 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
5645 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
5646 const struct phylink_link_state *state)
5650 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5651 MVPP22_XLG_CTRL0_MAC_RESET_DIS,
5652 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5653 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
5654 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
5655 MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
5656 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
5657 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
5659 /* Wait for reset to deassert */
5661 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5662 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
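/* [Illustrative sketch, not part of the driver] mvpp2_modify(), used above,
 * is defined elsewhere in the file; it is assumed to be the usual
 * read-modify-write helper (clear the bits in @mask, then set @set):
 */
static inline void example_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 val = readl(ptr);

	val &= ~mask;
	val |= set;
	writel(val, ptr);
}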
5665 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
5666 const struct phylink_link_state *state)
5668 u32 old_ctrl0, ctrl0;
5669 u32 old_ctrl2, ctrl2;
5670 u32 old_ctrl4, ctrl4;
5672 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5673 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5674 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
5676 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
5677 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
5679 /* Configure port type */
5680 if (phy_interface_mode_is_8023z(state->interface)) {
5681 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
5682 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5683 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5684 MVPP22_CTRL4_DP_CLK_SEL |
5685 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5686 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5687 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
5688 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5689 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5690 MVPP22_CTRL4_DP_CLK_SEL |
5691 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5692 } else if (phy_interface_mode_is_rgmii(state->interface)) {
5693 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
5694 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5695 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5696 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5699 /* Configure negotiation style */
5700 if (!phylink_autoneg_inband(mode)) {
5701 /* Phy or fixed speed - no in-band AN, nothing to do, leave the
5702 * configured speed, duplex and flow control as-is.
5704 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5705 /* SGMII in-band mode receives the speed and duplex from
5706 * the PHY. Flow control information is not received. */
5707 } else if (phy_interface_mode_is_8023z(state->interface)) {
5708 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
5709 * they negotiate duplex: they are always operating with a fixed
5710 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
5711 * speed and full duplex here.
5713 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
5716 if (old_ctrl0 != ctrl0)
5717 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
5718 if (old_ctrl2 != ctrl2)
5719 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
5720 if (old_ctrl4 != ctrl4)
5721 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
5724 static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
5725 phy_interface_t interface)
5727 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5729 /* Check for invalid configuration */
5730 if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
5731 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
5735 if (port->phy_interface != interface ||
5736 phylink_autoneg_inband(mode)) {
5737 /* Force the link down when changing the interface or if in
5738 * in-band mode to ensure we do not change the configuration
5739 * while the hardware is indicating link is up. We force both
5740 * XLG and GMAC down to ensure that they're both in a known
5743 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
5744 MVPP2_GMAC_FORCE_LINK_PASS |
5745 MVPP2_GMAC_FORCE_LINK_DOWN,
5746 MVPP2_GMAC_FORCE_LINK_DOWN);
5748 if (mvpp2_port_supports_xlg(port))
5749 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5750 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
5751 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
5752 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
5755 /* Make sure the port is disabled when reconfiguring the mode */
5756 mvpp2_port_disable(port);
5758 if (port->phy_interface != interface) {
5759 /* Place GMAC into reset */
5760 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
5761 MVPP2_GMAC_PORT_RESET_MASK,
5762 MVPP2_GMAC_PORT_RESET_MASK);
5764 if (port->priv->hw_version == MVPP22) {
5765 mvpp22_gop_mask_irq(port);
5767 phy_power_off(port->comphy);
5771 /* Select the appropriate PCS operations depending on the
5772 * configured interface mode. We will only switch to a mode
5773 * that the validate() checks have already passed.
5775 if (mvpp2_is_xlg(interface))
5776 port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
5778 port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
5783 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
5784 phy_interface_t interface)
5786 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5789 ret = mvpp2__mac_prepare(config, mode, interface);
5791 phylink_set_pcs(port->phylink, &port->phylink_pcs);
5796 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
5797 const struct phylink_link_state *state)
5799 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5801 /* mac (re)configuration */
5802 if (mvpp2_is_xlg(state->interface))
5803 mvpp2_xlg_config(port, mode, state);
5804 else if (phy_interface_mode_is_rgmii(state->interface) ||
5805 phy_interface_mode_is_8023z(state->interface) ||
5806 state->interface == PHY_INTERFACE_MODE_SGMII)
5807 mvpp2_gmac_config(port, mode, state);
5809 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
5810 mvpp2_port_loopback_set(port, state);
5813 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
5814 phy_interface_t interface)
5816 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5818 if (port->priv->hw_version == MVPP22 &&
5819 port->phy_interface != interface) {
5820 port->phy_interface = interface;
5822 /* Reconfigure the serdes lanes */
5823 mvpp22_mode_reconfigure(port);
5825 /* Unmask interrupts */
5826 mvpp22_gop_unmask_irq(port);
5829 if (!mvpp2_is_xlg(interface)) {
5830 /* Release GMAC reset and wait */
5831 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
5832 MVPP2_GMAC_PORT_RESET_MASK, 0);
5834 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5835 MVPP2_GMAC_PORT_RESET_MASK)
5839 mvpp2_port_enable(port);
5841 /* Allow the link to come up if in in-band mode, otherwise the
5842 * link is forced via mac_link_down()/mac_link_up()
5844 if (phylink_autoneg_inband(mode)) {
5845 if (mvpp2_is_xlg(interface))
5846 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5847 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
5848 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
5850 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
5851 MVPP2_GMAC_FORCE_LINK_PASS |
5852 MVPP2_GMAC_FORCE_LINK_DOWN, 0);
5858 static void mvpp2_mac_link_up(struct phylink_config *config,
5859 struct phy_device *phy,
5860 unsigned int mode, phy_interface_t interface,
5861 int speed, int duplex,
5862 bool tx_pause, bool rx_pause)
5864 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5867 if (mvpp2_is_xlg(interface)) {
5868 if (!phylink_autoneg_inband(mode)) {
5869 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5871 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
5873 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5875 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5876 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
5877 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
5878 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
5879 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
5882 if (!phylink_autoneg_inband(mode)) {
5883 val = MVPP2_GMAC_FORCE_LINK_PASS;
5885 if (speed == SPEED_1000 || speed == SPEED_2500)
5886 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
5887 else if (speed == SPEED_100)
5888 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5890 if (duplex == DUPLEX_FULL)
5891 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5893 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
5894 MVPP2_GMAC_FORCE_LINK_DOWN |
5895 MVPP2_GMAC_FORCE_LINK_PASS |
5896 MVPP2_GMAC_CONFIG_MII_SPEED |
5897 MVPP2_GMAC_CONFIG_GMII_SPEED |
5898 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
5901 /* We can always update the flow control enable bits;
5902 * these will only be effective if flow control AN
5903 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
5907 val |= MVPP22_CTRL4_TX_FC_EN;
5909 val |= MVPP22_CTRL4_RX_FC_EN;
5911 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
5912 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
5916 mvpp2_port_enable(port);
5918 mvpp2_egress_enable(port);
5919 mvpp2_ingress_enable(port);
5920 netif_tx_wake_all_queues(port->dev);
5923 static void mvpp2_mac_link_down(struct phylink_config *config,
5924 unsigned int mode, phy_interface_t interface)
5926 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5929 if (!phylink_autoneg_inband(mode)) {
5930 if (mvpp2_is_xlg(interface)) {
5931 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5932 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5933 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5934 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5936 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5937 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5938 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5939 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5943 netif_tx_stop_all_queues(port->dev);
5944 mvpp2_egress_disable(port);
5945 mvpp2_ingress_disable(port);
5947 mvpp2_port_disable(port);
5950 static const struct phylink_mac_ops mvpp2_phylink_ops = {
5951 .validate = mvpp2_phylink_validate,
5952 .mac_prepare = mvpp2_mac_prepare,
5953 .mac_config = mvpp2_mac_config,
5954 .mac_finish = mvpp2_mac_finish,
5955 .mac_link_up = mvpp2_mac_link_up,
5956 .mac_link_down = mvpp2_mac_link_down,
5959 /* Work-around for ACPI */
5960 static void mvpp2_acpi_start(struct mvpp2_port *port)
5962 /* Phylink isn't used as of now for ACPI, so the MAC has to be
5963 * configured manually when the interface is started. This will
5964 * be removed as soon as phylink ACPI support lands.
5966 struct phylink_link_state state = {
5967 .interface = port->phy_interface,
5969 mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
5970 port->phy_interface);
5971 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
5972 port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
5973 port->phy_interface,
5974 state.advertising, false);
5975 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
5976 port->phy_interface);
5977 mvpp2_mac_link_up(&port->phylink_config, NULL,
5978 MLO_AN_INBAND, port->phy_interface,
5979 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
5982 /* Ports initialization */
5983 static int mvpp2_port_probe(struct platform_device *pdev,
5984 struct fwnode_handle *port_fwnode,
5987 struct phy *comphy = NULL;
5988 struct mvpp2_port *port;
5989 struct mvpp2_port_pcpu *port_pcpu;
5990 struct device_node *port_node = to_of_node(port_fwnode);
5991 netdev_features_t features;
5992 struct net_device *dev;
5993 struct phylink *phylink;
5994 char *mac_from = "";
5995 unsigned int ntxqs, nrxqs, thread;
5996 unsigned long flags = 0;
6002 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6003 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6005 "not enough IRQs to support multi queue mode\n");
6009 ntxqs = MVPP2_MAX_TXQ;
6010 nrxqs = mvpp2_get_nrxqs(priv);
6012 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6016 phy_mode = fwnode_get_phy_mode(port_fwnode);
6018 dev_err(&pdev->dev, "incorrect phy mode\n");
6020 goto err_free_netdev;
6024 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6025 * Existing usage of 10GBASE-KR is not correct; no backplane
6026 * negotiation is done, and this driver does not actually support
6027 * it.
6028 */
6029 if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6030 phy_mode = PHY_INTERFACE_MODE_10GBASER;
6033 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6034 if (IS_ERR(comphy)) {
6035 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6036 err = -EPROBE_DEFER;
6037 goto err_free_netdev;
6043 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6045 dev_err(&pdev->dev, "missing port-id value\n");
6046 goto err_free_netdev;
6049 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6050 dev->watchdog_timeo = 5 * HZ;
6051 dev->netdev_ops = &mvpp2_netdev_ops;
6052 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6054 port = netdev_priv(dev);
6056 port->fwnode = port_fwnode;
6057 port->has_phy = !!of_find_property(port_node, "phy", NULL);
6058 port->ntxqs = ntxqs;
6059 port->nrxqs = nrxqs;
6061 port->has_tx_irqs = has_tx_irqs;
6062 port->flags = flags;
6064 err = mvpp2_queue_vectors_init(port, port_node);
6066 goto err_free_netdev;
6069 port->port_irq = of_irq_get_byname(port_node, "link");
6071 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6072 if (port->port_irq == -EPROBE_DEFER) {
6073 err = -EPROBE_DEFER;
6074 goto err_deinit_qvecs;
6076 if (port->port_irq <= 0)
6077 /* the link irq is optional */
6080 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6081 port->flags |= MVPP2_F_LOOPBACK;
6084 if (priv->hw_version == MVPP21)
6085 port->first_rxq = port->id * port->nrxqs;
6087 port->first_rxq = port->id * priv->max_port_rxqs;
6089 port->of_node = port_node;
6090 port->phy_interface = phy_mode;
6091 port->comphy = comphy;
6093 if (priv->hw_version == MVPP21) {
6094 port->base = devm_platform_ioremap_resource(pdev, 2 + id);
6095 if (IS_ERR(port->base)) {
6096 err = PTR_ERR(port->base);
6100 port->stats_base = port->priv->lms_base +
6101 MVPP21_MIB_COUNTERS_OFFSET +
6102 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
6104 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
6107 dev_err(&pdev->dev, "missing gop-port-id value\n");
6108 goto err_deinit_qvecs;
6111 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6112 port->stats_base = port->priv->iface_base +
6113 MVPP22_MIB_COUNTERS_OFFSET +
6114 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6117 /* Alloc per-cpu and ethtool stats */
6118 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6124 port->ethtool_stats = devm_kcalloc(&pdev->dev,
6125 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
6126 sizeof(u64), GFP_KERNEL);
6127 if (!port->ethtool_stats) {
6129 goto err_free_stats;
6132 mutex_init(&port->gather_stats_lock);
6133 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6135 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6137 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6138 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6139 SET_NETDEV_DEV(dev, &pdev->dev);
6141 err = mvpp2_port_init(port);
6143 dev_err(&pdev->dev, "failed to init port %d\n", id);
6144 goto err_free_stats;
6147 mvpp2_port_periodic_xon_disable(port);
6149 mvpp2_mac_reset_assert(port);
6150 mvpp22_pcs_reset_assert(port);
6152 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6155 goto err_free_txq_pcpu;
6158 if (!port->has_tx_irqs) {
6159 for (thread = 0; thread < priv->nthreads; thread++) {
6160 port_pcpu = per_cpu_ptr(port->pcpu, thread);
6162 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6163 HRTIMER_MODE_REL_PINNED_SOFT);
6164 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6165 port_pcpu->timer_scheduled = false;
6166 port_pcpu->dev = dev;
6170 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6172 dev->features = features | NETIF_F_RXCSUM;
6173 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6174 NETIF_F_HW_VLAN_CTAG_FILTER;
6176 if (mvpp22_rss_is_supported()) {
6177 dev->hw_features |= NETIF_F_RXHASH;
6178 dev->features |= NETIF_F_NTUPLE;
6181 if (!port->priv->percpu_pools)
6182 mvpp2_set_hw_csum(port, port->pool_long->id);
6184 dev->vlan_features |= features;
6185 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
6186 dev->priv_flags |= IFF_UNICAST_FLT;
6188 /* MTU range: 68 - 9704 */
6189 dev->min_mtu = ETH_MIN_MTU;
6190 /* 9704 == 9728 - 20 and rounding to 8 */
6191 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6192 dev->dev.of_node = port_node;
6194 /* Phylink isn't used w/ ACPI as of now */
6196 port->phylink_config.dev = &dev->dev;
6197 port->phylink_config.type = PHYLINK_NETDEV;
6199 phylink = phylink_create(&port->phylink_config, port_fwnode,
6200 phy_mode, &mvpp2_phylink_ops);
6201 if (IS_ERR(phylink)) {
6202 err = PTR_ERR(phylink);
6203 goto err_free_port_pcpu;
6205 port->phylink = phylink;
6207 port->phylink = NULL;
6210 /* Cycle the comphy to power it down, saving 270mW per port -
6211 * don't worry about an error powering it up. When the comphy
6212 * driver does this, we can remove this code.
6215 err = mvpp22_comphy_init(port);
6217 phy_power_off(port->comphy);
6220 err = register_netdev(dev);
6222 dev_err(&pdev->dev, "failed to register netdev\n");
6225 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6227 priv->port_list[priv->port_count++] = port;
6233 phylink_destroy(port->phylink);
6235 free_percpu(port->pcpu);
6237 for (i = 0; i < port->ntxqs; i++)
6238 free_percpu(port->txqs[i]->pcpu);
6240 free_percpu(port->stats);
6243 irq_dispose_mapping(port->port_irq);
6245 mvpp2_queue_vectors_deinit(port);
6251 /* Ports removal routine */
6252 static void mvpp2_port_remove(struct mvpp2_port *port)
6256 unregister_netdev(port->dev);
6258 phylink_destroy(port->phylink);
6259 free_percpu(port->pcpu);
6260 free_percpu(port->stats);
6261 for (i = 0; i < port->ntxqs; i++)
6262 free_percpu(port->txqs[i]->pcpu);
6263 mvpp2_queue_vectors_deinit(port);
6265 irq_dispose_mapping(port->port_irq);
6266 free_netdev(port->dev);
6269 /* Initialize decoding windows */
6270 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6276 for (i = 0; i < 6; i++) {
6277 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6278 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6281 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6286 for (i = 0; i < dram->num_cs; i++) {
6287 const struct mbus_dram_window *cs = dram->cs + i;
6289 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6290 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6291 dram->mbus_dram_target_id);
6293 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6294 (cs->size - 1) & 0xffff0000);
6296 win_enable |= (1 << i);
6299 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6302 /* Initialize Rx FIFOs */
6303 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6307 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6308 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6309 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
6310 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6311 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
6314 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6315 MVPP2_RX_FIFO_PORT_MIN_PKT);
6316 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6319 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
6323 /* The FIFO size parameters are set depending on the maximum speed a
6324 * given port can handle:
6325 * - Port 0: 10Gbps
6326 * - Port 1: 2.5Gbps
6327 * - Ports 2 and 3: 1Gbps
6330 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
6331 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
6332 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
6333 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
6335 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
6336 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
6337 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
6338 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
6340 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
6341 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6342 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
6343 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6344 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
6347 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6348 MVPP2_RX_FIFO_PORT_MIN_PKT);
6349 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6352 /* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G
6353 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
6354 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
6356 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
6358 int port, size, thrs;
6360 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6362 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
6363 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
6365 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
6366 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
6368 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
6369 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
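/* [Illustrative sketch, not part of the driver] A compile-time check of the
 * 19kB budget described above, assuming (as their names suggest) that the
 * Tx FIFO size macros are expressed in kB units: 10kB for port 0 plus 3kB
 * for each of the other three ports.
 */
static inline void example_tx_fifo_budget_check(void)
{
	BUILD_BUG_ON(MVPP22_TX_FIFO_DATA_SIZE_10KB +
		     (MVPP2_MAX_PORTS - 1) * MVPP22_TX_FIFO_DATA_SIZE_3KB != 19);
}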
6373 static void mvpp2_axi_init(struct mvpp2 *priv)
6375 u32 val, rdval, wrval;
6377 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
6379 /* AXI Bridge Configuration */
6381 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
6382 << MVPP22_AXI_ATTR_CACHE_OFFS;
6383 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6384 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6386 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
6387 << MVPP22_AXI_ATTR_CACHE_OFFS;
6388 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6389 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
6392 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
6393 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
6396 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
6397 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
6398 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
6399 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
6402 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
6403 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
6405 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
6406 << MVPP22_AXI_CODE_CACHE_OFFS;
6407 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
6408 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6409 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
6410 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
6412 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
6413 << MVPP22_AXI_CODE_CACHE_OFFS;
6414 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6415 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6417 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
6419 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
6420 << MVPP22_AXI_CODE_CACHE_OFFS;
6421 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6422 << MVPP22_AXI_CODE_DOMAIN_OFFS;
6424 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
6427 /* Initialize network controller common part HW */
6428 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6430 const struct mbus_dram_target_info *dram_target_info;
6434 /* MBUS windows configuration */
6435 dram_target_info = mv_mbus_dram_info();
6436 if (dram_target_info)
6437 mvpp2_conf_mbus_windows(dram_target_info, priv);
6439 if (priv->hw_version == MVPP22)
6440 mvpp2_axi_init(priv);
6442 /* Disable HW PHY polling */
6443 if (priv->hw_version == MVPP21) {
6444 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6445 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6446 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6448 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6449 val &= ~MVPP22_SMI_POLLING_EN;
6450 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6453 /* Allocate and initialize aggregated TXQs */
6454 priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
6455 sizeof(*priv->aggr_txqs),
6457 if (!priv->aggr_txqs)
6460 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6461 priv->aggr_txqs[i].id = i;
6462 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6463 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
6469 if (priv->hw_version == MVPP21) {
6470 mvpp2_rx_fifo_init(priv);
6472 mvpp22_rx_fifo_init(priv);
6473 mvpp22_tx_fifo_init(priv);
6476 if (priv->hw_version == MVPP21)
6477 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6478 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6480 /* Allow cache snoop when transmitting packets */
6481 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6483 /* Buffer Manager initialization */
6484 err = mvpp2_bm_init(&pdev->dev, priv);
6488 /* Parser default initialization */
6489 err = mvpp2_prs_default_init(pdev, priv);
6493 /* Classifier default initialization */
6494 mvpp2_cls_init(priv);
6499 static int mvpp2_probe(struct platform_device *pdev)
6501 const struct acpi_device_id *acpi_id;
6502 struct fwnode_handle *fwnode = pdev->dev.fwnode;
6503 struct fwnode_handle *port_fwnode;
6505 struct resource *res;
6510 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
6514 if (has_acpi_companion(&pdev->dev)) {
6515 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
6519 priv->hw_version = (unsigned long)acpi_id->driver_data;
6522 (unsigned long)of_device_get_match_data(&pdev->dev);
6525 /* Multi queue mode isn't supported on PPv2.1, fall back to single
6526 * mode
6527 */
6528 if (priv->hw_version == MVPP21)
6529 queue_mode = MVPP2_QDIST_SINGLE_MODE;
6531 base = devm_platform_ioremap_resource(pdev, 0);
6533 return PTR_ERR(base);
6535 if (priv->hw_version == MVPP21) {
6536 priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
6537 if (IS_ERR(priv->lms_base))
6538 return PTR_ERR(priv->lms_base);
6540 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6541 if (has_acpi_companion(&pdev->dev)) {
6542 /* If the MDIO memory region is declared in
6543 * the ACPI tables, it can already appear as 'in-use'
6544 * in the OS. Because it is overlapped by the second
6545 * region of the network controller, make
6546 * sure it is released before requesting it again.
6547 * The mvpp2 driver takes care to avoid
6548 * concurrent access to this memory region.
6550 release_resource(res);
6552 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6553 if (IS_ERR(priv->iface_base))
6554 return PTR_ERR(priv->iface_base);
	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for DT
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}
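
	/* Per-CPU buffer pools need two BM pools (one long, one short) per
	 * RXQ, so only enable them when twice the RXQ count fits in the
	 * pools the hardware provides.
	 */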
	if (priv->hw_version == MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();
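
	/* Map the present CPUs onto the address-space threads. Any CPUs
	 * beyond the number of threads must share one, and lock_map marks
	 * the threads whose register windows need locking.
	 */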
	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);
	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));
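
	/* Each thread has its own copy of the per-thread registers, in a
	 * window of addr_space_sz bytes inside the controller's address
	 * space.
	 */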
	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;
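
	/* With DT the clocks are managed here; on ACPI platforms the
	 * firmware instead reports the TCLK rate through the
	 * "clock-frequency" property.
	 */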
	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}
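
	/* Probe the TAI (time-of-day) block used for PTP timestamping */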
	err = mvpp22_tai_probe(&pdev->dev, priv);
	if (err < 0)
		goto err_axi_clk;

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link at line rate carries roughly
	 * 14.88M minimum-size (64B) packets per second, which wraps a
	 * 32-bit packet counter in under five minutes and a 32-bit byte
	 * counter in only a few seconds. Hence, use a workqueue to
	 * accumulate them into 64-bit counters.
	 */
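	/* The resulting name is "stats-wq-" plus the first port's netdev
	 * name, with a trailing '+' when several ports share the queue
	 * (e.g. "stats-wq-eth0+").
	 */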
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;
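
	/* Failure paths: detach any ports probed so far, then disable the
	 * clocks in the reverse order of their acquisition.
	 */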
err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
	struct fwnode_handle *port_fwnode;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);
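
	/* With per-CPU pools there are two BM pools per RXQ rather than
	 * the default set, so recompute how many must be destroyed.
	 */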
	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	for (i = 0; i < poolnum; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}
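
	/* Clocks were only acquired in the DT case, so there is nothing
	 * to disable when running on ACPI.
	 */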
	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");