// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy);

#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}
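
/* Illustrative mapping, assuming an 8-CPU system with nthreads == 4:
 * CPUs 0-3 map to threads 0-3 and CPUs 4-7 wrap around to threads 0-3
 * again, so two CPUs may end up sharing one register window when there
 * are more CPUs than hardware threads.
 */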
/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_AGGR_TXQ_STATUS_REG
 *   MVPP2_AGGR_TXQ_INDEX_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
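
/* Illustrative sketch (not part of the driver): accessing a per-queue
 * register through a thread window. The queue number is first latched in
 * MVPP2_RXQ_NUM_REG on a given thread, then the related global register
 * is accessed through that same thread:
 *
 *	thread = mvpp2_cpu_to_thread(priv, get_cpu());
 *	mvpp2_thread_write(priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
 *	val = mvpp2_thread_read(priv, thread, MVPP2_RXQ_DESC_ADDR_REG);
 *	put_cpu();
 */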
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
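
/* Worked example of the mapping above, in terms of the MVPP2_MAX_TCONT
 * and MVPP2_MAX_TXQ constants from mvpp2.h: egress ports are numbered
 * after the TX containers, so port 0 is egress port MVPP2_MAX_TCONT and
 * its logical txq 3 is physical txq MVPP2_MAX_TCONT * MVPP2_MAX_TXQ + 3.
 */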
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}
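
/* Note: on PPv2.2 the buffer addresses can exceed 32 bits, so the high
 * bits of both the DMA address and the (cookie) physical address come
 * packed in the single MVPP22_BM_ADDR_HIGH_ALLOC register; they are only
 * OR-ed in above when the kernel address types are 64-bit wide.
 */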
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}
/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}
static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}
static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}
static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
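
/* Note on ordering: the high-bits and cookie writes above only stage data
 * for the release; per the accessor comment near the top of this file,
 * MVPP2_BM_PHY_RLS_REG is the global register related to
 * MVPP2_BM_VIRT_RLS_REG, and writing it last is what actually returns the
 * buffer to the pool.
 */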
/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= MVPP2_BM_POOLS_NUM) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[pool].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}
/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 128.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);
	}

	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}
static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}
/* Mask the current thread's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}
/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GKR ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}
static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}
/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_PORT_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access: it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))
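
/* Worked example, assuming the common multi-queue setup of ntxqs = 8 and
 * nrxqs = 4: 27 MIB counters + 2 port counters + 8 * 9 txq counters +
 * 4 * 4 rxq counters = 117 entries reported through ethtool -S.
 */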
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}
}
static void mvpp2_read_stats(struct mvpp2_port *port)
{
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);
}
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}
static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}
static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}
static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}
static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GKR:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}
/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}
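
/* Both helpers above subtract the 2-byte Marvell header (MVPP2_MH_SIZE)
 * before dividing by two; the division matches a limit field that is
 * programmed in 2-byte units.
 */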
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}
/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		unsigned int thread =
			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
		u32 val = mvpp2_read_relaxed(port->priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(thread));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}
/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2 *priv = port->priv;
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, desc_count;
	unsigned int thread;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for (thread = 0; thread < port->priv->nthreads; thread++) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	    (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;

	return 0;
}
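
/* The desc_count check in mvpp2_txq_reserved_desc_num_proc() deliberately
 * keeps MVPP2_CPU_DESC_CHUNK descriptors of headroom per possible thread,
 * so one thread's reservation can never starve the others out of
 * reservable descriptors.
 */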
/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
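
/* Illustrative call (not part of the driver): building the command for a
 * TCP-over-IPv4 skb, assuming the usual helpers describe the packet being
 * sent (ip_hdr(skb)->ihl is the IP header length in 32-bit words):
 *
 *	cmd = mvpp2_txq_desc_csum(skb_network_offset(skb),
 *				  htons(ETH_P_IP),
 *				  ip_hdr(skb)->ihl, IPPROTO_TCP);
 *
 * The result is then OR-ed into the descriptor command via
 * mvpp2_txdesc_cmd_set().
 */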
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_thread_read_relaxed(port->priv,
					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_thread_read(port->priv,
				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
				  MVPP2_TXQ_SENT_REG(id));
	}
}
/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
2007 /* Set the number of packets that will be received before Rx interrupt
2008 * will be generated by HW.
2010 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2011 struct mvpp2_rx_queue *rxq)
2013 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2015 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2016 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2018 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2019 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2025 /* For some reason in the LSP this is done on each CPU. Why ? */
2026 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2027 struct mvpp2_tx_queue *txq)
2029 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2032 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2033 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2035 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2036 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2037 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
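/* Usage sketch (illustrative threshold): programming done_pkts_coal = 64
 * makes the queue raise its TX-done interrupt only after 64 descriptors
 * have been transmitted, trading completion latency for fewer
 * interrupts. The driver default comes from
 * MVPP2_TXDONE_COAL_PKTS_THRESH, assigned in mvpp2_port_init().
 */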
2042 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2044 u64 tmp = (u64)clk_hz * usec;
2046 do_div(tmp, USEC_PER_SEC);
2048 return tmp > U32_MAX ? U32_MAX : tmp;
2051 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2053 u64 tmp = (u64)cycles * USEC_PER_SEC;
2055 do_div(tmp, clk_hz);
2057 return tmp > U32_MAX ? U32_MAX : tmp;
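/* Worked conversion (illustrative clock rate): with clk_hz = 250000000
 * (250 MHz), mvpp2_usec_to_cycles(100, clk_hz) = 250000000 * 100 /
 * 1000000 = 25000 cycles, and mvpp2_cycles_to_usec(25000, clk_hz) maps
 * back to 100 us; values overflowing u32 are saturated to U32_MAX.
 */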
2060 /* Set the time delay in usec before Rx interrupt */
2061 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2062 struct mvpp2_rx_queue *rxq)
2064 unsigned long freq = port->priv->tclk;
2065 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2067 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2069 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2071 /* re-evaluate to get actual register value */
2072 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2075 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2078 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2080 unsigned long freq = port->priv->tclk;
2081 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2083 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2084 port->tx_time_coal =
2085 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2087 /* re-evaluate to get actual register value */
2088 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2091 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2094 /* Free Tx queue skbuffs */
2095 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2096 struct mvpp2_tx_queue *txq,
2097 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2101 for (i = 0; i < num; i++) {
2102 struct mvpp2_txq_pcpu_buf *tx_buf =
2103 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2105 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
2106 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2107 tx_buf->size, DMA_TO_DEVICE);
2109 dev_kfree_skb_any(tx_buf->skb);
2111 mvpp2_txq_inc_get(txq_pcpu);
2115 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2118 int queue = fls(cause) - 1;
2120 return port->rxqs[queue];
2123 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2126 int queue = fls(cause) - 1;
2128 return port->txqs[queue];
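/* Decoding sketch: both helpers pick the highest-numbered pending queue
 * first. E.g. cause = 0b0110 gives fls(cause) - 1 = 2, so queue 2 is
 * returned; the caller (see mvpp2_tx_done() below) clears that bit and
 * loops, reaching queue 1 on the next iteration.
 */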
2131 /* Handle end of transmission */
2132 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2133 struct mvpp2_txq_pcpu *txq_pcpu)
2135 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2138 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2139 netdev_err(port->dev, "wrong CPU at the end of Tx processing\n");
2141 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2144 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2146 txq_pcpu->count -= tx_done;
2148 if (netif_tx_queue_stopped(nq))
2149 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2150 netif_tx_wake_queue(nq);
2153 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2154 unsigned int thread)
2156 struct mvpp2_tx_queue *txq;
2157 struct mvpp2_txq_pcpu *txq_pcpu;
2158 unsigned int tx_todo = 0;
2161 txq = mvpp2_get_tx_queue(port, cause);
2165 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2167 if (txq_pcpu->count) {
2168 mvpp2_txq_done(port, txq, txq_pcpu);
2169 tx_todo += txq_pcpu->count;
2172 cause &= ~(1 << txq->log_id);
2177 /* Rx/Tx queue initialization/cleanup methods */
2179 /* Allocate and initialize descriptors for aggr TXQ */
2180 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2181 struct mvpp2_tx_queue *aggr_txq,
2182 unsigned int thread, struct mvpp2 *priv)
2186 /* Allocate memory for TX descriptors */
2187 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2188 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2189 &aggr_txq->descs_dma, GFP_KERNEL);
2190 if (!aggr_txq->descs)
2193 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2195 /* WA: the aggregated TXQ cannot be reset, so resync with the HW index */
2196 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2197 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2199 /* Set Tx descriptors queue starting address - indirect access */
2202 if (priv->hw_version == MVPP21)
2203 txq_dma = aggr_txq->descs_dma;
2205 txq_dma = aggr_txq->descs_dma >>
2206 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2208 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2209 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2210 MVPP2_AGGR_TXQ_SIZE);
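/* On PPv2.2 the ring can live in a 40-bit DMA space, so only the
 * address bits above MVPP22_AGGR_TXQ_DESC_ADDR_OFFS are programmed; the
 * coherent allocation is assumed to be aligned such that the shifted
 * value fully describes the ring base address.
 */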
2215 /* Create a specified Rx queue */
2216 static int mvpp2_rxq_init(struct mvpp2_port *port,
2217 struct mvpp2_rx_queue *rxq)
2220 unsigned int thread;
2223 rxq->size = port->rx_ring_size;
2225 /* Allocate memory for RX descriptors */
2226 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2227 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2228 &rxq->descs_dma, GFP_KERNEL);
2232 rxq->last_desc = rxq->size - 1;
2234 /* Zero occupied and non-occupied counters - direct access */
2235 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2237 /* Set Rx descriptors queue starting address - indirect access */
2238 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2239 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2240 if (port->priv->hw_version == MVPP21)
2241 rxq_dma = rxq->descs_dma;
2243 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2244 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2245 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2246 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2250 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
2252 /* Set coalescing pkts and time */
2253 mvpp2_rx_pkts_coal_set(port, rxq);
2254 mvpp2_rx_time_coal_set(port, rxq);
2256 /* Add number of descriptors ready for receiving packets */
2257 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2262 /* Push packets received by the RXQ to BM pool */
2263 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
2264 struct mvpp2_rx_queue *rxq)
2268 rx_received = mvpp2_rxq_received(port, rxq->id);
2272 for (i = 0; i < rx_received; i++) {
2273 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2274 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2277 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2278 MVPP2_RXD_BM_POOL_ID_OFFS;
2280 mvpp2_bm_pool_put(port, pool,
2281 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
2282 mvpp2_rxdesc_cookie_get(port, rx_desc));
2284 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
2287 /* Cleanup Rx queue */
2288 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
2289 struct mvpp2_rx_queue *rxq)
2291 unsigned int thread;
2293 mvpp2_rxq_drop_pkts(port, rxq);
2296 dma_free_coherent(port->dev->dev.parent,
2297 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2303 rxq->next_desc_to_proc = 0;
2306 /* Clear Rx descriptors queue starting address and size;
2307 * free descriptor number
2309 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2310 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2311 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2312 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
2313 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
2317 /* Create and initialize a Tx queue */
2318 static int mvpp2_txq_init(struct mvpp2_port *port,
2319 struct mvpp2_tx_queue *txq)
2322 unsigned int thread;
2323 int desc, desc_per_txq, tx_port_num;
2324 struct mvpp2_txq_pcpu *txq_pcpu;
2326 txq->size = port->tx_ring_size;
2328 /* Allocate memory for Tx descriptors */
2329 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
2330 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2331 &txq->descs_dma, GFP_KERNEL);
2335 txq->last_desc = txq->size - 1;
2337 /* Set Tx descriptors queue starting address - indirect access */
2338 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2339 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2340 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
2342 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
2343 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
2344 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
2345 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
2346 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
2347 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
2348 val &= ~MVPP2_TXQ_PENDING_MASK;
2349 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
2351 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
2352 * for each existing TXQ.
2353 * TCONTs for the PON port must be continuous from 0 to MVPP2_MAX_TCONT;
2354 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS.
2355 */
2356 desc_per_txq = 16;
2357 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
2358 (txq->log_id * desc_per_txq);
2360 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
2361 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
2362 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
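/* Layout sketch (using desc_per_txq = 16 from the comment above): port 1,
 * logical queue 2 starts at desc = 1 * MVPP2_MAX_TXQ * 16 + 2 * 16, so
 * every port owns a contiguous MVPP2_MAX_TXQ * 16 slice of the prefetch
 * buffer and queues never overlap.
 */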
2365 /* WRR / EJP configuration - indirect access */
2366 tx_port_num = mvpp2_egress_port(port);
2367 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2369 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
2370 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
2371 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
2372 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
2373 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
2375 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
2376 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
2379 for (thread = 0; thread < port->priv->nthreads; thread++) {
2380 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2381 txq_pcpu->size = txq->size;
2382 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
2383 sizeof(*txq_pcpu->buffs),
2385 if (!txq_pcpu->buffs)
2388 txq_pcpu->count = 0;
2389 txq_pcpu->reserved_num = 0;
2390 txq_pcpu->txq_put_index = 0;
2391 txq_pcpu->txq_get_index = 0;
2392 txq_pcpu->tso_headers = NULL;
2394 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
2395 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
2397 txq_pcpu->tso_headers =
2398 dma_alloc_coherent(port->dev->dev.parent,
2399 txq_pcpu->size * TSO_HEADER_SIZE,
2400 &txq_pcpu->tso_headers_dma,
2402 if (!txq_pcpu->tso_headers)
2409 /* Free allocated TXQ resources */
2410 static void mvpp2_txq_deinit(struct mvpp2_port *port,
2411 struct mvpp2_tx_queue *txq)
2413 struct mvpp2_txq_pcpu *txq_pcpu;
2414 unsigned int thread;
2416 for (thread = 0; thread < port->priv->nthreads; thread++) {
2417 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2418 kfree(txq_pcpu->buffs);
2420 if (txq_pcpu->tso_headers)
2421 dma_free_coherent(port->dev->dev.parent,
2422 txq_pcpu->size * TSO_HEADER_SIZE,
2423 txq_pcpu->tso_headers,
2424 txq_pcpu->tso_headers_dma);
2426 txq_pcpu->tso_headers = NULL;
2430 dma_free_coherent(port->dev->dev.parent,
2431 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2432 txq->descs, txq->descs_dma);
2436 txq->next_desc_to_proc = 0;
2439 /* Set minimum bandwidth for disabled TXQs */
2440 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2442 /* Set Tx descriptors queue starting address and size */
2443 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2444 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2445 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
2446 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
2450 /* Drain and clean up a Tx queue */
2451 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
2453 struct mvpp2_txq_pcpu *txq_pcpu;
2455 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2458 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2459 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
2460 val |= MVPP2_TXQ_DRAIN_EN_MASK;
2461 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2463 /* The napi queue has been stopped so wait for all packets
2464 * to be transmitted.
2468 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2469 netdev_warn(port->dev,
2470 "port %d: cleaning queue %d timed out\n",
2471 port->id, txq->log_id);
2477 pending = mvpp2_thread_read(port->priv, thread,
2478 MVPP2_TXQ_PENDING_REG);
2479 pending &= MVPP2_TXQ_PENDING_MASK;
2482 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2483 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2486 for (thread = 0; thread < port->priv->nthreads; thread++) {
2487 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2489 /* Release all packets */
2490 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
2493 txq_pcpu->count = 0;
2494 txq_pcpu->txq_put_index = 0;
2495 txq_pcpu->txq_get_index = 0;
2499 /* Cleanup all Tx queues */
2500 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
2502 struct mvpp2_tx_queue *txq;
2506 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
2508 /* Reset Tx ports and delete Tx queues */
2509 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
2510 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2512 for (queue = 0; queue < port->ntxqs; queue++) {
2513 txq = port->txqs[queue];
2514 mvpp2_txq_clean(port, txq);
2515 mvpp2_txq_deinit(port, txq);
2518 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2520 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
2521 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2524 /* Cleanup all Rx queues */
2525 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
2529 for (queue = 0; queue < port->nrxqs; queue++)
2530 mvpp2_rxq_deinit(port, port->rxqs[queue]);
2533 /* Init all Rx queues for port */
2534 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
2538 for (queue = 0; queue < port->nrxqs; queue++) {
2539 err = mvpp2_rxq_init(port, port->rxqs[queue]);
2546 mvpp2_cleanup_rxqs(port);
2550 /* Init all tx queues for port */
2551 static int mvpp2_setup_txqs(struct mvpp2_port *port)
2553 struct mvpp2_tx_queue *txq;
2554 int queue, err, cpu;
2556 for (queue = 0; queue < port->ntxqs; queue++) {
2557 txq = port->txqs[queue];
2558 err = mvpp2_txq_init(port, txq);
2562 /* Assign this queue to a CPU */
2563 cpu = queue % num_present_cpus();
2564 netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
2567 if (port->has_tx_irqs) {
2568 mvpp2_tx_time_coal_set(port);
2569 for (queue = 0; queue < port->ntxqs; queue++) {
2570 txq = port->txqs[queue];
2571 mvpp2_tx_pkts_coal_set(port, txq);
2575 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2579 mvpp2_cleanup_txqs(port);
2583 /* The callback for the per-queue-vector interrupt */
2584 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
2586 struct mvpp2_queue_vector *qv = dev_id;
2588 mvpp2_qvec_interrupt_disable(qv);
2590 napi_schedule(&qv->napi);
2595 /* Per-port interrupt for link status changes */
2596 static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
2598 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
2599 struct net_device *dev = port->dev;
2600 bool event = false, link = false;
2603 mvpp22_gop_mask_irq(port);
2605 if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
2606 val = readl(port->base + MVPP22_XLG_INT_STAT);
2607 if (val & MVPP22_XLG_INT_STAT_LINK) {
2609 val = readl(port->base + MVPP22_XLG_STATUS);
2610 if (val & MVPP22_XLG_STATUS_LINK_UP)
2613 } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
2614 phy_interface_mode_is_8023z(port->phy_interface) ||
2615 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
2616 val = readl(port->base + MVPP22_GMAC_INT_STAT);
2617 if (val & MVPP22_GMAC_INT_STAT_LINK) {
2619 val = readl(port->base + MVPP2_GMAC_STATUS0);
2620 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
2625 if (port->phylink) {
2626 phylink_mac_change(port->phylink, link);
2630 if (!netif_running(dev) || !event)
2634 mvpp2_interrupts_enable(port);
2636 mvpp2_egress_enable(port);
2637 mvpp2_ingress_enable(port);
2638 netif_carrier_on(dev);
2639 netif_tx_wake_all_queues(dev);
2641 netif_tx_stop_all_queues(dev);
2642 netif_carrier_off(dev);
2643 mvpp2_ingress_disable(port);
2644 mvpp2_egress_disable(port);
2646 mvpp2_interrupts_disable(port);
2650 mvpp22_gop_unmask_irq(port);
2654 static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
2658 if (!port_pcpu->timer_scheduled) {
2659 port_pcpu->timer_scheduled = true;
2660 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
2661 hrtimer_start(&port_pcpu->tx_done_timer, interval,
2662 HRTIMER_MODE_REL_PINNED);
2666 static void mvpp2_tx_proc_cb(unsigned long data)
2668 struct net_device *dev = (struct net_device *)data;
2669 struct mvpp2_port *port = netdev_priv(dev);
2670 struct mvpp2_port_pcpu *port_pcpu;
2671 unsigned int tx_todo, cause;
2673 port_pcpu = per_cpu_ptr(port->pcpu,
2674 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
2676 if (!netif_running(dev))
2678 port_pcpu->timer_scheduled = false;
2680 /* Process all the Tx queues */
2681 cause = (1 << port->ntxqs) - 1;
2682 tx_todo = mvpp2_tx_done(port, cause,
2683 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
2685 /* Set the timer in case not all the packets were processed */
2687 mvpp2_timer_set(port_pcpu);
2690 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
2692 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
2693 struct mvpp2_port_pcpu,
2696 tasklet_schedule(&port_pcpu->tx_done_tasklet);
2698 return HRTIMER_NORESTART;
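/* Deferral pattern used when a port has no TX-done interrupts: mvpp2_tx()
 * arms the pinned hrtimer via mvpp2_timer_set(); this callback only
 * schedules the tasklet, and the tasklet body (mvpp2_tx_proc_cb() above)
 * reaps completed descriptors, re-arming the timer if work remains.
 */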
2701 /* Main RX/TX processing routines */
2703 /* Display more error info */
2704 static void mvpp2_rx_error(struct mvpp2_port *port,
2705 struct mvpp2_rx_desc *rx_desc)
2707 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2708 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
2709 char *err_str = NULL;
2711 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
2712 case MVPP2_RXD_ERR_CRC:
2715 case MVPP2_RXD_ERR_OVERRUN:
2716 err_str = "overrun";
2718 case MVPP2_RXD_ERR_RESOURCE:
2719 err_str = "resource";
2722 if (err_str && net_ratelimit())
2723 netdev_err(port->dev,
2724 "bad rx status %08x (%s error), size=%zu\n",
2725 status, err_str, sz);
2728 /* Handle RX checksum offload */
2729 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
2730 struct sk_buff *skb)
2732 if (((status & MVPP2_RXD_L3_IP4) &&
2733 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
2734 (status & MVPP2_RXD_L3_IP6))
2735 if (((status & MVPP2_RXD_L4_UDP) ||
2736 (status & MVPP2_RXD_L4_TCP)) &&
2737 (status & MVPP2_RXD_L4_CSUM_OK)) {
2739 skb->ip_summed = CHECKSUM_UNNECESSARY;
2743 skb->ip_summed = CHECKSUM_NONE;
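/* Offload summary: CHECKSUM_UNNECESSARY is reported only for an
 * error-free IPv4 header (or any IPv6 header) combined with a TCP or UDP
 * payload whose L4 checksum the hardware marked as OK; every other
 * combination falls through to CHECKSUM_NONE and is verified in software.
 */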
2746 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */
2747 static int mvpp2_rx_refill(struct mvpp2_port *port,
2748 struct mvpp2_bm_pool *bm_pool, int pool)
2750 dma_addr_t dma_addr;
2751 phys_addr_t phys_addr;
2754 /* No recycle or too many buffers are in use, so allocate a new skb */
2755 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
2760 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2765 /* Handle tx checksum */
2766 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
2768 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2771 __be16 l3_proto = vlan_get_protocol(skb);
2773 if (l3_proto == htons(ETH_P_IP)) {
2774 struct iphdr *ip4h = ip_hdr(skb);
2776 /* Calculate IPv4 checksum and L4 checksum */
2777 ip_hdr_len = ip4h->ihl;
2778 l4_proto = ip4h->protocol;
2779 } else if (l3_proto == htons(ETH_P_IPV6)) {
2780 struct ipv6hdr *ip6h = ipv6_hdr(skb);
2782 /* Read l4_protocol from one of the IPv6 extension headers */
2783 if (skb_network_header_len(skb) > 0)
2784 ip_hdr_len = (skb_network_header_len(skb) >> 2);
2785 l4_proto = ip6h->nexthdr;
2787 return MVPP2_TXD_L4_CSUM_NOT;
2790 return mvpp2_txq_desc_csum(skb_network_offset(skb),
2791 l3_proto, ip_hdr_len, l4_proto);
2794 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
2797 /* Main rx processing */
2798 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
2799 int rx_todo, struct mvpp2_rx_queue *rxq)
2801 struct net_device *dev = port->dev;
2807 /* Get the number of received packets and clamp the to-do budget */
2808 rx_received = mvpp2_rxq_received(port, rxq->id);
2809 if (rx_todo > rx_received)
2810 rx_todo = rx_received;
2812 while (rx_done < rx_todo) {
2813 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2814 struct mvpp2_bm_pool *bm_pool;
2815 struct sk_buff *skb;
2816 unsigned int frag_size;
2817 dma_addr_t dma_addr;
2818 phys_addr_t phys_addr;
2820 int pool, rx_bytes, err;
2824 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
2825 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
2826 rx_bytes -= MVPP2_MH_SIZE;
2827 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
2828 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
2829 data = (void *)phys_to_virt(phys_addr);
2831 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2832 MVPP2_RXD_BM_POOL_ID_OFFS;
2833 bm_pool = &port->priv->bm_pools[pool];
2835 /* In case of an error, release the requested buffer pointer
2836 * to the Buffer Manager. This release is controlled by the
2837 * hardware, and the information about the buffer is carried
2838 * in the RX descriptor.
2840 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
2842 dev->stats.rx_errors++;
2843 mvpp2_rx_error(port, rx_desc);
2844 /* Return the buffer to the pool */
2845 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2849 if (bm_pool->frag_size > PAGE_SIZE)
2852 frag_size = bm_pool->frag_size;
2854 skb = build_skb(data, frag_size);
2856 netdev_warn(port->dev, "skb build failed\n");
2857 goto err_drop_frame;
2860 err = mvpp2_rx_refill(port, bm_pool, pool);
2862 netdev_err(port->dev, "failed to refill BM pools\n");
2863 goto err_drop_frame;
2866 dma_unmap_single(dev->dev.parent, dma_addr,
2867 bm_pool->buf_size, DMA_FROM_DEVICE);
2870 rcvd_bytes += rx_bytes;
2872 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
2873 skb_put(skb, rx_bytes);
2874 skb->protocol = eth_type_trans(skb, dev);
2875 mvpp2_rx_csum(port, rx_status, skb);
2877 napi_gro_receive(napi, skb);
2881 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
2883 u64_stats_update_begin(&stats->syncp);
2884 stats->rx_packets += rcvd_pkts;
2885 stats->rx_bytes += rcvd_bytes;
2886 u64_stats_update_end(&stats->syncp);
2889 /* Update Rx queue management counters */
2891 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
2896 static void
2897 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2898 struct mvpp2_tx_desc *desc)
2900 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2901 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2903 dma_addr_t buf_dma_addr =
2904 mvpp2_txdesc_dma_addr_get(port, desc);
2905 size_t buf_sz =
2906 mvpp2_txdesc_size_get(port, desc);
2907 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
2908 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
2909 buf_sz, DMA_TO_DEVICE);
2910 mvpp2_txq_desc_put(txq);
2913 /* Handle Tx fragment processing */
2914 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
2915 struct mvpp2_tx_queue *aggr_txq,
2916 struct mvpp2_tx_queue *txq)
2918 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2919 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2920 struct mvpp2_tx_desc *tx_desc;
2922 dma_addr_t buf_dma_addr;
2924 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2925 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2926 void *addr = page_address(frag->page.p) + frag->page_offset;
2928 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
2929 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
2930 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
2932 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
2933 frag->size, DMA_TO_DEVICE);
2934 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
2935 mvpp2_txq_desc_put(txq);
2939 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
2941 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
2942 /* Last descriptor */
2943 mvpp2_txdesc_cmd_set(port, tx_desc,
2945 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
2947 /* Descriptor in the middle: Not First, Not Last */
2948 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
2949 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
2955 /* Release all descriptors that were used to map fragments of
2956 * this packet, as well as the corresponding DMA mappings
2958 for (i = i - 1; i >= 0; i--) {
2959 tx_desc = txq->descs + i;
2960 tx_desc_unmap_put(port, txq, tx_desc);
2966 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
2967 struct net_device *dev,
2968 struct mvpp2_tx_queue *txq,
2969 struct mvpp2_tx_queue *aggr_txq,
2970 struct mvpp2_txq_pcpu *txq_pcpu,
2973 struct mvpp2_port *port = netdev_priv(dev);
2974 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
2977 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
2978 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
2980 addr = txq_pcpu->tso_headers_dma +
2981 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
2982 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
2984 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
2986 MVPP2_TXD_PADDING_DISABLE);
2987 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
2990 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
2991 struct net_device *dev, struct tso_t *tso,
2992 struct mvpp2_tx_queue *txq,
2993 struct mvpp2_tx_queue *aggr_txq,
2994 struct mvpp2_txq_pcpu *txq_pcpu,
2995 int sz, bool left, bool last)
2997 struct mvpp2_port *port = netdev_priv(dev);
2998 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
2999 dma_addr_t buf_dma_addr;
3001 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3002 mvpp2_txdesc_size_set(port, tx_desc, sz);
3004 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
3006 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3007 mvpp2_txq_desc_put(txq);
3011 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3014 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
3016 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3020 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3023 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3027 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
3028 struct mvpp2_tx_queue *txq,
3029 struct mvpp2_tx_queue *aggr_txq,
3030 struct mvpp2_txq_pcpu *txq_pcpu)
3032 struct mvpp2_port *port = netdev_priv(dev);
3033 struct tso_t tso;
3034 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
3035 int i, len, descs = 0;
3037 /* Check number of available descriptors */
3038 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
3039 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
3040 tso_count_descs(skb)))
3043 tso_start(skb, &tso);
3044 len = skb->len - hdr_sz;
3046 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
3047 char *hdr = txq_pcpu->tso_headers +
3048 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3053 tso_build_hdr(skb, hdr, &tso, left, len == 0);
3054 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
3057 int sz = min_t(int, tso.size, left);
3061 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
3062 txq_pcpu, sz, left, len == 0))
3064 tso_build_data(skb, &tso, sz);
3071 for (i = descs - 1; i >= 0; i--) {
3072 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
3073 tx_desc_unmap_put(port, txq, tx_desc);
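/* Segmentation sketch (illustrative numbers): a 9000-byte TSO skb with
 * gso_size = 1400 and a 54-byte header is emitted as DIV_ROUND_UP(8946,
 * 1400) = 7 segments, each consuming one header descriptor from the
 * per-thread tso_headers area plus one or more data descriptors; that is
 * why tso_count_descs() is checked against both the aggregated and
 * reserved descriptor budgets up front.
 */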
3078 /* Main tx processing */
3079 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
3081 struct mvpp2_port *port = netdev_priv(dev);
3082 struct mvpp2_tx_queue *txq, *aggr_txq;
3083 struct mvpp2_txq_pcpu *txq_pcpu;
3084 struct mvpp2_tx_desc *tx_desc;
3085 dma_addr_t buf_dma_addr;
3086 unsigned long flags = 0;
3087 unsigned int thread;
3092 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3094 txq_id = skb_get_queue_mapping(skb);
3095 txq = port->txqs[txq_id];
3096 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3097 aggr_txq = &port->priv->aggr_txqs[thread];
3099 if (test_bit(thread, &port->priv->lock_map))
3100 spin_lock_irqsave(&port->tx_lock[thread], flags);
3102 if (skb_is_gso(skb)) {
3103 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
3106 frags = skb_shinfo(skb)->nr_frags + 1;
3108 /* Check number of available descriptors */
3109 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
3110 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
3115 /* Get a descriptor for the first part of the packet */
3116 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3117 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3118 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
3120 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
3121 skb_headlen(skb), DMA_TO_DEVICE);
3122 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3123 mvpp2_txq_desc_put(txq);
3128 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3130 tx_cmd = mvpp2_skb_tx_csum(port, skb);
3133 /* First and Last descriptor */
3134 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3135 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3136 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
3138 /* First but not Last */
3139 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
3140 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3141 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
3143 /* Continue with other skb fragments */
3144 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
3145 tx_desc_unmap_put(port, txq, tx_desc);
3152 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
3153 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
3155 txq_pcpu->reserved_num -= frags;
3156 txq_pcpu->count += frags;
3157 aggr_txq->count += frags;
3159 /* Enable transmit */
3161 mvpp2_aggr_txq_pend_desc_add(port, frags);
3163 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3164 netif_tx_stop_queue(nq);
3166 u64_stats_update_begin(&stats->syncp);
3167 stats->tx_packets++;
3168 stats->tx_bytes += skb->len;
3169 u64_stats_update_end(&stats->syncp);
3171 dev->stats.tx_dropped++;
3172 dev_kfree_skb_any(skb);
3175 /* Finalize TX processing */
3176 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3177 mvpp2_txq_done(port, txq, txq_pcpu);
3179 /* Set the timer in case not all frags were processed */
3180 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
3181 txq_pcpu->count > 0) {
3182 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
3184 mvpp2_timer_set(port_pcpu);
3187 if (test_bit(thread, &port->priv->lock_map))
3188 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
3190 return NETDEV_TX_OK;
3193 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
3195 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
3196 netdev_err(dev, "FCS error\n");
3197 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
3198 netdev_err(dev, "rx fifo overrun error\n");
3199 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
3200 netdev_err(dev, "tx fifo underrun error\n");
3203 static int mvpp2_poll(struct napi_struct *napi, int budget)
3205 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
3207 struct mvpp2_port *port = netdev_priv(napi->dev);
3208 struct mvpp2_queue_vector *qv;
3209 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3211 qv = container_of(napi, struct mvpp2_queue_vector, napi);
3213 /* Rx/Tx cause register
3215 * Bits 0-15: each bit indicates received packets on the Rx queue
3216 * (bit 0 is for Rx queue 0).
3218 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
3219 * (bit 16 is for Tx queue 0).
3221 * Each CPU has its own Rx/Tx cause register
3223 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
3224 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
3226 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
3228 mvpp2_cause_error(port->dev, cause_misc);
3230 /* Clear the cause register */
3231 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
3232 mvpp2_thread_write(port->priv, thread,
3233 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
3234 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
3237 if (port->has_tx_irqs) {
3238 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3240 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
3241 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
3245 /* Process RX packets */
3246 cause_rx = cause_rx_tx &
3247 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
3248 cause_rx <<= qv->first_rxq;
3249 cause_rx |= qv->pending_cause_rx;
3250 while (cause_rx && budget > 0) {
3252 struct mvpp2_rx_queue *rxq;
3254 rxq = mvpp2_get_rx_queue(port, cause_rx);
3258 count = mvpp2_rx(port, napi, budget, rxq);
3262 /* Clear the bit associated to this Rx queue
3263 * so that next iteration will continue from
3264 * the next Rx queue.
3266 cause_rx &= ~(1 << rxq->logic_rxq);
3272 napi_complete_done(napi, rx_done);
3274 mvpp2_qvec_interrupt_enable(qv);
3276 qv->pending_cause_rx = cause_rx;
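/* Cause decoding sketch: a raw cause_rx_tx of 0x00010003 means Tx queue 0
 * completed work (bit 16) and Rx queues 0 and 1 hold packets (bits 0-1),
 * per the register layout documented above; Rx bits left over when the
 * budget runs out are parked in pending_cause_rx for the next poll.
 */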
3280 static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
3284 /* Set the GMAC & XLG MAC in reset */
3285 mvpp2_mac_reset_assert(port);
3287 /* Set the MPCS and XPCS in reset */
3288 mvpp22_pcs_reset_assert(port);
3290 /* comphy reconfiguration */
3291 mvpp22_comphy_init(port);
3293 /* gop reconfiguration */
3294 mvpp22_gop_init(port);
3296 mvpp22_pcs_reset_deassert(port);
3298 /* Only GOP port 0 has an XLG MAC */
3299 if (port->gop_id == 0) {
3300 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
3301 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
3303 if (mvpp2_is_xlg(port->phy_interface))
3304 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
3306 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
3308 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
3311 if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
3312 mvpp2_xlg_max_rx_size_set(port);
3314 mvpp2_gmac_max_rx_size_set(port);
3317 /* Set hw internals when starting port */
3318 static void mvpp2_start_dev(struct mvpp2_port *port)
3322 mvpp2_txp_max_tx_size_set(port);
3324 for (i = 0; i < port->nqvecs; i++)
3325 napi_enable(&port->qvecs[i].napi);
3327 /* Enable interrupts on all threads */
3328 mvpp2_interrupts_enable(port);
3330 if (port->priv->hw_version == MVPP22)
3331 mvpp22_mode_reconfigure(port);
3333 if (port->phylink) {
3334 phylink_start(port->phylink);
3336 /* Phylink isn't used for ACPI yet, so the MAC has to be
3337 * configured manually when the interface is started. This will
3338 * be removed as soon as phylink ACPI support lands.
3340 struct phylink_link_state state = {
3341 .interface = port->phy_interface,
3343 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
3344 mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
3345 port->phy_interface, NULL);
3348 netif_tx_start_all_queues(port->dev);
3351 /* Set hw internals when stopping port */
3352 static void mvpp2_stop_dev(struct mvpp2_port *port)
3356 /* Disable interrupts on all threads */
3357 mvpp2_interrupts_disable(port);
3359 for (i = 0; i < port->nqvecs; i++)
3360 napi_disable(&port->qvecs[i].napi);
3363 phylink_stop(port->phylink);
3364 phy_power_off(port->comphy);
3367 static int mvpp2_check_ringparam_valid(struct net_device *dev,
3368 struct ethtool_ringparam *ring)
3370 u16 new_rx_pending = ring->rx_pending;
3371 u16 new_tx_pending = ring->tx_pending;
3373 if (ring->rx_pending == 0 || ring->tx_pending == 0)
3376 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
3377 new_rx_pending = MVPP2_MAX_RXD_MAX;
3378 else if (!IS_ALIGNED(ring->rx_pending, 16))
3379 new_rx_pending = ALIGN(ring->rx_pending, 16);
3381 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
3382 new_tx_pending = MVPP2_MAX_TXD_MAX;
3383 else if (!IS_ALIGNED(ring->tx_pending, 32))
3384 new_tx_pending = ALIGN(ring->tx_pending, 32);
3386 /* The Tx ring size cannot be smaller than the minimum number of
3387 * descriptors needed for TSO.
3389 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
3390 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
3392 if (ring->rx_pending != new_rx_pending) {
3393 netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
3394 ring->rx_pending, new_rx_pending);
3395 ring->rx_pending = new_rx_pending;
3398 if (ring->tx_pending != new_tx_pending) {
3399 netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
3400 ring->tx_pending, new_tx_pending);
3401 ring->tx_pending = new_tx_pending;
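/* Rounding example: a requested rx_pending of 100 is not 16-aligned and
 * becomes ALIGN(100, 16) = 112; a tx_pending of 100 becomes
 * ALIGN(100, 32) = 128; and any Tx ring shorter than MVPP2_MAX_SKB_DESCS
 * is raised so a worst-case TSO frame always fits.
 */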
3407 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
3409 u32 mac_addr_l, mac_addr_m, mac_addr_h;
3411 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3412 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
3413 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
3414 addr[0] = (mac_addr_h >> 24) & 0xFF;
3415 addr[1] = (mac_addr_h >> 16) & 0xFF;
3416 addr[2] = (mac_addr_h >> 8) & 0xFF;
3417 addr[3] = mac_addr_h & 0xFF;
3418 addr[4] = mac_addr_m & 0xFF;
3419 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
3422 static int mvpp2_irqs_init(struct mvpp2_port *port)
3426 for (i = 0; i < port->nqvecs; i++) {
3427 struct mvpp2_queue_vector *qv = port->qvecs + i;
3429 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
3430 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
3436 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
3439 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
3443 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
3446 for_each_present_cpu(cpu) {
3447 if (mvpp2_cpu_to_thread(port->priv, cpu) ==
3449 cpumask_set_cpu(cpu, qv->mask);
3452 irq_set_affinity_hint(qv->irq, qv->mask);
3458 for (i = 0; i < port->nqvecs; i++) {
3459 struct mvpp2_queue_vector *qv = port->qvecs + i;
3461 irq_set_affinity_hint(qv->irq, NULL);
3464 free_irq(qv->irq, qv);
3470 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
3474 for (i = 0; i < port->nqvecs; i++) {
3475 struct mvpp2_queue_vector *qv = port->qvecs + i;
3477 irq_set_affinity_hint(qv->irq, NULL);
3480 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
3481 free_irq(qv->irq, qv);
3485 static bool mvpp22_rss_is_supported(void)
3487 return queue_mode == MVPP2_QDIST_MULTI_MODE;
3490 static int mvpp2_open(struct net_device *dev)
3492 struct mvpp2_port *port = netdev_priv(dev);
3493 struct mvpp2 *priv = port->priv;
3494 unsigned char mac_bcast[ETH_ALEN] = {
3495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3499 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
3501 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
3504 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
3506 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
3509 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
3511 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
3514 err = mvpp2_prs_def_flow(port);
3516 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
3520 /* Allocate the Rx/Tx queues */
3521 err = mvpp2_setup_rxqs(port);
3523 netdev_err(port->dev, "cannot allocate Rx queues\n");
3527 err = mvpp2_setup_txqs(port);
3529 netdev_err(port->dev, "cannot allocate Tx queues\n");
3530 goto err_cleanup_rxqs;
3533 err = mvpp2_irqs_init(port);
3535 netdev_err(port->dev, "cannot init IRQs\n");
3536 goto err_cleanup_txqs;
3539 /* Phylink isn't supported yet in ACPI mode */
3540 if (port->of_node) {
3541 err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
3543 netdev_err(port->dev, "could not attach PHY (%d)\n",
3551 if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
3552 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
3555 netdev_err(port->dev, "cannot request link IRQ %d\n",
3560 mvpp22_gop_setup_irq(port);
3562 /* The link is down by default */
3563 netif_carrier_off(port->dev);
3571 netdev_err(port->dev,
3572 "invalid configuration: no dt or link IRQ");
3576 /* Unmask interrupts on all CPUs */
3577 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
3578 mvpp2_shared_interrupt_mask_unmask(port, false);
3580 mvpp2_start_dev(port);
3582 /* Start hardware statistics gathering */
3583 queue_delayed_work(priv->stats_queue, &port->stats_work,
3584 MVPP2_MIB_COUNTERS_STATS_DELAY);
3589 mvpp2_irqs_deinit(port);
3591 mvpp2_cleanup_txqs(port);
3593 mvpp2_cleanup_rxqs(port);
3597 static int mvpp2_stop(struct net_device *dev)
3599 struct mvpp2_port *port = netdev_priv(dev);
3600 struct mvpp2_port_pcpu *port_pcpu;
3601 unsigned int thread;
3603 mvpp2_stop_dev(port);
3605 /* Mask interrupts on all threads */
3606 on_each_cpu(mvpp2_interrupts_mask, port, 1);
3607 mvpp2_shared_interrupt_mask_unmask(port, true);
3610 phylink_disconnect_phy(port->phylink);
3612 free_irq(port->link_irq, port);
3614 mvpp2_irqs_deinit(port);
3615 if (!port->has_tx_irqs) {
3616 for (thread = 0; thread < port->priv->nthreads; thread++) {
3617 port_pcpu = per_cpu_ptr(port->pcpu, thread);
3619 hrtimer_cancel(&port_pcpu->tx_done_timer);
3620 port_pcpu->timer_scheduled = false;
3621 tasklet_kill(&port_pcpu->tx_done_tasklet);
3624 mvpp2_cleanup_rxqs(port);
3625 mvpp2_cleanup_txqs(port);
3627 cancel_delayed_work_sync(&port->stats_work);
3629 mvpp2_mac_reset_assert(port);
3630 mvpp22_pcs_reset_assert(port);
3635 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
3636 struct netdev_hw_addr_list *list)
3638 struct netdev_hw_addr *ha;
3641 netdev_hw_addr_list_for_each(ha, list) {
3642 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
3650 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
3652 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3653 mvpp2_prs_vid_enable_filtering(port);
3655 mvpp2_prs_vid_disable_filtering(port);
3657 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3658 MVPP2_PRS_L2_UNI_CAST, enable);
3660 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3661 MVPP2_PRS_L2_MULTI_CAST, enable);
3664 static void mvpp2_set_rx_mode(struct net_device *dev)
3666 struct mvpp2_port *port = netdev_priv(dev);
3668 /* Clear the whole UC and MC list */
3669 mvpp2_prs_mac_del_all(port);
3671 if (dev->flags & IFF_PROMISC) {
3672 mvpp2_set_rx_promisc(port, true);
3676 mvpp2_set_rx_promisc(port, false);
3678 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
3679 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
3680 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3681 MVPP2_PRS_L2_UNI_CAST, true);
3683 if (dev->flags & IFF_ALLMULTI) {
3684 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3685 MVPP2_PRS_L2_MULTI_CAST, true);
3689 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
3690 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
3691 mvpp2_prs_mac_promisc_set(port->priv, port->id,
3692 MVPP2_PRS_L2_MULTI_CAST, true);
3695 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
3697 const struct sockaddr *addr = p;
3700 if (!is_valid_ether_addr(addr->sa_data))
3701 return -EADDRNOTAVAIL;
3703 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
3705 /* Reconfigure the parser to accept the original MAC address */
3706 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
3707 netdev_err(dev, "failed to change MAC address\n");
3712 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
3714 struct mvpp2_port *port = netdev_priv(dev);
3715 bool running = netif_running(dev);
3718 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
3719 netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
3720 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
3721 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
3725 mvpp2_stop_dev(port);
3727 err = mvpp2_bm_update_mtu(dev, mtu);
3729 netdev_err(dev, "failed to change MTU\n");
3730 /* Reconfigure BM to the original MTU */
3731 mvpp2_bm_update_mtu(dev, dev->mtu);
3733 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3737 mvpp2_start_dev(port);
3738 mvpp2_egress_enable(port);
3739 mvpp2_ingress_enable(port);
3746 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
3748 struct mvpp2_port *port = netdev_priv(dev);
3752 for_each_possible_cpu(cpu) {
3753 struct mvpp2_pcpu_stats *cpu_stats;
3759 cpu_stats = per_cpu_ptr(port->stats, cpu);
3761 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
3762 rx_packets = cpu_stats->rx_packets;
3763 rx_bytes = cpu_stats->rx_bytes;
3764 tx_packets = cpu_stats->tx_packets;
3765 tx_bytes = cpu_stats->tx_bytes;
3766 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
3768 stats->rx_packets += rx_packets;
3769 stats->rx_bytes += rx_bytes;
3770 stats->tx_packets += tx_packets;
3771 stats->tx_bytes += tx_bytes;
3774 stats->rx_errors = dev->stats.rx_errors;
3775 stats->rx_dropped = dev->stats.rx_dropped;
3776 stats->tx_dropped = dev->stats.tx_dropped;
3779 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3781 struct mvpp2_port *port = netdev_priv(dev);
3786 return phylink_mii_ioctl(port->phylink, ifr, cmd);
3789 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
3791 struct mvpp2_port *port = netdev_priv(dev);
3794 ret = mvpp2_prs_vid_entry_add(port, vid);
3796 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
3797 MVPP2_PRS_VLAN_FILT_MAX - 1);
3801 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
3803 struct mvpp2_port *port = netdev_priv(dev);
3805 mvpp2_prs_vid_entry_remove(port, vid);
3809 static int mvpp2_set_features(struct net_device *dev,
3810 netdev_features_t features)
3812 netdev_features_t changed = dev->features ^ features;
3813 struct mvpp2_port *port = netdev_priv(dev);
3815 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3816 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
3817 mvpp2_prs_vid_enable_filtering(port);
3819 /* Invalidate all registered VID filters for this port */
3822 mvpp2_prs_vid_remove_all(port);
3824 mvpp2_prs_vid_disable_filtering(port);
3828 if (changed & NETIF_F_RXHASH) {
3829 if (features & NETIF_F_RXHASH)
3830 mvpp22_port_rss_enable(port);
3832 mvpp22_port_rss_disable(port);
3838 /* Ethtool methods */
3840 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
3842 struct mvpp2_port *port = netdev_priv(dev);
3847 return phylink_ethtool_nway_reset(port->phylink);
3850 /* Set interrupt coalescing for ethtool */
3851 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
3852 struct ethtool_coalesce *c)
3854 struct mvpp2_port *port = netdev_priv(dev);
3857 for (queue = 0; queue < port->nrxqs; queue++) {
3858 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
3860 rxq->time_coal = c->rx_coalesce_usecs;
3861 rxq->pkts_coal = c->rx_max_coalesced_frames;
3862 mvpp2_rx_pkts_coal_set(port, rxq);
3863 mvpp2_rx_time_coal_set(port, rxq);
3866 if (port->has_tx_irqs) {
3867 port->tx_time_coal = c->tx_coalesce_usecs;
3868 mvpp2_tx_time_coal_set(port);
3871 for (queue = 0; queue < port->ntxqs; queue++) {
3872 struct mvpp2_tx_queue *txq = port->txqs[queue];
3874 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3876 if (port->has_tx_irqs)
3877 mvpp2_tx_pkts_coal_set(port, txq);
3883 /* Get interrupt coalescing for ethtool */
3884 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
3885 struct ethtool_coalesce *c)
3887 struct mvpp2_port *port = netdev_priv(dev);
3889 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
3890 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
3891 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
3892 c->tx_coalesce_usecs = port->tx_time_coal;
3896 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
3897 struct ethtool_drvinfo *drvinfo)
3899 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
3900 sizeof(drvinfo->driver));
3901 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
3902 sizeof(drvinfo->version));
3903 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3904 sizeof(drvinfo->bus_info));
3907 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
3908 struct ethtool_ringparam *ring)
3910 struct mvpp2_port *port = netdev_priv(dev);
3912 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
3913 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
3914 ring->rx_pending = port->rx_ring_size;
3915 ring->tx_pending = port->tx_ring_size;
3918 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
3919 struct ethtool_ringparam *ring)
3921 struct mvpp2_port *port = netdev_priv(dev);
3922 u16 prev_rx_ring_size = port->rx_ring_size;
3923 u16 prev_tx_ring_size = port->tx_ring_size;
3926 err = mvpp2_check_ringparam_valid(dev, ring);
3930 if (!netif_running(dev)) {
3931 port->rx_ring_size = ring->rx_pending;
3932 port->tx_ring_size = ring->tx_pending;
3936 /* The interface is running, so we have to force a
3937 * reallocation of the queues
3939 mvpp2_stop_dev(port);
3940 mvpp2_cleanup_rxqs(port);
3941 mvpp2_cleanup_txqs(port);
3943 port->rx_ring_size = ring->rx_pending;
3944 port->tx_ring_size = ring->tx_pending;
3946 err = mvpp2_setup_rxqs(port);
3948 /* Reallocate Rx queues with the original ring size */
3949 port->rx_ring_size = prev_rx_ring_size;
3950 ring->rx_pending = prev_rx_ring_size;
3951 err = mvpp2_setup_rxqs(port);
3955 err = mvpp2_setup_txqs(port);
3957 /* Reallocate Tx queues with the original ring size */
3958 port->tx_ring_size = prev_tx_ring_size;
3959 ring->tx_pending = prev_tx_ring_size;
3960 err = mvpp2_setup_txqs(port);
3962 goto err_clean_rxqs;
3965 mvpp2_start_dev(port);
3966 mvpp2_egress_enable(port);
3967 mvpp2_ingress_enable(port);
3972 mvpp2_cleanup_rxqs(port);
3974 netdev_err(dev, "failed to change ring parameters");
3978 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
3979 struct ethtool_pauseparam *pause)
3981 struct mvpp2_port *port = netdev_priv(dev);
3986 phylink_ethtool_get_pauseparam(port->phylink, pause);
3989 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
3990 struct ethtool_pauseparam *pause)
3992 struct mvpp2_port *port = netdev_priv(dev);
3997 return phylink_ethtool_set_pauseparam(port->phylink, pause);
4000 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
4001 struct ethtool_link_ksettings *cmd)
4003 struct mvpp2_port *port = netdev_priv(dev);
4008 return phylink_ethtool_ksettings_get(port->phylink, cmd);
4011 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
4012 const struct ethtool_link_ksettings *cmd)
4014 struct mvpp2_port *port = netdev_priv(dev);
4019 return phylink_ethtool_ksettings_set(port->phylink, cmd);
4022 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
4023 struct ethtool_rxnfc *info, u32 *rules)
4025 struct mvpp2_port *port = netdev_priv(dev);
4026 int ret = 0, i, loc = 0;
4028 if (!mvpp22_rss_is_supported())
4031 switch (info->cmd) {
4033 ret = mvpp2_ethtool_rxfh_get(port, info);
4035 case ETHTOOL_GRXRINGS:
4036 info->data = port->nrxqs;
4038 case ETHTOOL_GRXCLSRLCNT:
4039 info->rule_cnt = port->n_rfs_rules;
4041 case ETHTOOL_GRXCLSRULE:
4042 ret = mvpp2_ethtool_cls_rule_get(port, info);
4044 case ETHTOOL_GRXCLSRLALL:
4045 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
4046 if (port->rfs_rules[i])
4057 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
4058 struct ethtool_rxnfc *info)
4060 struct mvpp2_port *port = netdev_priv(dev);
4063 if (!mvpp22_rss_is_supported())
4066 switch (info->cmd) {
4068 ret = mvpp2_ethtool_rxfh_set(port, info);
4070 case ETHTOOL_SRXCLSRLINS:
4071 ret = mvpp2_ethtool_cls_rule_ins(port, info);
4073 case ETHTOOL_SRXCLSRLDEL:
4074 ret = mvpp2_ethtool_cls_rule_del(port, info);
4082 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
4084 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
4087 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4090 struct mvpp2_port *port = netdev_priv(dev);
4093 if (!mvpp22_rss_is_supported())
4097 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
4100 *hfunc = ETH_RSS_HASH_CRC32;
4105 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4106 const u8 *key, const u8 hfunc)
4108 struct mvpp2_port *port = netdev_priv(dev);
4111 if (!mvpp22_rss_is_supported())
4114 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4121 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
4126 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
4127 u8 *key, u8 *hfunc, u32 rss_context)
4129 struct mvpp2_port *port = netdev_priv(dev);
4132 if (!mvpp22_rss_is_supported())
4136 *hfunc = ETH_RSS_HASH_CRC32;
4139 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
4144 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
4145 const u32 *indir, const u8 *key,
4146 const u8 hfunc, u32 *rss_context,
4149 struct mvpp2_port *port = netdev_priv(dev);
4152 if (!mvpp22_rss_is_supported())
4155 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
4162 return mvpp22_port_rss_ctx_delete(port, *rss_context);
4164 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
4165 ret = mvpp22_port_rss_ctx_create(port, rss_context);
4170 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
4174 static const struct net_device_ops mvpp2_netdev_ops = {
4175 .ndo_open = mvpp2_open,
4176 .ndo_stop = mvpp2_stop,
4177 .ndo_start_xmit = mvpp2_tx,
4178 .ndo_set_rx_mode = mvpp2_set_rx_mode,
4179 .ndo_set_mac_address = mvpp2_set_mac_address,
4180 .ndo_change_mtu = mvpp2_change_mtu,
4181 .ndo_get_stats64 = mvpp2_get_stats64,
4182 .ndo_do_ioctl = mvpp2_ioctl,
4183 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
4184 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
4185 .ndo_set_features = mvpp2_set_features,
4188 static const struct ethtool_ops mvpp2_eth_tool_ops = {
4189 .nway_reset = mvpp2_ethtool_nway_reset,
4190 .get_link = ethtool_op_get_link,
4191 .set_coalesce = mvpp2_ethtool_set_coalesce,
4192 .get_coalesce = mvpp2_ethtool_get_coalesce,
4193 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
4194 .get_ringparam = mvpp2_ethtool_get_ringparam,
4195 .set_ringparam = mvpp2_ethtool_set_ringparam,
4196 .get_strings = mvpp2_ethtool_get_strings,
4197 .get_ethtool_stats = mvpp2_ethtool_get_stats,
4198 .get_sset_count = mvpp2_ethtool_get_sset_count,
4199 .get_pauseparam = mvpp2_ethtool_get_pause_param,
4200 .set_pauseparam = mvpp2_ethtool_set_pause_param,
4201 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
4202 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
4203 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
4204 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
4205 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
4206 .get_rxfh = mvpp2_ethtool_get_rxfh,
4207 .set_rxfh = mvpp2_ethtool_set_rxfh,
4208 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
4209 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
4212 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
4213 * had a single IRQ defined per-port.
4215 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
4216 struct device_node *port_node)
4218 struct mvpp2_queue_vector *v = &port->qvecs[0];
4221 v->nrxqs = port->nrxqs;
4222 v->type = MVPP2_QUEUE_VECTOR_SHARED;
4223 v->sw_thread_id = 0;
4224 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
4226 v->irq = irq_of_parse_and_map(port_node, 0);
4229 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
4237 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
4238 struct device_node *port_node)
4240 struct mvpp2 *priv = port->priv;
4241 struct mvpp2_queue_vector *v;
4244 switch (queue_mode) {
4245 case MVPP2_QDIST_SINGLE_MODE:
4246 port->nqvecs = priv->nthreads + 1;
4248 case MVPP2_QDIST_MULTI_MODE:
4249 port->nqvecs = priv->nthreads;
4253 for (i = 0; i < port->nqvecs; i++) {
4256 v = port->qvecs + i;
4259 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
4260 v->sw_thread_id = i;
4261 v->sw_thread_mask = BIT(i);
4263 if (port->flags & MVPP2_F_DT_COMPAT)
4264 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
4266 snprintf(irqname, sizeof(irqname), "hif%d", i);
4268 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
4271 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
4272 i == (port->nqvecs - 1)) {
4274 v->nrxqs = port->nrxqs;
4275 v->type = MVPP2_QUEUE_VECTOR_SHARED;
4277 if (port->flags & MVPP2_F_DT_COMPAT)
4278 strncpy(irqname, "rx-shared", sizeof(irqname));
4282 v->irq = of_irq_get_byname(port_node, irqname);
4284 v->irq = fwnode_irq_get(port->fwnode, i);
4290 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
4297 for (i = 0; i < port->nqvecs; i++)
4298 irq_dispose_mapping(port->qvecs[i].irq);
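/* Vector layout sketch (assuming 4 software threads for illustration):
 * MVPP2_QDIST_MULTI_MODE creates 4 private vectors named "hif0".."hif3",
 * each with its own Rx queues; MVPP2_QDIST_SINGLE_MODE creates 4 private
 * Tx vectors plus one final shared vector owning all Rx queues, matching
 * the nqvecs computation in the switch above. Legacy DT-compat ports use
 * the "tx-cpu%d"/"rx-shared" names instead.
 */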
4302 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
4303 struct device_node *port_node)
4305 if (port->has_tx_irqs)
4306 return mvpp2_multi_queue_vectors_init(port, port_node);
4308 return mvpp2_simple_queue_vectors_init(port, port_node);
4311 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
4315 for (i = 0; i < port->nqvecs; i++)
4316 irq_dispose_mapping(port->qvecs[i].irq);
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
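/* Worked example (editor's note, values symbolic): for port 1 and a vector
 * with sw_thread_id 2, first_rxq 8 and nrxqs 4, the two writes above program
 *   INDEX_REG  = (1 << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET) | 2
 *   CONFIG_REG = 8 | (4 << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET)
 * i.e. "Rx queues 8..11 of port 1 raise interrupts on software thread 2".
 */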
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;
	int queue, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for (thread = 0; thread < priv->nthreads; thread++) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
			txq_pcpu->thread = thread;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	if (mvpp22_rss_is_supported())
		mvpp22_port_rss_init(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	/* Clear all port stats */
	mvpp2_read_stats(port);
	memset(port->ethtool_stats, 0,
	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
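/* Note (editor's illustration): mvpp2_txq_phys() maps a (port, logical
 * queue) pair to a global physical Tx queue id; assuming the usual
 * MVPP2_MAX_TXQ of 8, port 1 / logical queue 2 would map to physical
 * Tx queue 10.
 */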
static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
					   unsigned long *flags)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
			  "tx-cpu3" };
	int i;

	for (i = 0; i < 5; i++)
		if (of_property_match_string(port_node, "interrupt-names",
					     irqs[i]) < 0)
			return false;

	*flags |= MVPP2_F_DT_COMPAT;
	return true;
}
/* Checks if the port dt description has the required Tx interrupts:
 * - PPv2.1: there are no such interrupts.
 * - PPv2.2:
 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
 *   - The new ones have: "hifX" with X in [0..8]
 *
 * All those variants are supported to keep backward compatibility.
 */
static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
				struct device_node *port_node,
				unsigned long *flags)
{
	char name[5];
	int i;

	/* ACPI */
	if (!port_node)
		return true;

	if (priv->hw_version == MVPP21)
		return false;

	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
		return true;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		snprintf(name, 5, "hif%d", i);
		if (of_property_match_string(port_node, "interrupt-names",
					     name) < 0)
			return false;
	}

	return true;
}
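/* Illustrative DT fragments (editor's note, hypothetical bindings shown
 * for reference only):
 *   old binding: interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *                                  "tx-cpu2", "tx-cpu3";
 *   new binding: interrupt-names = "hif0", "hif1", ..., "hif8";
 */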
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}
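/* MAC address precedence (editor's note): the firmware node (a DT
 * "mac-address" property or its ACPI equivalent) wins, then the hardware
 * registers on PPv2.1 only, then a random locally-administered address
 * as a last resort.
 */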
static void mvpp2_phylink_validate(struct phylink_config *config,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
					       phylink_config);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Invalid combinations */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_XAUI:
		if (port->gop_id != 0)
			goto empty_set;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
			goto empty_set;
		break;
	default:
		break;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_NA:
		if (port->gop_id == 0) {
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 10000baseKR_Full);
		}
		/* Fall-through */
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		/* Fall-through */
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	default:
		goto empty_set;
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

empty_set:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
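/* Note (editor's illustration): the second switch above relies on
 * deliberate fall-through, so slower modes accumulate; e.g. SGMII picks
 * up the 10/100 half/full modes of its own case plus the 1000/2500 modes
 * of the 1000BASE-X/2500BASE-X cases below it.
 */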
static void mvpp22_xlg_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	state->pause = 0;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}
static void mvpp2_gmac_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}
static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
					struct phylink_link_state *state)
{
	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
					       phylink_config);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);

		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
			mvpp22_xlg_link_state(port, state);
			return 1;
		}
	}

	mvpp2_gmac_link_state(port, state);

	return 1;
}
static void mvpp2_mac_an_restart(struct phylink_config *config)
{
	struct mvpp2_port *port = container_of(config, struct mvpp2_port,
					       phylink_config);
	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 old_ctrl0, ctrl0;
	u32 old_ctrl4, ctrl4;

	old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);

	ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;

	if (state->pause & MLO_PAUSE_TX)
		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
	else
		ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;

	if (state->pause & MLO_PAUSE_RX)
		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
	else
		ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

	ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
		   MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;

	if (old_ctrl0 != ctrl0)
		writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
	if (old_ctrl4 != ctrl4)
		writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);

	if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
		while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
			 MVPP22_XLG_CTRL0_MAC_RESET_DIS))
			continue;
	}
}
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 old_an, an;
	u32 old_ctrl0, ctrl0;
	u32 old_ctrl2, ctrl2;
	u32 old_ctrl4, ctrl4;

	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
		   MVPP2_GMAC_PCS_ENABLE_MASK);
	ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);

	/* Configure port type */
	if (phy_interface_mode_is_8023z(state->interface)) {
		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	/* Configure advertisement bits */
	if (phylink_test(state->advertising, Pause))
		an |= MVPP2_GMAC_FC_ADV_EN;
	if (phylink_test(state->advertising, Asym_Pause))
		an |= MVPP2_GMAC_FC_ADV_ASM_EN;

	/* Configure negotiation style */
	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed - no in-band AN */
		if (state->duplex)
			an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

		if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			an |= MVPP2_GMAC_CONFIG_MII_SPEED;

		if (state->pause & MLO_PAUSE_TX)
			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
		if (state->pause & MLO_PAUSE_RX)
			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII in-band mode receives the speed and duplex from
		 * the PHY. Flow control information is not received. */
		an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
		an |= MVPP2_GMAC_IN_BAND_AUTONEG |
		      MVPP2_GMAC_AN_SPEED_EN |
		      MVPP2_GMAC_AN_DUPLEX_EN;

		if (state->pause & MLO_PAUSE_TX)
			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
		if (state->pause & MLO_PAUSE_RX)
			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
	} else if (phy_interface_mode_is_8023z(state->interface)) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
		an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS);
		an |= MVPP2_GMAC_IN_BAND_AUTONEG |
		      MVPP2_GMAC_CONFIG_GMII_SPEED |
		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;

		if (state->pause & MLO_PAUSE_AN && state->an_enabled) {
			an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
		} else {
			if (state->pause & MLO_PAUSE_TX)
				ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
			if (state->pause & MLO_PAUSE_RX)
				ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
		}
	}

/* Some fields of the auto-negotiation register require the port to be down when
 * their value is updated.
 */
#define MVPP2_GMAC_AN_PORT_DOWN_MASK	\
		(MVPP2_GMAC_IN_BAND_AUTONEG | \
		 MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
		 MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
		 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
		 MVPP2_GMAC_AN_DUPLEX_EN)

	if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
	    (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
	    (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
		/* Force link down */
		old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

		/* Set the GMAC in a reset state - do this in a way that
		 * ensures we clear it below.
		 */
		old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
		writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	}

	if (old_ctrl0 != ctrl0)
		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	if (old_ctrl2 != ctrl2)
		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	if (old_ctrl4 != ctrl4)
		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);

	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		       MVPP2_GMAC_PORT_RESET_MASK)
			continue;
	}
}
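/* Note (editor's illustration): whenever a field covered by
 * MVPP2_GMAC_AN_PORT_DOWN_MASK changes, the GMAC is first forced link-down
 * and held in reset; the final write of ctrl2 (with the reset bit clear)
 * releases it, and the busy-wait above only completes once the hardware
 * reads the reset bit back as zero.
 */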
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
			     const struct phylink_link_state *state)
{
	struct net_device *dev = to_net_dev(config->dev);
	struct mvpp2_port *port = netdev_priv(dev);
	bool change_interface = port->phy_interface != state->interface;

	/* Check for invalid configuration */
	if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
		netdev_err(dev, "Invalid mode on %s\n", dev->name);
		return;
	}

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->priv->hw_version == MVPP22 && change_interface) {
		mvpp22_gop_mask_irq(port);

		port->phy_interface = state->interface;

		/* Reconfigure the serdes lanes */
		phy_power_off(port->comphy);
		mvpp22_mode_reconfigure(port);
	}

	/* mac (re)configuration */
	if (mvpp2_is_xlg(state->interface))
		mvpp2_xlg_config(port, mode, state);
	else if (phy_interface_mode_is_rgmii(state->interface) ||
		 phy_interface_mode_is_8023z(state->interface) ||
		 state->interface == PHY_INTERFACE_MODE_SGMII)
		mvpp2_gmac_config(port, mode, state);

	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port, state);

	if (port->priv->hw_version == MVPP22 && change_interface)
		mvpp22_gop_unmask_irq(port);

	mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy)
{
	struct net_device *dev = to_net_dev(config->dev);
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface)) {
			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
			val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
		} else {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			val |= MVPP2_GMAC_FORCE_LINK_PASS;
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		}
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(dev);
}
static void mvpp2_mac_link_down(struct phylink_config *config,
				unsigned int mode, phy_interface_t interface)
{
	struct net_device *dev = to_net_dev(config->dev);
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		if (mvpp2_is_xlg(interface)) {
			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
		} else {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		}
	}

	netif_tx_stop_all_queues(dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	mvpp2_port_disable(port);
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.validate = mvpp2_phylink_validate,
	.mac_link_state = mvpp2_phylink_mac_link_state,
	.mac_an_restart = mvpp2_mac_an_restart,
	.mac_config = mvpp2_mac_config,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};
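/* Note (editor's illustration): these callbacks are driven by the phylink
 * core: validate() trims the supported link modes for a given interface,
 * mac_config() reprograms the MAC when the resolved state changes, and
 * mac_link_up()/mac_link_down() toggle forced-link bits and the queues.
 */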
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	netdev_features_t features;
	struct net_device *dev;
	struct resource *res;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs, thread;
	unsigned long flags = 0;
	bool has_tx_irqs;
	u32 id;
	int phy_mode;
	int err, i;

	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
		dev_err(&pdev->dev,
			"not enough IRQs to support multi queue mode\n");
		return -EINVAL;
	}

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) {
		nrxqs = 1;
	} else {
		/* According to the PPv2.2 datasheet and our experiments on
		 * PPv2.1, RX queues have an allocation granularity of 4 (when
		 * more than a single one on PPv2.2).
		 * Round up to the nearest multiple of 4.
		 */
		nrxqs = (num_possible_cpus() + 3) & ~0x3;
		if (nrxqs > MVPP2_PORT_MAX_RXQ)
			nrxqs = MVPP2_PORT_MAX_RXQ;
	}
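	/* Worked example (editor's note): (n + 3) & ~0x3 rounds n up to a
	 * multiple of 4, e.g. 1..4 -> 4 and 5..8 -> 8; a 6-CPU system
	 * therefore gets 8 Rx queues, capped at MVPP2_PORT_MAX_RXQ.
	 */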
	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->has_phy = !!of_find_property(port_node, "phy", NULL);
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;
	port->flags = flags;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}
	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_mac_reset_assert(port);
	mvpp22_pcs_reset_assert(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	if (!port->has_tx_irqs) {
		for (thread = 0; thread < priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}
	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported()) {
		dev->hw_features |= NETIF_F_RXHASH;
		dev->features |= NETIF_F_NTUPLE;
	}

	mvpp2_set_hw_csum(port, port->pool_long->id);

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
	dev->dev.of_node = port_node;

	/* Phylink isn't used w/ ACPI as of now */
	if (port_node) {
		port->phylink_config.dev = &dev->dev;
		port->phylink_config.type = PHYLINK_NETDEV;

		phylink = phylink_create(&port->phylink_config, port_fwnode,
					 phy_mode, &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		port->phylink = NULL;
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}
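/* Note (editor's illustration): the error labels above unwind in exact
 * reverse order of acquisition, so a failure at any step releases only
 * what was already set up (phylink, per-cpu areas, stats, the link IRQ,
 * the queue vectors and finally the netdev itself).
 */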
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}
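/* Budget check (editor's note): 10kB for port 0 plus 3kB for each of the
 * three remaining ports gives 10 + 3 * 3 = 19kB, i.e. exactly the total
 * PPv2.2 Tx FIFO described in the comment above.
 */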
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* multi queue mode isn't supported on PPv2.1, fall back to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by the
			 * second region of the network controller, make
			 * sure it is released before requesting it again.
			 * Care is taken by the mvpp2 driver to avoid
			 * concurrent access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}
	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;
	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}
	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
	 */
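	/* Rough arithmetic behind the figure above (editor's note): 10Gbps
	 * of minimum-sized frames is about 14.88 Mpps (64B plus 20B of
	 * framing overhead per packet), and the byte counter alone wraps
	 * 2^32 bytes at 1.25GB/s in roughly 3.4 seconds, hence the periodic
	 * gathering below.
	 */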
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");