// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/page_pool.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

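/* Illustrative mapping only (not part of the original driver): with
 * priv->nthreads == 4, CPUs 0..7 map to threads 0, 1, 2, 3, 0, 1, 2, 3,
 * i.e. several CPUs may share one register window on systems with more
 * CPUs than hardware threads.
 */
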
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
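/* A minimal usage sketch of the per-thread window (illustrative only, not
 * part of the original file): reading this port's Rx/Tx cause register for
 * the current thread:
 *
 *	unsigned int thread = mvpp2_cpu_to_thread(priv, smp_processor_id());
 *	u32 cause = mvpp2_thread_read(priv, thread,
 *				      MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
 */
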
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

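/* Worked example of the split above (illustrative numbers, assuming
 * MVPP2_TX_DESC_ALIGN is a low-bits mask such as 0x1f): dma_addr 0x12345
 * yields addr = 0x12340 and offset = 0x5; the descriptor carries the
 * aligned address while the low bits travel in packet_offset.
 */
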
static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of maximum RXQ */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}

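/* Worked example of the rounding above (illustrative only):
 * num_possible_cpus() == 6 gives (6 + 3) & ~0x3 == 8, i.e. the next
 * multiple of 4; a value already aligned, such as 4, is left unchanged.
 */
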
/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

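/* Illustrative only, with assumed values MVPP2_MAX_TCONT == 16 and
 * MVPP2_MAX_TXQ == 8 (see mvpp2.h for the real definitions): port 1,
 * txq 2 would map to physical TXQ (16 + 1) * 8 + 2 == 138.
 */
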
/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		poolnum = mvpp2_get_nrxqs(priv) * 2;
		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum when jumbo enable/disable on port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
	 * has 7 bits, so the maximum L3 offset is 128.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. The index can represent various resources depending on the
 * register we access, it can be a hit counter for some classification tables,
 * a counter specific to a rxq, a txq or a buffer pool.
 */
static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
	return mvpp2_read(priv, reg);
}

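/* A usage sketch of the indirection above (illustrative only; this mirrors
 * the pattern used later in mvpp2_read_stats()): reading the descriptor
 * enqueue counter of this port's TX queue q:
 *
 *	u32 cnt = mvpp2_read_index(port->priv,
 *				   MVPP22_CTRS_TX_CTR(port->id, q),
 *				   MVPP2_TX_DESC_ENQ_CTR);
 */
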
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
};

static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
};

#define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
						 ARRAY_SIZE(mvpp2_ethtool_xdp))

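/* Worked example for the total above, using the array sizes defined here
 * (27 MIB, 2 port, 9 per-TXQ, 4 per-RXQ and 7 XDP counters): a port with
 * 8 TXQs and 4 RXQs exposes 27 + 2 + 9 * 8 + 4 * 4 + 7 = 124 statistics.
 */
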
static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	struct mvpp2_port *port = netdev_priv(netdev);
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
		strscpy(data, mvpp2_ethtool_port_regs[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (q = 0; q < port->ntxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_txq_regs[i].string, q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (q = 0; q < port->nrxqs; q++) {
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
			snprintf(data, ETH_GSTRING_LEN,
				 mvpp2_ethtool_rxq_regs[i].string,
				 q);
			data += ETH_GSTRING_LEN;
		}
	}

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
		strscpy(data, mvpp2_ethtool_xdp[i].string,
			ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}

static void
mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
{
	unsigned int start;
	unsigned int cpu;

	/* Gather XDP Statistics */
	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 xdp_redirect;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_xmit_err;
		u64 xdp_tx;
		u64 xdp_tx_err;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			xdp_redirect = cpu_stats->xdp_redirect;
			xdp_pass = cpu_stats->xdp_pass;
			xdp_drop = cpu_stats->xdp_drop;
			xdp_xmit = cpu_stats->xdp_xmit;
			xdp_xmit_err = cpu_stats->xdp_xmit_err;
			xdp_tx = cpu_stats->xdp_tx;
			xdp_tx_err = cpu_stats->xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		xdp_stats->xdp_redirect += xdp_redirect;
		xdp_stats->xdp_pass += xdp_pass;
		xdp_stats->xdp_drop += xdp_drop;
		xdp_stats->xdp_xmit += xdp_xmit;
		xdp_stats->xdp_xmit_err += xdp_xmit_err;
		xdp_stats->xdp_tx += xdp_tx;
		xdp_stats->xdp_tx_err += xdp_tx_err;
	}
}

static void mvpp2_read_stats(struct mvpp2_port *port)
{
	struct mvpp2_pcpu_stats xdp_stats = {};
	const struct mvpp2_ethtool_counter *s;
	u64 *pstats;
	int i, q;

	pstats = port->ethtool_stats;

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);

	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
		*pstats++ += mvpp2_read(port->priv,
					mvpp2_ethtool_port_regs[i].offset +
					4 * port->id);

	for (q = 0; q < port->ntxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      MVPP22_CTRS_TX_CTR(port->id, q),
						      mvpp2_ethtool_txq_regs[i].offset);

	/* Rxqs are numbered from 0 from the user standpoint, but not from the
	 * driver's. We need to add the port->first_rxq offset.
	 */
	for (q = 0; q < port->nrxqs; q++)
		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
			*pstats++ += mvpp2_read_index(port->priv,
						      port->first_rxq + q,
						      mvpp2_ethtool_rxq_regs[i].offset);

	/* Gather XDP Statistics */
	mvpp2_get_xdp_stats(port, &xdp_stats);

	for (i = 0, s = mvpp2_ethtool_xdp;
	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
	     s++, i++) {
		switch (s->offset) {
		case ETHTOOL_XDP_REDIRECT:
			*pstats++ = xdp_stats.xdp_redirect;
			break;
		case ETHTOOL_XDP_PASS:
			*pstats++ = xdp_stats.xdp_pass;
			break;
		case ETHTOOL_XDP_DROP:
			*pstats++ = xdp_stats.xdp_drop;
			break;
		case ETHTOOL_XDP_TX:
			*pstats++ = xdp_stats.xdp_tx;
			break;
		case ETHTOOL_XDP_TX_ERR:
			*pstats++ = xdp_stats.xdp_tx_err;
			break;
		case ETHTOOL_XDP_XMIT:
			*pstats++ = xdp_stats.xdp_xmit;
			break;
		case ETHTOOL_XDP_XMIT_ERR:
			*pstats++ = xdp_stats.xdp_xmit_err;
			break;
		}
	}
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);

	mutex_lock(&port->gather_stats_lock);

	mvpp2_read_stats(port);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (i.e. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (sset == ETH_SS_STATS)
		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);

	return -EOPNOTSUPP;
}

static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
	      MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}
}

static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
}

static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs, *xpcs;
	u32 val;

	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
		return;

	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_10GBASER:
		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
		       MAC_CLK_RESET_SD_TX;
		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
		break;
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_RXAUI:
		val = readl(xpcs + MVPP22_XPCS_CFG0);
		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
		break;
	default:
		break;
	}
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

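/* The division by two above reflects that the hardware size field counts
 * 2-byte units. Worked example (illustrative, with MVPP2_MH_SIZE == 2):
 * pkt_size 1518 gives (1518 - 2) / 2 == 758 written into the field.
 */
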
/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			   MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

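/* Illustrative only: after processing 32 received descriptors and refilling
 * 32 buffers, both counts are posted with a single register write:
 * val = 32 | (32 << MVPP2_RXQ_NUM_NEW_OFFSET).
 */
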
/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

2083 /* Set rx queue offset */
2084 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2085 int prxq, int offset)
2089 /* Convert offset from bytes to units of 32 bytes */
2090 offset = offset >> 5;
2092 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2093 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2096 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2097 MVPP2_RXQ_PACKET_OFFSET_MASK);
2099 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
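/* Editor's note - worked example (illustrative): an offset of 64 bytes
 * is programmed as 64 >> 5 = 2 in the 32-byte-unit field:
 *
 *	mvpp2_rxq_offset_set(port, rxq->id, 64);
 *	// writes (2 << MVPP2_RXQ_PACKET_OFFSET_OFFS) into the register
 *
 * Any remainder below 32 bytes is truncated by the shift, so callers
 * such as mvpp2_rxq_init() pass a headroom sized accordingly.
 */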
2102 /* Tx descriptors helper methods */
2104 /* Get pointer to next Tx descriptor to be processed (sent) by HW */
2105 static struct mvpp2_tx_desc *
2106 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2108 int tx_desc = txq->next_desc_to_proc;
2110 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2111 return txq->descs + tx_desc;
2114 /* Update HW with number of aggregated Tx descriptors to be sent
2116 * Called only from mvpp2_tx(), so migration is disabled, using
2117 * smp_processor_id() is OK.
2119 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2121 /* aggregated access - relevant TXQ number is written in TX desc */
2122 mvpp2_thread_write(port->priv,
2123 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2124 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2127 /* Check if there are enough free descriptors in aggregated txq.
2128 * If not, update the number of occupied descriptors and repeat the check.
2130 * Called only from mvpp2_tx(), so migration is disabled, using
2131 * smp_processor_id() is OK.
2133 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2134 struct mvpp2_tx_queue *aggr_txq, int num)
2136 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2137 /* Update number of occupied aggregated Tx descriptors */
2138 unsigned int thread =
2139 mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2140 u32 val = mvpp2_read_relaxed(port->priv,
2141 MVPP2_AGGR_TXQ_STATUS_REG(thread));
2143 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2145 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2151 /* Reserved Tx descriptors allocation request
2153 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2154 * only by mvpp2_tx(), so migration is disabled, using
2155 * smp_processor_id() is OK.
2157 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2158 struct mvpp2_tx_queue *txq, int num)
2160 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2161 struct mvpp2 *priv = port->priv;
2164 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2165 mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2167 val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2169 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2172 /* Check if there are enough reserved descriptors for transmission.
2173 * If not, request chunk of reserved descriptors and check again.
2175 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2176 struct mvpp2_tx_queue *txq,
2177 struct mvpp2_txq_pcpu *txq_pcpu,
2180 int req, desc_count;
2181 unsigned int thread;
2183 if (txq_pcpu->reserved_num >= num)
2186 /* Not enough descriptors reserved! Update the reserved descriptor
2187 * count and check again.
2191 /* Compute total of used descriptors */
2192 for (thread = 0; thread < port->priv->nthreads; thread++) {
2193 struct mvpp2_txq_pcpu *txq_pcpu_aux;
2195 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2196 desc_count += txq_pcpu_aux->count;
2197 desc_count += txq_pcpu_aux->reserved_num;
2200 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2204 (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2207 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2209 /* OK, the descriptor could have been updated: check again. */
2210 if (txq_pcpu->reserved_num < num)
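/* Editor's note - illustrative numbers, assuming the 64-descriptor
 * chunk size used by MVPP2_CPU_DESC_CHUNK in mvpp2.h: with num = 2 and
 * reserved_num = 0, the thread requests
 *
 *	req = max(64, 2 - 0) = 64
 *
 * descriptors at once, so subsequent packets on the same thread are
 * served from the per-thread reservation without another HW request.
 * The desc_count guard above keeps one chunk's worth of descriptors
 * available for every other thread.
 */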
2215 /* Release the last allocated Tx descriptor. Useful to handle DMA
2216 * mapping failures in the Tx path.
2218 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2220 if (txq->next_desc_to_proc == 0)
2221 txq->next_desc_to_proc = txq->last_desc - 1;
2223 txq->next_desc_to_proc--;
2226 /* Set Tx descriptors fields relevant for CSUM calculation */
2227 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2228 int ip_hdr_len, int l4_proto)
2232 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2233 * G_L4_chk, L4_type required only for checksum calculation
2235 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2236 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2237 command |= MVPP2_TXD_IP_CSUM_DISABLE;
2239 if (l3_proto == htons(ETH_P_IP)) {
2240 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
2241 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
2243 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
2246 if (l4_proto == IPPROTO_TCP) {
2247 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
2248 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2249 } else if (l4_proto == IPPROTO_UDP) {
2250 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
2251 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
2253 command |= MVPP2_TXD_L4_CSUM_NOT;
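/* Editor's note - illustrative example: for a TCP-over-IPv4 frame with
 * a 20-byte IP header (ihl = 5, counted in 32-bit words, which is how
 * the mvpp2_skb_tx_csum() caller below passes it):
 *
 *	u32 cmd = mvpp2_txq_desc_csum(ETH_HLEN, htons(ETH_P_IP),
 *				      5, IPPROTO_TCP);
 *
 * clears MVPP2_TXD_IP_CSUM_DISABLE, MVPP2_TXD_L3_IP6, MVPP2_TXD_L4_UDP
 * and MVPP2_TXD_L4_CSUM_FRAG, so the HW generates both the IPv4 header
 * checksum and the TCP checksum.
 */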
2259 /* Get number of sent descriptors and decrement counter.
2260 * The number of sent descriptors is returned.
2263 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2264 * (migration disabled) and from the TX completion tasklet (migration
2265 * disabled) so using smp_processor_id() is OK.
2267 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2268 struct mvpp2_tx_queue *txq)
2272 /* Reading status reg resets transmitted descriptor counter */
2273 val = mvpp2_thread_read_relaxed(port->priv,
2274 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2275 MVPP2_TXQ_SENT_REG(txq->id));
2277 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2278 MVPP2_TRANSMITTED_COUNT_OFFSET;
2281 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2282 * disabled, therefore using smp_processor_id() is OK.
2284 static void mvpp2_txq_sent_counter_clear(void *arg)
2286 struct mvpp2_port *port = arg;
2289 /* If the thread isn't used, don't do anything */
2290 if (smp_processor_id() >= port->priv->nthreads)
2293 for (queue = 0; queue < port->ntxqs; queue++) {
2294 int id = port->txqs[queue]->id;
2296 mvpp2_thread_read(port->priv,
2297 mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2298 MVPP2_TXQ_SENT_REG(id));
2302 /* Set max sizes for Tx queues */
2303 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2306 int txq, tx_port_num;
2308 mtu = port->pkt_size * 8;
2309 if (mtu > MVPP2_TXP_MTU_MAX)
2310 mtu = MVPP2_TXP_MTU_MAX;
2312 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2315 /* Indirect access to registers */
2316 tx_port_num = mvpp2_egress_port(port);
2317 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2320 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2321 val &= ~MVPP2_TXP_MTU_MAX;
2323 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2325 /* TXP token size and all TXQs token size must be larger than MTU */
2326 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2327 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2330 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2332 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2335 for (txq = 0; txq < port->ntxqs; txq++) {
2336 val = mvpp2_read(port->priv,
2337 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2338 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2342 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2344 mvpp2_write(port->priv,
2345 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2351 /* Set the number of packets that will be received before an Rx
2352 * interrupt is generated by the HW.
2354 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2355 struct mvpp2_rx_queue *rxq)
2357 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2359 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2360 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2362 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2363 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2369 /* For some reason in the LSP this is done on each CPU. Why? */
2370 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2371 struct mvpp2_tx_queue *txq)
2373 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2376 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2377 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2379 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2380 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2381 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2386 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2388 u64 tmp = (u64)clk_hz * usec;
2390 do_div(tmp, USEC_PER_SEC);
2392 return tmp > U32_MAX ? U32_MAX : tmp;
2395 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2397 u64 tmp = (u64)cycles * USEC_PER_SEC;
2399 do_div(tmp, clk_hz);
2401 return tmp > U32_MAX ? U32_MAX : tmp;
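/* Editor's note - worked example (illustrative, assuming a 250 MHz
 * tclk purely for the arithmetic):
 *
 *	mvpp2_usec_to_cycles(100, 250000000UL)
 *		= (250000000 * 100) / USEC_PER_SEC = 25000 cycles
 *	mvpp2_cycles_to_usec(25000, 250000000UL)
 *		= (25000 * USEC_PER_SEC) / 250000000 = 100 usec
 *
 * Results above U32_MAX are clamped, so an oversized coalescing value
 * degrades to the maximum programmable delay instead of wrapping.
 */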
2404 /* Set the time delay in usec before Rx interrupt */
2405 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2406 struct mvpp2_rx_queue *rxq)
2408 unsigned long freq = port->priv->tclk;
2409 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2411 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2413 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2415 /* re-evaluate to get actual register value */
2416 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2419 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2422 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2424 unsigned long freq = port->priv->tclk;
2425 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2427 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2428 port->tx_time_coal =
2429 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2431 /* re-evaluate to get actual register value */
2432 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2435 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2438 /* Free Tx queue skbuffs */
2439 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2440 struct mvpp2_tx_queue *txq,
2441 struct mvpp2_txq_pcpu *txq_pcpu, int num)
2443 struct xdp_frame_bulk bq;
2446 xdp_frame_bulk_init(&bq);
2448 rcu_read_lock(); /* need for xdp_return_frame_bulk */
2450 for (i = 0; i < num; i++) {
2451 struct mvpp2_txq_pcpu_buf *tx_buf =
2452 txq_pcpu->buffs + txq_pcpu->txq_get_index;
2454 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2455 tx_buf->type != MVPP2_TYPE_XDP_TX)
2456 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2457 tx_buf->size, DMA_TO_DEVICE);
2458 if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2459 dev_kfree_skb_any(tx_buf->skb);
2460 else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2461 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2462 xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2464 mvpp2_txq_inc_get(txq_pcpu);
2466 xdp_flush_frame_bulk(&bq);
2471 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2474 int queue = fls(cause) - 1;
2476 return port->rxqs[queue];
2479 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2482 int queue = fls(cause) - 1;
2484 return port->txqs[queue];
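/* Editor's note - illustrative: fls() returns the 1-based index of the
 * highest set bit, so these helpers pick the highest-numbered pending
 * queue first. With cause = 0x5 (queues 0 and 2 pending):
 *
 *	fls(0x5) - 1 = 2	// txqs[2] is handled first
 *
 * mvpp2_tx_done() below then clears bit 2 from the cause bitmap and
 * loops, handling queue 0 on the next pass.
 */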
2487 /* Handle end of transmission */
2488 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2489 struct mvpp2_txq_pcpu *txq_pcpu)
2491 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2494 if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2495 netdev_err(port->dev, "wrong CPU at the end of Tx processing\n");
2497 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2500 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2502 txq_pcpu->count -= tx_done;
2504 if (netif_tx_queue_stopped(nq))
2505 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2506 netif_tx_wake_queue(nq);
2509 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2510 unsigned int thread)
2512 struct mvpp2_tx_queue *txq;
2513 struct mvpp2_txq_pcpu *txq_pcpu;
2514 unsigned int tx_todo = 0;
2517 txq = mvpp2_get_tx_queue(port, cause);
2521 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2523 if (txq_pcpu->count) {
2524 mvpp2_txq_done(port, txq, txq_pcpu);
2525 tx_todo += txq_pcpu->count;
2528 cause &= ~(1 << txq->log_id);
2533 /* Rx/Tx queue initialization/cleanup methods */
2535 /* Allocate and initialize descriptors for aggr TXQ */
2536 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2537 struct mvpp2_tx_queue *aggr_txq,
2538 unsigned int thread, struct mvpp2 *priv)
2542 /* Allocate memory for TX descriptors */
2543 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2544 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2545 &aggr_txq->descs_dma, GFP_KERNEL);
2546 if (!aggr_txq->descs)
2549 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2551 /* WA: the aggregated TXQ index is not reset by HW, so read back the current value */
2552 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2553 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2555 /* Set Tx descriptors queue starting address - indirect
2558 if (priv->hw_version == MVPP21)
2559 txq_dma = aggr_txq->descs_dma;
2561 txq_dma = aggr_txq->descs_dma >>
2562 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2564 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2565 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2566 MVPP2_AGGR_TXQ_SIZE);
2571 /* Create a specified Rx queue */
2572 static int mvpp2_rxq_init(struct mvpp2_port *port,
2573 struct mvpp2_rx_queue *rxq)
2575 struct mvpp2 *priv = port->priv;
2576 unsigned int thread;
2580 rxq->size = port->rx_ring_size;
2582 /* Allocate memory for RX descriptors */
2583 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2584 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2585 &rxq->descs_dma, GFP_KERNEL);
2589 rxq->last_desc = rxq->size - 1;
2591 /* Zero occupied and non-occupied counters - direct access */
2592 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2594 /* Set Rx descriptors queue starting address - indirect access */
2595 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2596 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2597 if (port->priv->hw_version == MVPP21)
2598 rxq_dma = rxq->descs_dma;
2600 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2601 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2602 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2603 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2607 mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2609 /* Set coalescing pkts and time */
2610 mvpp2_rx_pkts_coal_set(port, rxq);
2611 mvpp2_rx_time_coal_set(port, rxq);
2613 /* Add number of descriptors ready for receiving packets */
2614 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2616 if (priv->percpu_pools) {
2617 err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
2621 err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
2623 goto err_unregister_rxq_short;
2625 /* Every RXQ has a pool for short and another for long packets */
2626 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2628 priv->page_pool[rxq->logic_rxq]);
2630 goto err_unregister_rxq_long;
2632 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2634 priv->page_pool[rxq->logic_rxq +
2637 goto err_unregister_mem_rxq_short;
2642 err_unregister_mem_rxq_short:
2643 xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
2644 err_unregister_rxq_long:
2645 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2646 err_unregister_rxq_short:
2647 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2649 dma_free_coherent(port->dev->dev.parent,
2650 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2651 rxq->descs, rxq->descs_dma);
2655 /* Push packets received by the RXQ to BM pool */
2656 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
2657 struct mvpp2_rx_queue *rxq)
2661 rx_received = mvpp2_rxq_received(port, rxq->id);
2665 for (i = 0; i < rx_received; i++) {
2666 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2667 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2670 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2671 MVPP2_RXD_BM_POOL_ID_OFFS;
2673 mvpp2_bm_pool_put(port, pool,
2674 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
2675 mvpp2_rxdesc_cookie_get(port, rx_desc));
2677 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
2680 /* Cleanup Rx queue */
2681 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
2682 struct mvpp2_rx_queue *rxq)
2684 unsigned int thread;
2686 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
2687 xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2689 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
2690 xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2692 mvpp2_rxq_drop_pkts(port, rxq);
2695 dma_free_coherent(port->dev->dev.parent,
2696 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2702 rxq->next_desc_to_proc = 0;
2705 /* Clear Rx descriptors queue starting address and size;
2706 * free descriptor number
2708 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2709 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2710 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2711 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
2712 mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
2716 /* Create and initialize a Tx queue */
2717 static int mvpp2_txq_init(struct mvpp2_port *port,
2718 struct mvpp2_tx_queue *txq)
2721 unsigned int thread;
2722 int desc, desc_per_txq, tx_port_num;
2723 struct mvpp2_txq_pcpu *txq_pcpu;
2725 txq->size = port->tx_ring_size;
2727 /* Allocate memory for Tx descriptors */
2728 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
2729 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2730 &txq->descs_dma, GFP_KERNEL);
2734 txq->last_desc = txq->size - 1;
2736 /* Set Tx descriptors queue starting address - indirect access */
2737 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2738 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2739 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
2741 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
2742 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
2743 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
2744 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
2745 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
2746 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
2747 val &= ~MVPP2_TXQ_PENDING_MASK;
2748 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
2750 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
2751 * for each existing TXQ.
2752 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
2753 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
2756 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
2757 (txq->log_id * desc_per_txq);
2759 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
2760 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
2761 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
2764 /* WRR / EJP configuration - indirect access */
2765 tx_port_num = mvpp2_egress_port(port);
2766 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2768 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
2769 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
2770 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
2771 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
2772 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
2774 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
2775 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
2778 for (thread = 0; thread < port->priv->nthreads; thread++) {
2779 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2780 txq_pcpu->size = txq->size;
2781 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
2782 sizeof(*txq_pcpu->buffs),
2784 if (!txq_pcpu->buffs)
2787 txq_pcpu->count = 0;
2788 txq_pcpu->reserved_num = 0;
2789 txq_pcpu->txq_put_index = 0;
2790 txq_pcpu->txq_get_index = 0;
2791 txq_pcpu->tso_headers = NULL;
2793 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
2794 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
2796 txq_pcpu->tso_headers =
2797 dma_alloc_coherent(port->dev->dev.parent,
2798 txq_pcpu->size * TSO_HEADER_SIZE,
2799 &txq_pcpu->tso_headers_dma,
2801 if (!txq_pcpu->tso_headers)
2808 /* Free allocated TXQ resources */
2809 static void mvpp2_txq_deinit(struct mvpp2_port *port,
2810 struct mvpp2_tx_queue *txq)
2812 struct mvpp2_txq_pcpu *txq_pcpu;
2813 unsigned int thread;
2815 for (thread = 0; thread < port->priv->nthreads; thread++) {
2816 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2817 kfree(txq_pcpu->buffs);
2819 if (txq_pcpu->tso_headers)
2820 dma_free_coherent(port->dev->dev.parent,
2821 txq_pcpu->size * TSO_HEADER_SIZE,
2822 txq_pcpu->tso_headers,
2823 txq_pcpu->tso_headers_dma);
2825 txq_pcpu->tso_headers = NULL;
2829 dma_free_coherent(port->dev->dev.parent,
2830 txq->size * MVPP2_DESC_ALIGNED_SIZE,
2831 txq->descs, txq->descs_dma);
2835 txq->next_desc_to_proc = 0;
2838 /* Set minimum bandwidth for disabled TXQs */
2839 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2841 /* Set Tx descriptors queue starting address and size */
2842 thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2843 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2844 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
2845 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
2849 /* Drain and clean up a Tx queue */
2850 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
2852 struct mvpp2_txq_pcpu *txq_pcpu;
2854 unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2857 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2858 val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
2859 val |= MVPP2_TXQ_DRAIN_EN_MASK;
2860 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2862 /* The napi queue has been stopped so wait for all packets
2863 * to be transmitted.
2867 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2868 netdev_warn(port->dev,
2869 "port %d: cleaning queue %d timed out\n",
2870 port->id, txq->log_id);
2876 pending = mvpp2_thread_read(port->priv, thread,
2877 MVPP2_TXQ_PENDING_REG);
2878 pending &= MVPP2_TXQ_PENDING_MASK;
2881 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2882 mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2885 for (thread = 0; thread < port->priv->nthreads; thread++) {
2886 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2888 /* Release all packets */
2889 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
2892 txq_pcpu->count = 0;
2893 txq_pcpu->txq_put_index = 0;
2894 txq_pcpu->txq_get_index = 0;
2898 /* Cleanup all Tx queues */
2899 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
2901 struct mvpp2_tx_queue *txq;
2905 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
2907 /* Reset Tx ports and delete Tx queues */
2908 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
2909 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2911 for (queue = 0; queue < port->ntxqs; queue++) {
2912 txq = port->txqs[queue];
2913 mvpp2_txq_clean(port, txq);
2914 mvpp2_txq_deinit(port, txq);
2917 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2919 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
2920 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2923 /* Cleanup all Rx queues */
2924 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
2928 for (queue = 0; queue < port->nrxqs; queue++)
2929 mvpp2_rxq_deinit(port, port->rxqs[queue]);
2932 /* Init all Rx queues for port */
2933 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
2937 for (queue = 0; queue < port->nrxqs; queue++) {
2938 err = mvpp2_rxq_init(port, port->rxqs[queue]);
2945 mvpp2_cleanup_rxqs(port);
2949 /* Init all Tx queues for port */
2950 static int mvpp2_setup_txqs(struct mvpp2_port *port)
2952 struct mvpp2_tx_queue *txq;
2955 for (queue = 0; queue < port->ntxqs; queue++) {
2956 txq = port->txqs[queue];
2957 err = mvpp2_txq_init(port, txq);
2961 /* Assign this queue to a CPU */
2962 if (queue < num_possible_cpus())
2963 netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
2966 if (port->has_tx_irqs) {
2967 mvpp2_tx_time_coal_set(port);
2968 for (queue = 0; queue < port->ntxqs; queue++) {
2969 txq = port->txqs[queue];
2970 mvpp2_tx_pkts_coal_set(port, txq);
2974 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2978 mvpp2_cleanup_txqs(port);
2982 /* The callback for per-port interrupt */
2983 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
2985 struct mvpp2_queue_vector *qv = dev_id;
2987 mvpp2_qvec_interrupt_disable(qv);
2989 napi_schedule(&qv->napi);
2994 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
2996 struct skb_shared_hwtstamps shhwtstamps;
2997 struct mvpp2_hwtstamp_queue *queue;
2998 struct sk_buff *skb;
2999 void __iomem *ptp_q;
3003 ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3005 ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3007 queue = &port->tx_hwtstamp_queue[nq];
3010 r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3014 r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3015 r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3017 id = (r0 >> 1) & 31;
3019 skb = queue->skb[id];
3020 queue->skb[id] = NULL;
3022 u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3024 mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3025 skb_tstamp_tx(skb, &shhwtstamps);
3026 dev_kfree_skb_any(skb);
3031 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3036 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3037 val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3038 if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3039 mvpp2_isr_handle_ptp_queue(port, 0);
3040 if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3041 mvpp2_isr_handle_ptp_queue(port, 1);
3044 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
3046 struct net_device *dev = port->dev;
3048 if (port->phylink) {
3049 phylink_mac_change(port->phylink, link);
3053 if (!netif_running(dev))
3057 mvpp2_interrupts_enable(port);
3059 mvpp2_egress_enable(port);
3060 mvpp2_ingress_enable(port);
3061 netif_carrier_on(dev);
3062 netif_tx_wake_all_queues(dev);
3064 netif_tx_stop_all_queues(dev);
3065 netif_carrier_off(dev);
3066 mvpp2_ingress_disable(port);
3067 mvpp2_egress_disable(port);
3069 mvpp2_interrupts_disable(port);
3073 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3078 val = readl(port->base + MVPP22_XLG_INT_STAT);
3079 if (val & MVPP22_XLG_INT_STAT_LINK) {
3080 val = readl(port->base + MVPP22_XLG_STATUS);
3081 link = (val & MVPP22_XLG_STATUS_LINK_UP);
3082 mvpp2_isr_handle_link(port, link);
3086 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3091 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3092 phy_interface_mode_is_8023z(port->phy_interface) ||
3093 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3094 val = readl(port->base + MVPP22_GMAC_INT_STAT);
3095 if (val & MVPP22_GMAC_INT_STAT_LINK) {
3096 val = readl(port->base + MVPP2_GMAC_STATUS0);
3097 link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3098 mvpp2_isr_handle_link(port, link);
3103 /* Per-port interrupt for link status changes */
3104 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3106 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3109 mvpp22_gop_mask_irq(port);
3111 if (mvpp2_port_supports_xlg(port) &&
3112 mvpp2_is_xlg(port->phy_interface)) {
3113 /* Check the external status register */
3114 val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3115 if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3116 mvpp2_isr_handle_xlg(port);
3117 if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3118 mvpp2_isr_handle_ptp(port);
3120 /* If it's not the XLG, we must be using the GMAC.
3121 * Check the summary status.
3123 val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3124 if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3125 mvpp2_isr_handle_gmac_internal(port);
3126 if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3127 mvpp2_isr_handle_ptp(port);
3130 mvpp22_gop_unmask_irq(port);
3134 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3136 struct net_device *dev;
3137 struct mvpp2_port *port;
3138 struct mvpp2_port_pcpu *port_pcpu;
3139 unsigned int tx_todo, cause;
3141 port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3142 dev = port_pcpu->dev;
3144 if (!netif_running(dev))
3145 return HRTIMER_NORESTART;
3147 port_pcpu->timer_scheduled = false;
3148 port = netdev_priv(dev);
3150 /* Process all the Tx queues */
3151 cause = (1 << port->ntxqs) - 1;
3152 tx_todo = mvpp2_tx_done(port, cause,
3153 mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3155 /* Set the timer in case not all the packets were processed */
3156 if (tx_todo && !port_pcpu->timer_scheduled) {
3157 port_pcpu->timer_scheduled = true;
3158 hrtimer_forward_now(&port_pcpu->tx_done_timer,
3159 MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3161 return HRTIMER_RESTART;
3163 return HRTIMER_NORESTART;
3166 /* Main RX/TX processing routines */
3168 /* Display more error info */
3169 static void mvpp2_rx_error(struct mvpp2_port *port,
3170 struct mvpp2_rx_desc *rx_desc)
3172 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3173 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3174 char *err_str = NULL;
3176 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3177 case MVPP2_RXD_ERR_CRC:
3180 case MVPP2_RXD_ERR_OVERRUN:
3181 err_str = "overrun";
3183 case MVPP2_RXD_ERR_RESOURCE:
3184 err_str = "resource";
3187 if (err_str && net_ratelimit())
3188 netdev_err(port->dev,
3189 "bad rx status %08x (%s error), size=%zu\n",
3190 status, err_str, sz);
3193 /* Handle RX checksum offload */
3194 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
3195 struct sk_buff *skb)
3197 if (((status & MVPP2_RXD_L3_IP4) &&
3198 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3199 (status & MVPP2_RXD_L3_IP6))
3200 if (((status & MVPP2_RXD_L4_UDP) ||
3201 (status & MVPP2_RXD_L4_TCP)) &&
3202 (status & MVPP2_RXD_L4_CSUM_OK)) {
3204 skb->ip_summed = CHECKSUM_UNNECESSARY;
3208 skb->ip_summed = CHECKSUM_NONE;
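/* Editor's note - illustrative: a status word carrying
 * MVPP2_RXD_L3_IP4 | MVPP2_RXD_L4_TCP | MVPP2_RXD_L4_CSUM_OK (without
 * MVPP2_RXD_IP4_HEADER_ERR) yields CHECKSUM_UNNECESSARY, letting the
 * stack skip software checksum validation; any other combination falls
 * through to CHECKSUM_NONE.
 */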
3211 /* Allocate a new buffer and add it to the BM pool */
3212 static int mvpp2_rx_refill(struct mvpp2_port *port,
3213 struct mvpp2_bm_pool *bm_pool,
3214 struct page_pool *page_pool, int pool)
3216 dma_addr_t dma_addr;
3217 phys_addr_t phys_addr;
3220 buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3221 &dma_addr, &phys_addr, GFP_ATOMIC);
3225 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3230 /* Handle Tx checksum */
3231 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3233 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3236 __be16 l3_proto = vlan_get_protocol(skb);
3238 if (l3_proto == htons(ETH_P_IP)) {
3239 struct iphdr *ip4h = ip_hdr(skb);
3241 /* Calculate IPv4 checksum and L4 checksum */
3242 ip_hdr_len = ip4h->ihl;
3243 l4_proto = ip4h->protocol;
3244 } else if (l3_proto == htons(ETH_P_IPV6)) {
3245 struct ipv6hdr *ip6h = ipv6_hdr(skb);
3247 /* Read l4_protocol from one of IPv6 extra headers */
3248 if (skb_network_header_len(skb) > 0)
3249 ip_hdr_len = (skb_network_header_len(skb) >> 2);
3250 l4_proto = ip6h->nexthdr;
3252 return MVPP2_TXD_L4_CSUM_NOT;
3255 return mvpp2_txq_desc_csum(skb_network_offset(skb),
3256 l3_proto, ip_hdr_len, l4_proto);
3259 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
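/* Editor's note - illustrative sketch of how the result is consumed by
 * the xmit path (see mvpp2_tx() below):
 *
 *	u32 tx_cmd = mvpp2_skb_tx_csum(port, skb);
 *	tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
 *	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
 *
 * vlan_get_protocol() is used rather than skb->protocol so the inner
 * L3 protocol is resolved for VLAN-tagged frames.
 */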
3262 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3264 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3265 struct mvpp2_tx_queue *aggr_txq;
3266 struct mvpp2_txq_pcpu *txq_pcpu;
3267 struct mvpp2_tx_queue *txq;
3268 struct netdev_queue *nq;
3270 txq = port->txqs[txq_id];
3271 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3272 nq = netdev_get_tx_queue(port->dev, txq_id);
3273 aggr_txq = &port->priv->aggr_txqs[thread];
3275 txq_pcpu->reserved_num -= nxmit;
3276 txq_pcpu->count += nxmit;
3277 aggr_txq->count += nxmit;
3279 /* Enable transmit */
3281 mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3283 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3284 netif_tx_stop_queue(nq);
3286 /* Finalize TX processing */
3287 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3288 mvpp2_txq_done(port, txq, txq_pcpu);
3292 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3293 struct xdp_frame *xdpf, bool dma_map)
3295 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3296 u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3297 MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3298 enum mvpp2_tx_buf_type buf_type;
3299 struct mvpp2_txq_pcpu *txq_pcpu;
3300 struct mvpp2_tx_queue *aggr_txq;
3301 struct mvpp2_tx_desc *tx_desc;
3302 struct mvpp2_tx_queue *txq;
3303 int ret = MVPP2_XDP_TX;
3304 dma_addr_t dma_addr;
3306 txq = port->txqs[txq_id];
3307 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3308 aggr_txq = &port->priv->aggr_txqs[thread];
3310 /* Check number of available descriptors */
3311 if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3312 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3313 ret = MVPP2_XDP_DROPPED;
3317 /* Get a descriptor for the first part of the packet */
3318 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3319 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3320 mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3323 /* XDP_REDIRECT or AF_XDP */
3324 dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3325 xdpf->len, DMA_TO_DEVICE);
3327 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3328 mvpp2_txq_desc_put(txq);
3329 ret = MVPP2_XDP_DROPPED;
3333 buf_type = MVPP2_TYPE_XDP_NDO;
3336 struct page *page = virt_to_page(xdpf->data);
3338 dma_addr = page_pool_get_dma_addr(page) +
3339 sizeof(*xdpf) + xdpf->headroom;
3340 dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3341 xdpf->len, DMA_BIDIRECTIONAL);
3343 buf_type = MVPP2_TYPE_XDP_TX;
3346 mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3348 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3349 mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3356 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3358 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3359 struct xdp_frame *xdpf;
3363 xdpf = xdp_convert_buff_to_frame(xdp);
3364 if (unlikely(!xdpf))
3365 return MVPP2_XDP_DROPPED;
3367 /* The first half of the TX queues is used for XPS,
3368 * the second half for XDP_TX
3370 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3372 ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3373 if (ret == MVPP2_XDP_TX) {
3374 u64_stats_update_begin(&stats->syncp);
3375 stats->tx_bytes += xdpf->len;
3376 stats->tx_packets++;
3378 u64_stats_update_end(&stats->syncp);
3380 mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3382 u64_stats_update_begin(&stats->syncp);
3383 stats->xdp_tx_err++;
3384 u64_stats_update_end(&stats->syncp);
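/* Editor's note - illustrative: with, say, eight Tx queues in multi
 * queue mode, a frame handled on thread 1 is sent on
 *
 *	txq_id = 1 + (8 / 2) = 5
 *
 * so XDP_TX traffic uses the upper half of the Tx queues and never
 * contends with the XPS queues used by the regular xmit path.
 */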
3391 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3392 struct xdp_frame **frames, u32 flags)
3394 struct mvpp2_port *port = netdev_priv(dev);
3395 int i, nxmit_byte = 0, nxmit = num_frame;
3396 struct mvpp2_pcpu_stats *stats;
3400 if (unlikely(test_bit(0, &port->state)))
3403 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3406 /* The first half of the TX queues is used for XPS,
3407 * the second half for XDP_TX
3409 txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3411 for (i = 0; i < num_frame; i++) {
3412 ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3413 if (ret == MVPP2_XDP_TX) {
3414 nxmit_byte += frames[i]->len;
3416 xdp_return_frame_rx_napi(frames[i]);
3421 if (likely(nxmit > 0))
3422 mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3424 stats = this_cpu_ptr(port->stats);
3425 u64_stats_update_begin(&stats->syncp);
3426 stats->tx_bytes += nxmit_byte;
3427 stats->tx_packets += nxmit;
3428 stats->xdp_xmit += nxmit;
3429 stats->xdp_xmit_err += num_frame - nxmit;
3430 u64_stats_update_end(&stats->syncp);
3436 mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3437 struct bpf_prog *prog, struct xdp_buff *xdp,
3438 struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
3440 unsigned int len, sync, err;
3444 len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3445 act = bpf_prog_run_xdp(prog, xdp);
3447 /* Due to xdp_adjust_tail: the for_device DMA sync must cover the max length the CPU touched */
3448 sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3449 sync = max(sync, len);
3454 ret = MVPP2_XDP_PASS;
3457 err = xdp_do_redirect(port->dev, xdp, prog);
3458 if (unlikely(err)) {
3459 ret = MVPP2_XDP_DROPPED;
3460 page = virt_to_head_page(xdp->data);
3461 page_pool_put_page(pp, page, sync, true);
3463 ret = MVPP2_XDP_REDIR;
3464 stats->xdp_redirect++;
3468 ret = mvpp2_xdp_xmit_back(port, xdp);
3469 if (ret != MVPP2_XDP_TX) {
3470 page = virt_to_head_page(xdp->data);
3471 page_pool_put_page(pp, page, sync, true);
3475 bpf_warn_invalid_xdp_action(act);
3478 trace_xdp_exception(port->dev, prog, act);
3481 page = virt_to_head_page(xdp->data);
3482 page_pool_put_page(pp, page, sync, true);
3483 ret = MVPP2_XDP_DROPPED;
3491 /* Main Rx processing */
3492 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3493 int rx_todo, struct mvpp2_rx_queue *rxq)
3495 struct net_device *dev = port->dev;
3496 struct mvpp2_pcpu_stats ps = {};
3497 enum dma_data_direction dma_dir;
3498 struct bpf_prog *xdp_prog;
3499 struct xdp_buff xdp;
3506 xdp_prog = READ_ONCE(port->xdp_prog);
3508 /* Get number of received packets and clamp the to-do */
3509 rx_received = mvpp2_rxq_received(port, rxq->id);
3510 if (rx_todo > rx_received)
3511 rx_todo = rx_received;
3513 while (rx_done < rx_todo) {
3514 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3515 struct mvpp2_bm_pool *bm_pool;
3516 struct page_pool *pp = NULL;
3517 struct sk_buff *skb;
3518 unsigned int frag_size;
3519 dma_addr_t dma_addr;
3520 phys_addr_t phys_addr;
3521 u32 rx_status, timestamp;
3522 int pool, rx_bytes, err, ret;
3526 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3527 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3528 rx_bytes -= MVPP2_MH_SIZE;
3529 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3530 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3531 data = (void *)phys_to_virt(phys_addr);
3533 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3534 MVPP2_RXD_BM_POOL_ID_OFFS;
3535 bm_pool = &port->priv->bm_pools[pool];
3537 /* In case of an error, release the requested buffer pointer
3538 * to the Buffer Manager. This request process is controlled
3539 * by the hardware, and the information about the buffer is
3540 * carried in the RX descriptor.
3542 if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3543 goto err_drop_frame;
3545 if (port->priv->percpu_pools) {
3546 pp = port->priv->page_pool[pool];
3547 dma_dir = page_pool_get_dma_dir(pp);
3549 dma_dir = DMA_FROM_DEVICE;
3552 dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3553 rx_bytes + MVPP2_MH_SIZE,
3556 /* Prefetch header */
3559 if (bm_pool->frag_size > PAGE_SIZE)
3562 frag_size = bm_pool->frag_size;
3565 xdp.data_hard_start = data;
3566 xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
3567 xdp.data_end = xdp.data + rx_bytes;
3568 xdp.frame_sz = PAGE_SIZE;
3570 if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3571 xdp.rxq = &rxq->xdp_rxq_short;
3573 xdp.rxq = &rxq->xdp_rxq_long;
3575 xdp_set_data_meta_invalid(&xdp);
3577 ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
3581 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3583 netdev_err(port->dev, "failed to refill BM pools\n");
3584 goto err_drop_frame;
3588 ps.rx_bytes += rx_bytes;
3593 skb = build_skb(data, frag_size);
3595 netdev_warn(port->dev, "skb build failed\n");
3596 goto err_drop_frame;
3599 /* If we have RX hardware timestamping enabled, grab the
3600 * timestamp from the queue and convert.
3602 if (mvpp22_rx_hwtstamping(port)) {
3603 timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
3604 mvpp22_tai_tstamp(port->priv->tai, timestamp,
3605 skb_hwtstamps(skb));
3608 err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3610 netdev_err(port->dev, "failed to refill BM pools\n");
3611 dev_kfree_skb_any(skb);
3612 goto err_drop_frame;
3616 page_pool_release_page(pp, virt_to_page(data));
3618 dma_unmap_single_attrs(dev->dev.parent, dma_addr,
3619 bm_pool->buf_size, DMA_FROM_DEVICE,
3620 DMA_ATTR_SKIP_CPU_SYNC);
3623 ps.rx_bytes += rx_bytes;
3625 skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3626 skb_put(skb, rx_bytes);
3627 skb->protocol = eth_type_trans(skb, dev);
3628 mvpp2_rx_csum(port, rx_status, skb);
3630 napi_gro_receive(napi, skb);
3634 dev->stats.rx_errors++;
3635 mvpp2_rx_error(port, rx_desc);
3636 /* Return the buffer to the pool */
3637 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3642 if (xdp_ret & MVPP2_XDP_REDIR)
3645 if (ps.rx_packets) {
3646 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3648 u64_stats_update_begin(&stats->syncp);
3649 stats->rx_packets += ps.rx_packets;
3650 stats->rx_bytes += ps.rx_bytes;
3652 stats->xdp_redirect += ps.xdp_redirect;
3653 stats->xdp_pass += ps.xdp_pass;
3654 stats->xdp_drop += ps.xdp_drop;
3655 u64_stats_update_end(&stats->syncp);
3658 /* Update Rx queue management counters */
3660 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
3666 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3667 struct mvpp2_tx_desc *desc)
3669 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3670 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3672 dma_addr_t buf_dma_addr =
3673 mvpp2_txdesc_dma_addr_get(port, desc);
3675 mvpp2_txdesc_size_get(port, desc);
3676 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
3677 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
3678 buf_sz, DMA_TO_DEVICE);
3679 mvpp2_txq_desc_put(txq);
3682 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
3683 struct mvpp2_tx_desc *desc)
3685 /* We only need to clear the low bits */
3686 if (port->priv->hw_version != MVPP21)
3687 desc->pp22.ptp_descriptor &=
3688 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
3691 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
3692 struct mvpp2_tx_desc *tx_desc,
3693 struct sk_buff *skb)
3695 struct mvpp2_hwtstamp_queue *queue;
3696 unsigned int mtype, type, i;
3697 struct ptp_header *hdr;
3700 if (port->priv->hw_version == MVPP21 ||
3701 port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
3704 type = ptp_classify_raw(skb);
3708 hdr = ptp_parse_header(skb, type);
3712 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3714 ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
3715 MVPP22_PTP_ACTION_CAPTURE;
3716 queue = &port->tx_hwtstamp_queue[0];
3718 switch (type & PTP_CLASS_VMASK) {
3720 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
3724 ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
3725 mtype = hdr->tsmt & 15;
3726 /* Direct PTP Sync messages to queue 1 */
3728 ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
3729 queue = &port->tx_hwtstamp_queue[1];
3734 /* Take a reference on the skb and insert into our queue */
3736 queue->next = (i + 1) & 31;
3738 dev_kfree_skb_any(queue->skb[i]);
3739 queue->skb[i] = skb_get(skb);
3741 ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
3745 * 6:4 - PTPPacketFormat
3746 * 7 - PTP_CF_WraparoundCheckEn
3747 * 9:8 - IngressTimestampSeconds[1:0]
3749 * 11 - MACTimestampingEn
3750 * 17:12 - PTP_TimestampQueueEntryID[5:0]
3751 * 18 - PTPTimestampQueueSelect
3752 * 19 - UDPChecksumUpdateEn
3753 * 27:20 - TimestampOffset
3754 * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
3755 * NTPTs, Y.1731 - L3 to timestamp entry
3756 * 35:28 - UDP Checksum Offset
3758 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
3760 tx_desc->pp22.ptp_descriptor &=
3761 cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
3762 tx_desc->pp22.ptp_descriptor |=
3763 cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
3764 tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
3765 tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
3770 /* Handle Tx fragmentation: map each skb fragment to its own descriptor */
3771 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
3772 struct mvpp2_tx_queue *aggr_txq,
3773 struct mvpp2_tx_queue *txq)
3775 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3776 struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3777 struct mvpp2_tx_desc *tx_desc;
3779 dma_addr_t buf_dma_addr;
3781 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3783 void *addr = skb_frag_address(frag);
3785 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3786 mvpp2_txdesc_clear_ptp(port, tx_desc);
3787 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3788 mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
3790 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
3791 skb_frag_size(frag),
3793 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
3794 mvpp2_txq_desc_put(txq);
3798 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3800 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
3801 /* Last descriptor */
3802 mvpp2_txdesc_cmd_set(port, tx_desc,
3804 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3806 /* Descriptor in the middle: Not First, Not Last */
3807 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3808 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3814 /* Release all descriptors that were used to map fragments of
3815 * this packet, as well as the corresponding DMA mappings
3817 for (i = i - 1; i >= 0; i--) {
3818 tx_desc = txq->descs + i;
3819 tx_desc_unmap_put(port, txq, tx_desc);
3825 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
3826 struct net_device *dev,
3827 struct mvpp2_tx_queue *txq,
3828 struct mvpp2_tx_queue *aggr_txq,
3829 struct mvpp2_txq_pcpu *txq_pcpu,
3832 struct mvpp2_port *port = netdev_priv(dev);
3833 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3836 mvpp2_txdesc_clear_ptp(port, tx_desc);
3837 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3838 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
3840 addr = txq_pcpu->tso_headers_dma +
3841 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3842 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
3844 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
3846 MVPP2_TXD_PADDING_DISABLE);
3847 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3850 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
3851 struct net_device *dev, struct tso_t *tso,
3852 struct mvpp2_tx_queue *txq,
3853 struct mvpp2_tx_queue *aggr_txq,
3854 struct mvpp2_txq_pcpu *txq_pcpu,
3855 int sz, bool left, bool last)
3857 struct mvpp2_port *port = netdev_priv(dev);
3858 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3859 dma_addr_t buf_dma_addr;
3861 mvpp2_txdesc_clear_ptp(port, tx_desc);
3862 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3863 mvpp2_txdesc_size_set(port, tx_desc, sz);
3865 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
3867 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3868 mvpp2_txq_desc_put(txq);
3872 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3875 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
3877 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3881 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3884 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3888 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
3889 struct mvpp2_tx_queue *txq,
3890 struct mvpp2_tx_queue *aggr_txq,
3891 struct mvpp2_txq_pcpu *txq_pcpu)
3893 struct mvpp2_port *port = netdev_priv(dev);
3894 int hdr_sz, i, len, descs = 0;
3897 /* Check number of available descriptors */
3898 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
3899 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
3900 tso_count_descs(skb)))
3903 hdr_sz = tso_start(skb, &tso);
3905 len = skb->len - hdr_sz;
3907 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
3908 char *hdr = txq_pcpu->tso_headers +
3909 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3914 tso_build_hdr(skb, hdr, &tso, left, len == 0);
3915 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
3918 int sz = min_t(int, tso.size, left);
3922 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
3923 txq_pcpu, sz, left, len == 0))
3925 tso_build_data(skb, &tso, sz);
3932 for (i = descs - 1; i >= 0; i--) {
3933 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
3934 tx_desc_unmap_put(port, txq, tx_desc);
3939 /* Main Tx processing */
3940 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
3942 struct mvpp2_port *port = netdev_priv(dev);
3943 struct mvpp2_tx_queue *txq, *aggr_txq;
3944 struct mvpp2_txq_pcpu *txq_pcpu;
3945 struct mvpp2_tx_desc *tx_desc;
3946 dma_addr_t buf_dma_addr;
3947 unsigned long flags = 0;
3948 unsigned int thread;
3953 thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3955 txq_id = skb_get_queue_mapping(skb);
3956 txq = port->txqs[txq_id];
3957 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3958 aggr_txq = &port->priv->aggr_txqs[thread];
3960 if (test_bit(thread, &port->priv->lock_map))
3961 spin_lock_irqsave(&port->tx_lock[thread], flags);
3963 if (skb_is_gso(skb)) {
3964 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
3967 frags = skb_shinfo(skb)->nr_frags + 1;
3969 /* Check number of available descriptors */
3970 if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
3971 mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
3976 /* Get a descriptor for the first part of the packet */
3977 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3978 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
3979 !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
3980 mvpp2_txdesc_clear_ptp(port, tx_desc);
3981 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3982 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
3984 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
3985 skb_headlen(skb), DMA_TO_DEVICE);
3986 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3987 mvpp2_txq_desc_put(txq);
3992 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3994 tx_cmd = mvpp2_skb_tx_csum(port, skb);
3997 /* First and Last descriptor */
3998 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3999 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4000 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4002 /* First but not Last */
4003 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4004 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4005 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4007 /* Continue with other skb fragments */
4008 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4009 tx_desc_unmap_put(port, txq, tx_desc);
4016 struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4017 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4019 txq_pcpu->reserved_num -= frags;
4020 txq_pcpu->count += frags;
4021 aggr_txq->count += frags;
4023 /* Enable transmit */
4025 mvpp2_aggr_txq_pend_desc_add(port, frags);
4027 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4028 netif_tx_stop_queue(nq);
4030 u64_stats_update_begin(&stats->syncp);
4031 stats->tx_packets++;
4032 stats->tx_bytes += skb->len;
4033 u64_stats_update_end(&stats->syncp);
4035 dev->stats.tx_dropped++;
4036 dev_kfree_skb_any(skb);
4039 /* Finalize TX processing */
4040 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4041 mvpp2_txq_done(port, txq, txq_pcpu);
4043 /* Set the timer in case not all frags were processed */
4044 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4045 txq_pcpu->count > 0) {
4046 struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4048 if (!port_pcpu->timer_scheduled) {
4049 port_pcpu->timer_scheduled = true;
4050 hrtimer_start(&port_pcpu->tx_done_timer,
4051 MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4052 HRTIMER_MODE_REL_PINNED_SOFT);
4056 if (test_bit(thread, &port->priv->lock_map))
4057 spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4059 return NETDEV_TX_OK;
4062 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4064 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4065 netdev_err(dev, "FCS error\n");
4066 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4067 netdev_err(dev, "rx fifo overrun error\n");
4068 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4069 netdev_err(dev, "tx fifo underrun error\n");
4072 static int mvpp2_poll(struct napi_struct *napi, int budget)
4074 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
4076 struct mvpp2_port *port = netdev_priv(napi->dev);
4077 struct mvpp2_queue_vector *qv;
4078 unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4080 qv = container_of(napi, struct mvpp2_queue_vector, napi);
4082 /* Rx/Tx cause register
4084 * Bits 0-15: each bit indicates received packets on the Rx queue
4085 * (bit 0 is for Rx queue 0).
4087 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
4088 * (bit 16 is for Tx queue 0).
4090 * Each CPU has its own Rx/Tx cause register
4092 cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4093 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4095 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4097 mvpp2_cause_error(port->dev, cause_misc);
4099 /* Clear the cause register */
4100 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
4101 mvpp2_thread_write(port->priv, thread,
4102 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
4103 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
4106 if (port->has_tx_irqs) {
4107 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4109 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
4110 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
4114 /* Process RX packets */
4115 cause_rx = cause_rx_tx &
4116 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
4117 cause_rx <<= qv->first_rxq;
4118 cause_rx |= qv->pending_cause_rx;
4119 while (cause_rx && budget > 0) {
4121 struct mvpp2_rx_queue *rxq;
4123 rxq = mvpp2_get_rx_queue(port, cause_rx);
4127 count = mvpp2_rx(port, napi, budget, rxq);
4131 /* Clear the bit associated to this Rx queue
4132 * so that next iteration will continue from
4133 * the next Rx queue.
4135 cause_rx &= ~(1 << rxq->logic_rxq);
4141 napi_complete_done(napi, rx_done);
4143 mvpp2_qvec_interrupt_enable(qv);
4145 qv->pending_cause_rx = cause_rx;
4149 static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
4153 /* Set the GMAC & XLG MAC in reset */
4154 mvpp2_mac_reset_assert(port);
4156 /* Set the MPCS and XPCS in reset */
4157 mvpp22_pcs_reset_assert(port);
4159 /* comphy reconfiguration */
4160 mvpp22_comphy_init(port);
4162 /* gop reconfiguration */
4163 mvpp22_gop_init(port);
4165 mvpp22_pcs_reset_deassert(port);
4167 if (mvpp2_port_supports_xlg(port)) {
4168 ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4169 ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4171 if (mvpp2_is_xlg(port->phy_interface))
4172 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4174 ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4176 writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4179 if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
4180 mvpp2_xlg_max_rx_size_set(port);
4182 mvpp2_gmac_max_rx_size_set(port);
4185 /* Set hw internals when starting port */
4186 static void mvpp2_start_dev(struct mvpp2_port *port)
4190 mvpp2_txp_max_tx_size_set(port);
4192 for (i = 0; i < port->nqvecs; i++)
4193 napi_enable(&port->qvecs[i].napi);
4195 /* Enable interrupts on all threads */
4196 mvpp2_interrupts_enable(port);
4198 if (port->priv->hw_version == MVPP22)
4199 mvpp22_mode_reconfigure(port);
4201 if (port->phylink) {
4202 phylink_start(port->phylink);
4204 mvpp2_acpi_start(port);
4207 netif_tx_start_all_queues(port->dev);
4209 clear_bit(0, &port->state);
4212 /* Set hw internals when stopping port */
4213 static void mvpp2_stop_dev(struct mvpp2_port *port)
4217 set_bit(0, &port->state);
4219 /* Disable interrupts on all threads */
4220 mvpp2_interrupts_disable(port);
4222 for (i = 0; i < port->nqvecs; i++)
4223 napi_disable(&port->qvecs[i].napi);
4226 phylink_stop(port->phylink);
4227 phy_power_off(port->comphy);
4230 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4231 struct ethtool_ringparam *ring)
4233 u16 new_rx_pending = ring->rx_pending;
4234 u16 new_tx_pending = ring->tx_pending;
4236 if (ring->rx_pending == 0 || ring->tx_pending == 0)
4239 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4240 new_rx_pending = MVPP2_MAX_RXD_MAX;
4241 else if (!IS_ALIGNED(ring->rx_pending, 16))
4242 new_rx_pending = ALIGN(ring->rx_pending, 16);
4244 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4245 new_tx_pending = MVPP2_MAX_TXD_MAX;
4246 else if (!IS_ALIGNED(ring->tx_pending, 32))
4247 new_tx_pending = ALIGN(ring->tx_pending, 32);
4249 /* The Tx ring size cannot be smaller than the minimum number of
4250 * descriptors needed for TSO.
4252 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4253 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4255 if (ring->rx_pending != new_rx_pending) {
4256 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4257 ring->rx_pending, new_rx_pending);
4258 ring->rx_pending = new_rx_pending;
4261 if (ring->tx_pending != new_tx_pending) {
4262 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4263 ring->tx_pending, new_tx_pending);
4264 ring->tx_pending = new_tx_pending;
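/* Worked example of the rounding above (a sketch, not from the driver):
 * a request of rx_pending = 100 is not 16-aligned and becomes
 * ALIGN(100, 16) = 112; tx_pending = 100 becomes ALIGN(100, 32) = 128,
 * and would be raised further if it fell below the TSO minimum,
 * ALIGN(MVPP2_MAX_SKB_DESCS, 32).
 */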
4270 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4272 u32 mac_addr_l, mac_addr_m, mac_addr_h;
4274 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4275 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4276 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4277 addr[0] = (mac_addr_h >> 24) & 0xFF;
4278 addr[1] = (mac_addr_h >> 16) & 0xFF;
4279 addr[2] = (mac_addr_h >> 8) & 0xFF;
4280 addr[3] = mac_addr_h & 0xFF;
4281 addr[4] = mac_addr_m & 0xFF;
4282 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4285 static int mvpp2_irqs_init(struct mvpp2_port *port)
4289 for (i = 0; i < port->nqvecs; i++) {
4290 struct mvpp2_queue_vector *qv = port->qvecs + i;
4292 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4293 qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
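/* Private (per-thread) vectors are pinned: IRQ_NO_BALANCING below keeps
 * irqbalance and the kernel from migrating the interrupt away from the
 * CPUs programmed into the affinity hint further down.
 */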
4299 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4302 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4306 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4309 for_each_present_cpu(cpu) {
4310 if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4312 cpumask_set_cpu(cpu, qv->mask);
4315 irq_set_affinity_hint(qv->irq, qv->mask);
4321 for (i = 0; i < port->nqvecs; i++) {
4322 struct mvpp2_queue_vector *qv = port->qvecs + i;
4324 irq_set_affinity_hint(qv->irq, NULL);
4327 free_irq(qv->irq, qv);
4333 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4337 for (i = 0; i < port->nqvecs; i++) {
4338 struct mvpp2_queue_vector *qv = port->qvecs + i;
4340 irq_set_affinity_hint(qv->irq, NULL);
4343 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4344 free_irq(qv->irq, qv);
4348 static bool mvpp22_rss_is_supported(void)
4350 return queue_mode == MVPP2_QDIST_MULTI_MODE;
4353 static int mvpp2_open(struct net_device *dev)
4355 struct mvpp2_port *port = netdev_priv(dev);
4356 struct mvpp2 *priv = port->priv;
4357 unsigned char mac_bcast[ETH_ALEN] = {
4358 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4362 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4364 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4367 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4369 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4372 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4374 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4377 err = mvpp2_prs_def_flow(port);
4379 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4383 /* Allocate the Rx/Tx queues */
4384 err = mvpp2_setup_rxqs(port);
4386 netdev_err(port->dev, "cannot allocate Rx queues\n");
4390 err = mvpp2_setup_txqs(port);
4392 netdev_err(port->dev, "cannot allocate Tx queues\n");
4393 goto err_cleanup_rxqs;
4396 err = mvpp2_irqs_init(port);
4398 netdev_err(port->dev, "cannot init IRQs\n");
4399 goto err_cleanup_txqs;
4402 /* Phylink isn't supported yet in ACPI mode */
4403 if (port->of_node) {
4404 err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
4406 netdev_err(port->dev, "could not attach PHY (%d)\n",
4414 if (priv->hw_version == MVPP22 && port->port_irq) {
4415 err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4418 netdev_err(port->dev,
4419 "cannot request port link/ptp IRQ %d\n",
4424 mvpp22_gop_setup_irq(port);
4426 /* The link is down by default */
4427 netif_carrier_off(port->dev);
4435 netdev_err(port->dev,
4436 "invalid configuration: no dt or link IRQ");
4441 /* Unmask interrupts on all CPUs */
4442 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4443 mvpp2_shared_interrupt_mask_unmask(port, false);
4445 mvpp2_start_dev(port);
4447 /* Start hardware statistics gathering */
4448 queue_delayed_work(priv->stats_queue, &port->stats_work,
4449 MVPP2_MIB_COUNTERS_STATS_DELAY);
4454 mvpp2_irqs_deinit(port);
4456 mvpp2_cleanup_txqs(port);
4458 mvpp2_cleanup_rxqs(port);
4462 static int mvpp2_stop(struct net_device *dev)
4464 struct mvpp2_port *port = netdev_priv(dev);
4465 struct mvpp2_port_pcpu *port_pcpu;
4466 unsigned int thread;
4468 mvpp2_stop_dev(port);
4470 /* Mask interrupts on all threads */
4471 on_each_cpu(mvpp2_interrupts_mask, port, 1);
4472 mvpp2_shared_interrupt_mask_unmask(port, true);
4475 phylink_disconnect_phy(port->phylink);
4477 free_irq(port->port_irq, port);
4479 mvpp2_irqs_deinit(port);
4480 if (!port->has_tx_irqs) {
4481 for (thread = 0; thread < port->priv->nthreads; thread++) {
4482 port_pcpu = per_cpu_ptr(port->pcpu, thread);
4484 hrtimer_cancel(&port_pcpu->tx_done_timer);
4485 port_pcpu->timer_scheduled = false;
4488 mvpp2_cleanup_rxqs(port);
4489 mvpp2_cleanup_txqs(port);
4491 cancel_delayed_work_sync(&port->stats_work);
4493 mvpp2_mac_reset_assert(port);
4494 mvpp22_pcs_reset_assert(port);
4499 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4500 struct netdev_hw_addr_list *list)
4502 struct netdev_hw_addr *ha;
4505 netdev_hw_addr_list_for_each(ha, list) {
4506 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4514 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4516 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4517 mvpp2_prs_vid_enable_filtering(port);
4519 mvpp2_prs_vid_disable_filtering(port);
4521 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4522 MVPP2_PRS_L2_UNI_CAST, enable);
4524 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4525 MVPP2_PRS_L2_MULTI_CAST, enable);
4528 static void mvpp2_set_rx_mode(struct net_device *dev)
4530 struct mvpp2_port *port = netdev_priv(dev);
4532 /* Clear the whole UC and MC list */
4533 mvpp2_prs_mac_del_all(port);
4535 if (dev->flags & IFF_PROMISC) {
4536 mvpp2_set_rx_promisc(port, true);
4540 mvpp2_set_rx_promisc(port, false);
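/* If a filter list overflows the parser's capacity, or a parser update
 * fails, fall back to hardware promiscuity for that traffic class
 * rather than silently dropping frames.
 */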
4542 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4543 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4544 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4545 MVPP2_PRS_L2_UNI_CAST, true);
4547 if (dev->flags & IFF_ALLMULTI) {
4548 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4549 MVPP2_PRS_L2_MULTI_CAST, true);
4553 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4554 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4555 mvpp2_prs_mac_promisc_set(port->priv, port->id,
4556 MVPP2_PRS_L2_MULTI_CAST, true);
4559 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4561 const struct sockaddr *addr = p;
4564 if (!is_valid_ether_addr(addr->sa_data))
4565 return -EADDRNOTAVAIL;
4567 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4569 /* Reconfigure the parser to accept the original MAC address */
4570 mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4571 netdev_err(dev, "failed to change MAC address\n");
4576 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4577 * then bring all ports back up.
4579 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4581 int numbufs = MVPP2_BM_POOLS_NUM, i;
4582 struct mvpp2_port *port = NULL;
4583 bool status[MVPP2_MAX_PORTS];
4585 for (i = 0; i < priv->port_count; i++) {
4586 port = priv->port_list[i];
4587 status[i] = netif_running(port->dev);
4588 if (status[i])
4589 mvpp2_stop(port->dev);
4592 /* nrxqs is the same for all ports */
4593 if (priv->percpu_pools)
4594 numbufs = port->nrxqs * 2;
4596 for (i = 0; i < numbufs; i++)
4597 mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4599 devm_kfree(port->dev->dev.parent, priv->bm_pools);
4600 priv->percpu_pools = percpu;
4601 mvpp2_bm_init(port->dev->dev.parent, priv);
4603 for (i = 0; i < priv->port_count; i++) {
4604 port = priv->port_list[i];
4605 mvpp2_swf_bm_pool_init(port);
4606 if (status[i])
4607 mvpp2_open(port->dev);
4613 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
4615 struct mvpp2_port *port = netdev_priv(dev);
4616 bool running = netif_running(dev);
4617 struct mvpp2 *priv = port->priv;
4620 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
4621 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
4622 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
4623 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
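/* Note that the rounding is driven by MVPP2_RX_PKT_SIZE(mtu): the MTU is
 * adjusted until the resulting Rx packet size is a multiple of 8, which
 * the buffer manager pools appear to require.
 */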
4626 if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
4627 if (port->xdp_prog) {
4628 netdev_err(dev, "Jumbo frames are not supported with XDP\n");
4631 if (priv->percpu_pools) {
4632 netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
4633 mvpp2_bm_switch_buffers(priv, false);
4639 for (i = 0; i < priv->port_count; i++)
4640 if (priv->port_list[i] != port &&
4641 MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
4642 MVPP2_BM_LONG_PKT_SIZE) {
4647 /* No port is using jumbo frames */
4649 dev_info(port->dev->dev.parent,
4650 "all ports have a low MTU, switching to per-cpu buffers");
4651 mvpp2_bm_switch_buffers(priv, true);
4656 mvpp2_stop_dev(port);
4658 err = mvpp2_bm_update_mtu(dev, mtu);
4660 netdev_err(dev, "failed to change MTU\n");
4661 /* Reconfigure BM to the original MTU */
4662 mvpp2_bm_update_mtu(dev, dev->mtu);
4664 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4668 mvpp2_start_dev(port);
4669 mvpp2_egress_enable(port);
4670 mvpp2_ingress_enable(port);
4676 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
4678 enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
4679 struct mvpp2 *priv = port->priv;
4682 if (!priv->percpu_pools)
4685 if (!priv->page_pool[0])
4688 for (i = 0; i < priv->port_count; i++) {
4689 port = priv->port_list[i];
4690 if (port->xdp_prog) {
4691 dma_dir = DMA_BIDIRECTIONAL;
4696 /* All pools are equal in terms of DMA direction */
4697 if (priv->page_pool[0]->p.dma_dir != dma_dir)
4698 err = mvpp2_bm_switch_buffers(priv, true);
4704 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4706 struct mvpp2_port *port = netdev_priv(dev);
4710 for_each_possible_cpu(cpu) {
4711 struct mvpp2_pcpu_stats *cpu_stats;
4717 cpu_stats = per_cpu_ptr(port->stats, cpu);
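/* Snapshot the per-CPU 64-bit counters inside the u64_stats seqcount
 * retry loop below, so that a concurrent writer cannot hand us a torn
 * read on 32-bit systems.
 */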
4719 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4720 rx_packets = cpu_stats->rx_packets;
4721 rx_bytes = cpu_stats->rx_bytes;
4722 tx_packets = cpu_stats->tx_packets;
4723 tx_bytes = cpu_stats->tx_bytes;
4724 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4726 stats->rx_packets += rx_packets;
4727 stats->rx_bytes += rx_bytes;
4728 stats->tx_packets += tx_packets;
4729 stats->tx_bytes += tx_bytes;
4732 stats->rx_errors = dev->stats.rx_errors;
4733 stats->rx_dropped = dev->stats.rx_dropped;
4734 stats->tx_dropped = dev->stats.tx_dropped;
4737 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
4739 struct hwtstamp_config config;
4743 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4749 if (config.tx_type != HWTSTAMP_TX_OFF &&
4750 config.tx_type != HWTSTAMP_TX_ON)
4753 ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
4756 if (config.tx_type != HWTSTAMP_TX_OFF) {
4757 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
4758 int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
4759 MVPP22_PTP_INT_MASK_QUEUE0;
4762 /* It seems we must also release the TX reset when enabling the TSU */
4763 if (config.rx_filter != HWTSTAMP_FILTER_NONE)
4764 gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
4765 MVPP22_PTP_GCR_TX_RESET;
4767 if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
4768 mvpp22_tai_start(port->priv->tai);
4770 if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
4771 config.rx_filter = HWTSTAMP_FILTER_ALL;
4772 mvpp2_modify(ptp + MVPP22_PTP_GCR,
4773 MVPP22_PTP_GCR_RX_RESET |
4774 MVPP22_PTP_GCR_TX_RESET |
4775 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
4776 port->rx_hwtstamp = true;
4778 port->rx_hwtstamp = false;
4779 mvpp2_modify(ptp + MVPP22_PTP_GCR,
4780 MVPP22_PTP_GCR_RX_RESET |
4781 MVPP22_PTP_GCR_TX_RESET |
4782 MVPP22_PTP_GCR_TSU_ENABLE, gcr);
4785 mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
4786 MVPP22_PTP_INT_MASK_QUEUE1 |
4787 MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
4789 if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
4790 mvpp22_tai_stop(port->priv->tai);
4792 port->tx_hwtstamp_type = config.tx_type;
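/* A minimal userspace sketch driving this path (hypothetical program,
 * not part of the driver; needs <linux/net_tstamp.h> and <sys/ioctl.h>):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Everything but HWTSTAMP_TX_OFF/ON is rejected, and any Rx filter other
 * than HWTSTAMP_FILTER_NONE is coerced to HWTSTAMP_FILTER_ALL above.
 */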
4794 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
4800 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
4802 struct hwtstamp_config config;
4804 memset(&config, 0, sizeof(config));
4806 config.tx_type = port->tx_hwtstamp_type;
4807 config.rx_filter = port->rx_hwtstamp ?
4808 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
4810 if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
4816 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
4817 struct ethtool_ts_info *info)
4819 struct mvpp2_port *port = netdev_priv(dev);
4821 if (!port->hwtstamp)
4824 info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
4825 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4826 SOF_TIMESTAMPING_RX_SOFTWARE |
4827 SOF_TIMESTAMPING_SOFTWARE |
4828 SOF_TIMESTAMPING_TX_HARDWARE |
4829 SOF_TIMESTAMPING_RX_HARDWARE |
4830 SOF_TIMESTAMPING_RAW_HARDWARE;
4831 info->tx_types = BIT(HWTSTAMP_TX_OFF) |
4832 BIT(HWTSTAMP_TX_ON);
4833 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
4834 BIT(HWTSTAMP_FILTER_ALL);
4839 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4841 struct mvpp2_port *port = netdev_priv(dev);
4846 return mvpp2_set_ts_config(port, ifr);
4851 return mvpp2_get_ts_config(port, ifr);
4858 return phylink_mii_ioctl(port->phylink, ifr, cmd);
4861 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
4863 struct mvpp2_port *port = netdev_priv(dev);
4866 ret = mvpp2_prs_vid_entry_add(port, vid);
4868 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
4869 MVPP2_PRS_VLAN_FILT_MAX - 1);
4873 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
4875 struct mvpp2_port *port = netdev_priv(dev);
4877 mvpp2_prs_vid_entry_remove(port, vid);
4881 static int mvpp2_set_features(struct net_device *dev,
4882 netdev_features_t features)
4884 netdev_features_t changed = dev->features ^ features;
4885 struct mvpp2_port *port = netdev_priv(dev);
4887 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
4888 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
4889 mvpp2_prs_vid_enable_filtering(port);
4891 /* Invalidate all registered VID filters for this
4894 mvpp2_prs_vid_remove_all(port);
4896 mvpp2_prs_vid_disable_filtering(port);
4900 if (changed & NETIF_F_RXHASH) {
4901 if (features & NETIF_F_RXHASH)
4902 mvpp22_port_rss_enable(port);
4904 mvpp22_port_rss_disable(port);
4910 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
4912 struct bpf_prog *prog = bpf->prog, *old_prog;
4913 bool running = netif_running(port->dev);
4914 bool reset = !prog != !port->xdp_prog;
4916 if (port->dev->mtu > ETH_DATA_LEN) {
4917 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
4921 if (!port->priv->percpu_pools) {
4922 NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
4926 if (port->ntxqs < num_possible_cpus() * 2) {
4927 NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
4931 /* device is up and a bpf program is added/removed: the Rx queues must be set up again */
4932 if (running && reset)
4933 mvpp2_stop(port->dev);
4935 old_prog = xchg(&port->xdp_prog, prog);
4937 bpf_prog_put(old_prog);
4939 /* bpf is just replaced, RXQ and MTU are already set up */
4943 /* device was up, restore the link */
4945 mvpp2_open(port->dev);
4947 /* Check Page Pool DMA Direction */
4948 mvpp2_check_pagepool_dma(port);
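/* To recap the checks above: attaching an XDP program requires a
 * standard MTU (no jumbo frames), per-CPU buffer pools, and two Tx
 * queues per CPU: one set for the regular stack, one for
 * XDP_TX/XDP_REDIRECT (an inference from the check, not spelled out
 * here).
 */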
4953 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4955 struct mvpp2_port *port = netdev_priv(dev);
4957 switch (xdp->command) {
4958 case XDP_SETUP_PROG:
4959 return mvpp2_xdp_setup(port, xdp);
4965 /* Ethtool methods */
4967 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
4969 struct mvpp2_port *port = netdev_priv(dev);
4974 return phylink_ethtool_nway_reset(port->phylink);
4977 /* Set interrupt coalescing for ethtool */
4978 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
4979 struct ethtool_coalesce *c)
4981 struct mvpp2_port *port = netdev_priv(dev);
4984 for (queue = 0; queue < port->nrxqs; queue++) {
4985 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4987 rxq->time_coal = c->rx_coalesce_usecs;
4988 rxq->pkts_coal = c->rx_max_coalesced_frames;
4989 mvpp2_rx_pkts_coal_set(port, rxq);
4990 mvpp2_rx_time_coal_set(port, rxq);
4993 if (port->has_tx_irqs) {
4994 port->tx_time_coal = c->tx_coalesce_usecs;
4995 mvpp2_tx_time_coal_set(port);
4998 for (queue = 0; queue < port->ntxqs; queue++) {
4999 struct mvpp2_tx_queue *txq = port->txqs[queue];
5001 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5003 if (port->has_tx_irqs)
5004 mvpp2_tx_pkts_coal_set(port, txq);
5010 /* Get interrupt coalescing for ethtool */
5011 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5012 struct ethtool_coalesce *c)
5014 struct mvpp2_port *port = netdev_priv(dev);
5016 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5017 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5018 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5019 c->tx_coalesce_usecs = port->tx_time_coal;
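/* For reference, a plausible ethtool invocation exercising the two
 * callbacks above (device name hypothetical, option names per standard
 * ethtool):
 *
 *	ethtool -C eth0 rx-usecs 32 rx-frames 64 tx-usecs 1000 tx-frames 30
 *
 * rx-usecs/rx-frames land in time_coal/pkts_coal of every Rx queue;
 * tx-usecs and tx-frames map to tx_time_coal and done_pkts_coal.
 */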
5023 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5024 struct ethtool_drvinfo *drvinfo)
5026 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5027 sizeof(drvinfo->driver));
5028 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5029 sizeof(drvinfo->version));
5030 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5031 sizeof(drvinfo->bus_info));
5034 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5035 struct ethtool_ringparam *ring)
5037 struct mvpp2_port *port = netdev_priv(dev);
5039 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5040 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5041 ring->rx_pending = port->rx_ring_size;
5042 ring->tx_pending = port->tx_ring_size;
5045 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5046 struct ethtool_ringparam *ring)
5048 struct mvpp2_port *port = netdev_priv(dev);
5049 u16 prev_rx_ring_size = port->rx_ring_size;
5050 u16 prev_tx_ring_size = port->tx_ring_size;
5053 err = mvpp2_check_ringparam_valid(dev, ring);
5057 if (!netif_running(dev)) {
5058 port->rx_ring_size = ring->rx_pending;
5059 port->tx_ring_size = ring->tx_pending;
5063 /* The interface is running, so we have to force a
5064 * reallocation of the queues
5066 mvpp2_stop_dev(port);
5067 mvpp2_cleanup_rxqs(port);
5068 mvpp2_cleanup_txqs(port);
5070 port->rx_ring_size = ring->rx_pending;
5071 port->tx_ring_size = ring->tx_pending;
5073 err = mvpp2_setup_rxqs(port);
5075 /* Reallocate Rx queues with the original ring size */
5076 port->rx_ring_size = prev_rx_ring_size;
5077 ring->rx_pending = prev_rx_ring_size;
5078 err = mvpp2_setup_rxqs(port);
5082 err = mvpp2_setup_txqs(port);
5084 /* Reallocate Tx queues with the original ring size */
5085 port->tx_ring_size = prev_tx_ring_size;
5086 ring->tx_pending = prev_tx_ring_size;
5087 err = mvpp2_setup_txqs(port);
5089 goto err_clean_rxqs;
5092 mvpp2_start_dev(port);
5093 mvpp2_egress_enable(port);
5094 mvpp2_ingress_enable(port);
5099 mvpp2_cleanup_rxqs(port);
5101 netdev_err(dev, "failed to change ring parameters");
5105 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5106 struct ethtool_pauseparam *pause)
5108 struct mvpp2_port *port = netdev_priv(dev);
5113 phylink_ethtool_get_pauseparam(port->phylink, pause);
5116 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5117 struct ethtool_pauseparam *pause)
5119 struct mvpp2_port *port = netdev_priv(dev);
5124 return phylink_ethtool_set_pauseparam(port->phylink, pause);
5127 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5128 struct ethtool_link_ksettings *cmd)
5130 struct mvpp2_port *port = netdev_priv(dev);
5135 return phylink_ethtool_ksettings_get(port->phylink, cmd);
5138 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5139 const struct ethtool_link_ksettings *cmd)
5141 struct mvpp2_port *port = netdev_priv(dev);
5146 return phylink_ethtool_ksettings_set(port->phylink, cmd);
5149 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
5150 struct ethtool_rxnfc *info, u32 *rules)
5152 struct mvpp2_port *port = netdev_priv(dev);
5153 int ret = 0, i, loc = 0;
5155 if (!mvpp22_rss_is_supported())
5158 switch (info->cmd) {
5160 ret = mvpp2_ethtool_rxfh_get(port, info);
5162 case ETHTOOL_GRXRINGS:
5163 info->data = port->nrxqs;
5165 case ETHTOOL_GRXCLSRLCNT:
5166 info->rule_cnt = port->n_rfs_rules;
5168 case ETHTOOL_GRXCLSRULE:
5169 ret = mvpp2_ethtool_cls_rule_get(port, info);
5171 case ETHTOOL_GRXCLSRLALL:
5172 for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5173 if (port->rfs_rules[i])
5184 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5185 struct ethtool_rxnfc *info)
5187 struct mvpp2_port *port = netdev_priv(dev);
5190 if (!mvpp22_rss_is_supported())
5193 switch (info->cmd) {
5195 ret = mvpp2_ethtool_rxfh_set(port, info);
5197 case ETHTOOL_SRXCLSRLINS:
5198 ret = mvpp2_ethtool_cls_rule_ins(port, info);
5200 case ETHTOOL_SRXCLSRLDEL:
5201 ret = mvpp2_ethtool_cls_rule_del(port, info);
5209 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5211 return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
5214 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
5217 struct mvpp2_port *port = netdev_priv(dev);
5220 if (!mvpp22_rss_is_supported())
5224 ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
5227 *hfunc = ETH_RSS_HASH_CRC32;
5232 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
5233 const u8 *key, const u8 hfunc)
5235 struct mvpp2_port *port = netdev_priv(dev);
5238 if (!mvpp22_rss_is_supported())
5241 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5248 ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
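/* A plausible userspace counterpart for the RSS hooks above (device
 * name hypothetical, options per standard ethtool):
 *
 *	ethtool -X eth0 equal 4 hfunc crc32
 *
 * This spreads the indirection table evenly over the first four Rx
 * queues using CRC32, the only hash function this hardware accepts.
 */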
5253 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
5254 u8 *key, u8 *hfunc, u32 rss_context)
5256 struct mvpp2_port *port = netdev_priv(dev);
5259 if (!mvpp22_rss_is_supported())
5261 if (rss_context >= MVPP22_N_RSS_TABLES)
5265 *hfunc = ETH_RSS_HASH_CRC32;
5268 ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
5273 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
5274 const u32 *indir, const u8 *key,
5275 const u8 hfunc, u32 *rss_context,
5278 struct mvpp2_port *port = netdev_priv(dev);
5281 if (!mvpp22_rss_is_supported())
5284 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5291 return mvpp22_port_rss_ctx_delete(port, *rss_context);
5293 if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5294 ret = mvpp22_port_rss_ctx_create(port, rss_context);
5299 return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5303 static const struct net_device_ops mvpp2_netdev_ops = {
5304 .ndo_open = mvpp2_open,
5305 .ndo_stop = mvpp2_stop,
5306 .ndo_start_xmit = mvpp2_tx,
5307 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5308 .ndo_set_mac_address = mvpp2_set_mac_address,
5309 .ndo_change_mtu = mvpp2_change_mtu,
5310 .ndo_get_stats64 = mvpp2_get_stats64,
5311 .ndo_do_ioctl = mvpp2_ioctl,
5312 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
5313 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
5314 .ndo_set_features = mvpp2_set_features,
5315 .ndo_bpf = mvpp2_xdp,
5316 .ndo_xdp_xmit = mvpp2_xdp_xmit,
5319 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5320 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5321 ETHTOOL_COALESCE_MAX_FRAMES,
5322 .nway_reset = mvpp2_ethtool_nway_reset,
5323 .get_link = ethtool_op_get_link,
5324 .get_ts_info = mvpp2_ethtool_get_ts_info,
5325 .set_coalesce = mvpp2_ethtool_set_coalesce,
5326 .get_coalesce = mvpp2_ethtool_get_coalesce,
5327 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5328 .get_ringparam = mvpp2_ethtool_get_ringparam,
5329 .set_ringparam = mvpp2_ethtool_set_ringparam,
5330 .get_strings = mvpp2_ethtool_get_strings,
5331 .get_ethtool_stats = mvpp2_ethtool_get_stats,
5332 .get_sset_count = mvpp2_ethtool_get_sset_count,
5333 .get_pauseparam = mvpp2_ethtool_get_pause_param,
5334 .set_pauseparam = mvpp2_ethtool_set_pause_param,
5335 .get_link_ksettings = mvpp2_ethtool_get_link_ksettings,
5336 .set_link_ksettings = mvpp2_ethtool_set_link_ksettings,
5337 .get_rxnfc = mvpp2_ethtool_get_rxnfc,
5338 .set_rxnfc = mvpp2_ethtool_set_rxnfc,
5339 .get_rxfh_indir_size = mvpp2_ethtool_get_rxfh_indir_size,
5340 .get_rxfh = mvpp2_ethtool_get_rxfh,
5341 .set_rxfh = mvpp2_ethtool_set_rxfh,
5342 .get_rxfh_context = mvpp2_ethtool_get_rxfh_context,
5343 .set_rxfh_context = mvpp2_ethtool_set_rxfh_context,
5346 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5347 * had a single IRQ defined per-port.
5349 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5350 struct device_node *port_node)
5352 struct mvpp2_queue_vector *v = &port->qvecs[0];
5355 v->nrxqs = port->nrxqs;
5356 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5357 v->sw_thread_id = 0;
5358 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5360 v->irq = irq_of_parse_and_map(port_node, 0);
5363 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5371 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5372 struct device_node *port_node)
5374 struct mvpp2 *priv = port->priv;
5375 struct mvpp2_queue_vector *v;
5378 switch (queue_mode) {
5379 case MVPP2_QDIST_SINGLE_MODE:
5380 port->nqvecs = priv->nthreads + 1;
5382 case MVPP2_QDIST_MULTI_MODE:
5383 port->nqvecs = priv->nthreads;
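/* Resulting layouts: in multi mode every software thread gets a private
 * vector owning its share of the Rx queues; in single mode the private
 * vectors handle Tx-done only and one extra shared vector, the last one
 * set up below, owns all the Rx queues.
 */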
5387 for (i = 0; i < port->nqvecs; i++) {
5390 v = port->qvecs + i;
5393 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5394 v->sw_thread_id = i;
5395 v->sw_thread_mask = BIT(i);
5397 if (port->flags & MVPP2_F_DT_COMPAT)
5398 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5400 snprintf(irqname, sizeof(irqname), "hif%d", i);
5402 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5405 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5406 i == (port->nqvecs - 1)) {
5408 v->nrxqs = port->nrxqs;
5409 v->type = MVPP2_QUEUE_VECTOR_SHARED;
5411 if (port->flags & MVPP2_F_DT_COMPAT)
5412 strncpy(irqname, "rx-shared", sizeof(irqname));
5416 v->irq = of_irq_get_byname(port_node, irqname);
5418 v->irq = fwnode_irq_get(port->fwnode, i);
5424 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5431 for (i = 0; i < port->nqvecs; i++)
5432 irq_dispose_mapping(port->qvecs[i].irq);
5436 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5437 struct device_node *port_node)
5439 if (port->has_tx_irqs)
5440 return mvpp2_multi_queue_vectors_init(port, port_node);
5442 return mvpp2_simple_queue_vectors_init(port, port_node);
5445 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5449 for (i = 0; i < port->nqvecs; i++)
5450 irq_dispose_mapping(port->qvecs[i].irq);
5453 /* Configure Rx queue group interrupt for this port */
5454 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5456 struct mvpp2 *priv = port->priv;
5460 if (priv->hw_version == MVPP21) {
5461 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5466 /* Handle the more complicated PPv2.2 case */
5467 for (i = 0; i < port->nqvecs; i++) {
5468 struct mvpp2_queue_vector *qv = port->qvecs + i;
5473 val = qv->sw_thread_id;
5474 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5475 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5477 val = qv->first_rxq;
5478 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5479 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5483 /* Initialize port HW */
5484 static int mvpp2_port_init(struct mvpp2_port *port)
5486 struct device *dev = port->dev->dev.parent;
5487 struct mvpp2 *priv = port->priv;
5488 struct mvpp2_txq_pcpu *txq_pcpu;
5489 unsigned int thread;
5492 /* Checks for hardware constraints */
5493 if (port->first_rxq + port->nrxqs >
5494 MVPP2_MAX_PORTS * priv->max_port_rxqs)
5497 if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5501 mvpp2_egress_disable(port);
5502 mvpp2_port_disable(port);
5504 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5506 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5511 /* Associate physical Tx queues with this port and initialize them.
5512 * The mapping is predefined.
5514 for (queue = 0; queue < port->ntxqs; queue++) {
5515 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5516 struct mvpp2_tx_queue *txq;
5518 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5521 goto err_free_percpu;
5524 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5527 goto err_free_percpu;
5530 txq->id = queue_phy_id;
5531 txq->log_id = queue;
5532 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5533 for (thread = 0; thread < priv->nthreads; thread++) {
5534 txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5535 txq_pcpu->thread = thread;
5538 port->txqs[queue] = txq;
5541 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5545 goto err_free_percpu;
5548 /* Allocate and initialize Rx queue for this port */
5549 for (queue = 0; queue < port->nrxqs; queue++) {
5550 struct mvpp2_rx_queue *rxq;
5552 /* Map physical Rx queue to port's logical Rx queue */
5553 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5556 goto err_free_percpu;
5558 /* Map this Rx queue to a physical queue */
5559 rxq->id = port->first_rxq + queue;
5560 rxq->port = port->id;
5561 rxq->logic_rxq = queue;
5563 port->rxqs[queue] = rxq;
5566 mvpp2_rx_irqs_setup(port);
5568 /* Create Rx descriptor rings */
5569 for (queue = 0; queue < port->nrxqs; queue++) {
5570 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5572 rxq->size = port->rx_ring_size;
5573 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5574 rxq->time_coal = MVPP2_RX_COAL_USEC;
5577 mvpp2_ingress_disable(port);
5579 /* Port default configuration */
5580 mvpp2_defaults_set(port);
5582 /* Port's classifier configuration */
5583 mvpp2_cls_oversize_rxq_set(port);
5584 mvpp2_cls_port_config(port);
5586 if (mvpp22_rss_is_supported())
5587 mvpp22_port_rss_init(port);
5589 /* Provide an initial Rx packet size */
5590 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5592 /* Initialize pools for swf */
5593 err = mvpp2_swf_bm_pool_init(port);
5595 goto err_free_percpu;
5597 /* Clear all port stats */
5598 mvpp2_read_stats(port);
5599 memset(port->ethtool_stats, 0,
5600 MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
5605 for (queue = 0; queue < port->ntxqs; queue++) {
5606 if (!port->txqs[queue])
5608 free_percpu(port->txqs[queue]->pcpu);
5613 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
5614 unsigned long *flags)
5616 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
5620 for (i = 0; i < 5; i++)
5621 if (of_property_match_string(port_node, "interrupt-names",
5625 *flags |= MVPP2_F_DT_COMPAT;
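/* A hypothetical DT fragment matching the new-style names probed below:
 *
 *	interrupt-names = "hif0", "hif1", "hif2", "hif3", "hif4";
 *
 * Legacy trees instead use "rx-shared" plus "tx-cpu0".."tx-cpu3" and are
 * flagged MVPP2_F_DT_COMPAT here.
 */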
5629 /* Checks if the port dt description has the required Tx interrupts:
5630 * - PPv2.1: there are no such interrupts.
5632 * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0..3]
5633 * - The new ones have: "hifX" with X in [0..8]
5635 * All these variants are supported to preserve backward compatibility.
5637 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
5638 struct device_node *port_node,
5639 unsigned long *flags)
5648 if (priv->hw_version == MVPP21)
5651 if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
5654 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5655 snprintf(name, 5, "hif%d", i);
5656 if (of_property_match_string(port_node, "interrupt-names",
5664 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
5665 struct fwnode_handle *fwnode,
5668 struct mvpp2_port *port = netdev_priv(dev);
5669 char hw_mac_addr[ETH_ALEN] = {0};
5670 char fw_mac_addr[ETH_ALEN];
5672 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
5673 *mac_from = "firmware node";
5674 ether_addr_copy(dev->dev_addr, fw_mac_addr);
5678 if (priv->hw_version == MVPP21) {
5679 mvpp21_get_mac_address(port, hw_mac_addr);
5680 if (is_valid_ether_addr(hw_mac_addr)) {
5681 *mac_from = "hardware";
5682 ether_addr_copy(dev->dev_addr, hw_mac_addr);
5687 *mac_from = "random";
5688 eth_hw_addr_random(dev);
5691 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
5693 return container_of(config, struct mvpp2_port, phylink_config);
5696 static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
5698 return container_of(pcs, struct mvpp2_port, phylink_pcs);
5701 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
5702 struct phylink_link_state *state)
5704 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5707 state->speed = SPEED_10000;
5709 state->an_complete = 1;
5711 val = readl(port->base + MVPP22_XLG_STATUS);
5712 state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
5715 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5716 if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
5717 state->pause |= MLO_PAUSE_TX;
5718 if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
5719 state->pause |= MLO_PAUSE_RX;
5722 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
5724 phy_interface_t interface,
5725 const unsigned long *advertising,
5726 bool permit_pause_to_mac)
5731 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
5732 .pcs_get_state = mvpp2_xlg_pcs_get_state,
5733 .pcs_config = mvpp2_xlg_pcs_config,
5736 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
5737 struct phylink_link_state *state)
5739 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5742 val = readl(port->base + MVPP2_GMAC_STATUS0);
5744 state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
5745 state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
5746 state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
5748 switch (port->phy_interface) {
5749 case PHY_INTERFACE_MODE_1000BASEX:
5750 state->speed = SPEED_1000;
5752 case PHY_INTERFACE_MODE_2500BASEX:
5753 state->speed = SPEED_2500;
5756 if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
5757 state->speed = SPEED_1000;
5758 else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
5759 state->speed = SPEED_100;
5761 state->speed = SPEED_10;
5765 if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
5766 state->pause |= MLO_PAUSE_RX;
5767 if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
5768 state->pause |= MLO_PAUSE_TX;
5771 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
5772 phy_interface_t interface,
5773 const unsigned long *advertising,
5774 bool permit_pause_to_mac)
5776 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5777 u32 mask, val, an, old_an, changed;
5779 mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
5780 MVPP2_GMAC_IN_BAND_AUTONEG |
5781 MVPP2_GMAC_AN_SPEED_EN |
5782 MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5783 MVPP2_GMAC_AN_DUPLEX_EN;
5785 if (phylink_autoneg_inband(mode)) {
5786 mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
5787 MVPP2_GMAC_CONFIG_GMII_SPEED |
5788 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5789 val = MVPP2_GMAC_IN_BAND_AUTONEG;
5791 if (interface == PHY_INTERFACE_MODE_SGMII) {
5792 /* SGMII mode receives the speed and duplex from PHY */
5793 val |= MVPP2_GMAC_AN_SPEED_EN |
5794 MVPP2_GMAC_AN_DUPLEX_EN;
5796 /* 802.3z mode has fixed speed and duplex */
5797 val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
5798 MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5800 /* The FLOW_CTRL_AUTONEG bit selects whether the hardware controls
5801 * the GMAC pause modes automatically, or whether the bits in
5802 * MVPP22_GMAC_CTRL_4_REG control them manually.
5804 if (permit_pause_to_mac)
5805 val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5807 /* Configure advertisement bits */
5808 mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
5809 if (phylink_test(advertising, Pause))
5810 val |= MVPP2_GMAC_FC_ADV_EN;
5811 if (phylink_test(advertising, Asym_Pause))
5812 val |= MVPP2_GMAC_FC_ADV_ASM_EN;
5818 old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5819 an = (an & ~mask) | val;
5820 changed = an ^ old_an;
5822 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5824 /* We are only interested in the advertisement bits changing */
5825 return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
5828 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
5830 struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5831 u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5833 writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
5834 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5835 writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
5836 port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5839 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
5840 .pcs_get_state = mvpp2_gmac_pcs_get_state,
5841 .pcs_config = mvpp2_gmac_pcs_config,
5842 .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
5845 static void mvpp2_phylink_validate(struct phylink_config *config,
5846 unsigned long *supported,
5847 struct phylink_link_state *state)
5849 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5850 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5852 /* Invalid combinations */
5853 switch (state->interface) {
5854 case PHY_INTERFACE_MODE_10GBASER:
5855 case PHY_INTERFACE_MODE_XAUI:
5856 if (!mvpp2_port_supports_xlg(port))
5859 case PHY_INTERFACE_MODE_RGMII:
5860 case PHY_INTERFACE_MODE_RGMII_ID:
5861 case PHY_INTERFACE_MODE_RGMII_RXID:
5862 case PHY_INTERFACE_MODE_RGMII_TXID:
5863 if (!mvpp2_port_supports_rgmii(port))
5870 phylink_set(mask, Autoneg);
5871 phylink_set_port_modes(mask);
5872 phylink_set(mask, Pause);
5873 phylink_set(mask, Asym_Pause);
5875 switch (state->interface) {
5876 case PHY_INTERFACE_MODE_10GBASER:
5877 case PHY_INTERFACE_MODE_XAUI:
5878 case PHY_INTERFACE_MODE_NA:
5879 if (mvpp2_port_supports_xlg(port)) {
5880 phylink_set(mask, 10000baseT_Full);
5881 phylink_set(mask, 10000baseCR_Full);
5882 phylink_set(mask, 10000baseSR_Full);
5883 phylink_set(mask, 10000baseLR_Full);
5884 phylink_set(mask, 10000baseLRM_Full);
5885 phylink_set(mask, 10000baseER_Full);
5886 phylink_set(mask, 10000baseKR_Full);
5888 if (state->interface != PHY_INTERFACE_MODE_NA)
5891 case PHY_INTERFACE_MODE_RGMII:
5892 case PHY_INTERFACE_MODE_RGMII_ID:
5893 case PHY_INTERFACE_MODE_RGMII_RXID:
5894 case PHY_INTERFACE_MODE_RGMII_TXID:
5895 case PHY_INTERFACE_MODE_SGMII:
5896 phylink_set(mask, 10baseT_Half);
5897 phylink_set(mask, 10baseT_Full);
5898 phylink_set(mask, 100baseT_Half);
5899 phylink_set(mask, 100baseT_Full);
5900 phylink_set(mask, 1000baseT_Full);
5901 phylink_set(mask, 1000baseX_Full);
5902 if (state->interface != PHY_INTERFACE_MODE_NA)
5905 case PHY_INTERFACE_MODE_1000BASEX:
5906 case PHY_INTERFACE_MODE_2500BASEX:
5908 state->interface != PHY_INTERFACE_MODE_2500BASEX) {
5909 phylink_set(mask, 1000baseT_Full);
5910 phylink_set(mask, 1000baseX_Full);
5913 state->interface == PHY_INTERFACE_MODE_2500BASEX) {
5914 phylink_set(mask, 2500baseT_Full);
5915 phylink_set(mask, 2500baseX_Full);
5922 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
5923 bitmap_and(state->advertising, state->advertising, mask,
5924 __ETHTOOL_LINK_MODE_MASK_NBITS);
5926 phylink_helper_basex_speed(state);
5930 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
5933 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
5934 const struct phylink_link_state *state)
5938 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5939 MVPP22_XLG_CTRL0_MAC_RESET_DIS,
5940 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5941 mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
5942 MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
5943 MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
5944 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
5945 MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
5947 /* Wait for reset to deassert */
5949 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5950 } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
5953 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
5954 const struct phylink_link_state *state)
5956 u32 old_ctrl0, ctrl0;
5957 u32 old_ctrl2, ctrl2;
5958 u32 old_ctrl4, ctrl4;
5960 old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5961 old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5962 old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
5964 ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
5965 ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
5967 /* Configure port type */
5968 if (phy_interface_mode_is_8023z(state->interface)) {
5969 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
5970 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5971 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5972 MVPP22_CTRL4_DP_CLK_SEL |
5973 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5974 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5975 ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
5976 ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5977 ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5978 MVPP22_CTRL4_DP_CLK_SEL |
5979 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5980 } else if (phy_interface_mode_is_rgmii(state->interface)) {
5981 ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
5982 ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5983 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5984 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5987 /* Configure negotiation style */
5988 if (!phylink_autoneg_inband(mode)) {
5989 /* Phy or fixed speed - no in-band AN, nothing to do, leave the
5990 * configured speed, duplex and flow control as-is.
5992 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5993 /* SGMII in-band mode receives the speed and duplex from
5994 * the PHY. Flow control information is not received. */
5995 } else if (phy_interface_mode_is_8023z(state->interface)) {
5996 /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
5997 * they negotiate duplex: they are always operating with a fixed
5998 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
5999 * speed and full duplex here.
6001 ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6004 if (old_ctrl0 != ctrl0)
6005 writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6006 if (old_ctrl2 != ctrl2)
6007 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6008 if (old_ctrl4 != ctrl4)
6009 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6012 static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
6013 phy_interface_t interface)
6015 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6017 /* Check for invalid configuration */
6018 if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6019 netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6023 if (port->phy_interface != interface ||
6024 phylink_autoneg_inband(mode)) {
6025 /* Force the link down when changing the interface or if in
6026 * in-band mode to ensure we do not change the configuration
6027 * while the hardware is indicating link is up. We force both
6028 * XLG and GMAC down to ensure that they're both in a known
6031 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6032 MVPP2_GMAC_FORCE_LINK_PASS |
6033 MVPP2_GMAC_FORCE_LINK_DOWN,
6034 MVPP2_GMAC_FORCE_LINK_DOWN);
6036 if (mvpp2_port_supports_xlg(port))
6037 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6038 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6039 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6040 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6043 /* Make sure the port is disabled when reconfiguring the mode */
6044 mvpp2_port_disable(port);
6046 if (port->phy_interface != interface) {
6047 /* Place GMAC into reset */
6048 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6049 MVPP2_GMAC_PORT_RESET_MASK,
6050 MVPP2_GMAC_PORT_RESET_MASK);
6052 if (port->priv->hw_version == MVPP22) {
6053 mvpp22_gop_mask_irq(port);
6055 phy_power_off(port->comphy);
6059 /* Select the appropriate PCS operations depending on the
6060 * configured interface mode. We will only switch to a mode
6061 * that has already passed the validate() checks.
6063 if (mvpp2_is_xlg(interface))
6064 port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
6066 port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
6071 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6072 phy_interface_t interface)
6074 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6077 ret = mvpp2__mac_prepare(config, mode, interface);
6079 phylink_set_pcs(port->phylink, &port->phylink_pcs);
6084 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6085 const struct phylink_link_state *state)
6087 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6089 /* mac (re)configuration */
6090 if (mvpp2_is_xlg(state->interface))
6091 mvpp2_xlg_config(port, mode, state);
6092 else if (phy_interface_mode_is_rgmii(state->interface) ||
6093 phy_interface_mode_is_8023z(state->interface) ||
6094 state->interface == PHY_INTERFACE_MODE_SGMII)
6095 mvpp2_gmac_config(port, mode, state);
6097 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6098 mvpp2_port_loopback_set(port, state);
6101 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6102 phy_interface_t interface)
6104 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6106 if (port->priv->hw_version == MVPP22 &&
6107 port->phy_interface != interface) {
6108 port->phy_interface = interface;
6110 /* Reconfigure the serdes lanes */
6111 mvpp22_mode_reconfigure(port);
6113 /* Unmask interrupts */
6114 mvpp22_gop_unmask_irq(port);
6117 if (!mvpp2_is_xlg(interface)) {
6118 /* Release GMAC reset and wait */
6119 mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6120 MVPP2_GMAC_PORT_RESET_MASK, 0);
6122 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6123 MVPP2_GMAC_PORT_RESET_MASK)
6127 mvpp2_port_enable(port);
6129 /* Allow the link to come up if in in-band mode, otherwise the
6130 * link is forced via mac_link_down()/mac_link_up()
6132 if (phylink_autoneg_inband(mode)) {
6133 if (mvpp2_is_xlg(interface))
6134 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6135 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6136 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6138 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6139 MVPP2_GMAC_FORCE_LINK_PASS |
6140 MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6146 static void mvpp2_mac_link_up(struct phylink_config *config,
6147 struct phy_device *phy,
6148 unsigned int mode, phy_interface_t interface,
6149 int speed, int duplex,
6150 bool tx_pause, bool rx_pause)
6152 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6155 if (mvpp2_is_xlg(interface)) {
6156 if (!phylink_autoneg_inband(mode)) {
6157 val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6159 val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6161 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6163 mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6164 MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6165 MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6166 MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6167 MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6170 if (!phylink_autoneg_inband(mode)) {
6171 val = MVPP2_GMAC_FORCE_LINK_PASS;
6173 if (speed == SPEED_1000 || speed == SPEED_2500)
6174 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6175 else if (speed == SPEED_100)
6176 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6178 if (duplex == DUPLEX_FULL)
6179 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6181 mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6182 MVPP2_GMAC_FORCE_LINK_DOWN |
6183 MVPP2_GMAC_FORCE_LINK_PASS |
6184 MVPP2_GMAC_CONFIG_MII_SPEED |
6185 MVPP2_GMAC_CONFIG_GMII_SPEED |
6186 MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6189 /* We can always update the flow control enable bits;
6190 * these will only be effective if flow control AN
6191 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6195 val |= MVPP22_CTRL4_TX_FC_EN;
6197 val |= MVPP22_CTRL4_RX_FC_EN;
6199 mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6200 MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6204 mvpp2_port_enable(port);
6206 mvpp2_egress_enable(port);
6207 mvpp2_ingress_enable(port);
6208 netif_tx_wake_all_queues(port->dev);
6211 static void mvpp2_mac_link_down(struct phylink_config *config,
6212 unsigned int mode, phy_interface_t interface)
6214 struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6217 if (!phylink_autoneg_inband(mode)) {
6218 if (mvpp2_is_xlg(interface)) {
6219 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6220 val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6221 val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6222 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6224 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6225 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6226 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6227 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6231 netif_tx_stop_all_queues(port->dev);
6232 mvpp2_egress_disable(port);
6233 mvpp2_ingress_disable(port);
6235 mvpp2_port_disable(port);
6238 static const struct phylink_mac_ops mvpp2_phylink_ops = {
6239 .validate = mvpp2_phylink_validate,
6240 .mac_prepare = mvpp2_mac_prepare,
6241 .mac_config = mvpp2_mac_config,
6242 .mac_finish = mvpp2_mac_finish,
6243 .mac_link_up = mvpp2_mac_link_up,
6244 .mac_link_down = mvpp2_mac_link_down,
6247 /* Work-around for ACPI */
6248 static void mvpp2_acpi_start(struct mvpp2_port *port)
6250 /* Phylink isn't used for ACPI as of now, so the MAC has to be
6251 * configured manually when the interface is started. This will
6252 * be removed as soon as phylink ACPI support lands upstream.
6254 struct phylink_link_state state = {
6255 .interface = port->phy_interface,
6257 mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6258 port->phy_interface);
6259 mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6260 port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
6261 port->phy_interface,
6262 state.advertising, false);
6263 mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6264 port->phy_interface);
6265 mvpp2_mac_link_up(&port->phylink_config, NULL,
6266 MLO_AN_INBAND, port->phy_interface,
6267 SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
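/* The calls above hand-roll the sequence phylink would otherwise drive:
 * mac_prepare -> mac_config -> pcs_config -> mac_finish -> mac_link_up,
 * all in in-band mode with SPEED_UNKNOWN/DUPLEX_UNKNOWN so the MAC/PCS
 * resolve speed and duplex themselves.
 */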
6270 /* Ports initialization */
6271 static int mvpp2_port_probe(struct platform_device *pdev,
6272 struct fwnode_handle *port_fwnode,
6275 struct phy *comphy = NULL;
6276 struct mvpp2_port *port;
6277 struct mvpp2_port_pcpu *port_pcpu;
6278 struct device_node *port_node = to_of_node(port_fwnode);
6279 netdev_features_t features;
6280 struct net_device *dev;
6281 struct phylink *phylink;
6282 char *mac_from = "";
6283 unsigned int ntxqs, nrxqs, thread;
6284 unsigned long flags = 0;
6290 has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6291 if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6293 "not enough IRQs to support multi queue mode\n");
6297 ntxqs = MVPP2_MAX_TXQ;
6298 nrxqs = mvpp2_get_nrxqs(priv);
6300 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6304 phy_mode = fwnode_get_phy_mode(port_fwnode);
6306 dev_err(&pdev->dev, "incorrect phy mode\n");
6308 goto err_free_netdev;
6312 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6313 * Existing usage of 10GBASE-KR is not correct; no backplane
6314 * negotiation is done, and this driver does not actually support
6317 if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6318 phy_mode = PHY_INTERFACE_MODE_10GBASER;
6321 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6322 if (IS_ERR(comphy)) {
6323 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6324 err = -EPROBE_DEFER;
6325 goto err_free_netdev;
6331 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6333 dev_err(&pdev->dev, "missing port-id value\n");
6334 goto err_free_netdev;
6337 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6338 dev->watchdog_timeo = 5 * HZ;
6339 dev->netdev_ops = &mvpp2_netdev_ops;
6340 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6342 port = netdev_priv(dev);
6344 port->fwnode = port_fwnode;
6345 port->has_phy = !!of_find_property(port_node, "phy", NULL);
6346 port->ntxqs = ntxqs;
6347 port->nrxqs = nrxqs;
6349 port->has_tx_irqs = has_tx_irqs;
6350 port->flags = flags;
6352 err = mvpp2_queue_vectors_init(port, port_node);
6354 goto err_free_netdev;
6357 port->port_irq = of_irq_get_byname(port_node, "link");
6359 port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6360 if (port->port_irq == -EPROBE_DEFER) {
6361 err = -EPROBE_DEFER;
6362 goto err_deinit_qvecs;
6364 if (port->port_irq <= 0)
6365 /* the link irq is optional */
6366 port->port_irq = 0;
6368 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6369 port->flags |= MVPP2_F_LOOPBACK;
6372 if (priv->hw_version == MVPP21)
6373 port->first_rxq = port->id * port->nrxqs;
6375 port->first_rxq = port->id * priv->max_port_rxqs;
6377 port->of_node = port_node;
6378 port->phy_interface = phy_mode;
6379 port->comphy = comphy;
6381 if (priv->hw_version == MVPP21) {
6382 port->base = devm_platform_ioremap_resource(pdev, 2 + id);
6383 if (IS_ERR(port->base)) {
6384 err = PTR_ERR(port->base);
6388 port->stats_base = port->priv->lms_base +
6389 MVPP21_MIB_COUNTERS_OFFSET +
6390 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
6392 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
6395 dev_err(&pdev->dev, "missing gop-port-id value\n");
6396 goto err_deinit_qvecs;
6399 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6400 port->stats_base = port->priv->iface_base +
6401 MVPP22_MIB_COUNTERS_OFFSET +
6402 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6404 /* We may want a property to describe whether we should use
6405 * MAC hardware timestamping.
6408 port->hwtstamp = true;
6411 /* Alloc per-cpu and ethtool stats */
6412 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6418 port->ethtool_stats = devm_kcalloc(&pdev->dev,
6419 MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
6420 sizeof(u64), GFP_KERNEL);
6421 if (!port->ethtool_stats) {
6423 goto err_free_stats;
6426 mutex_init(&port->gather_stats_lock);
6427 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6429 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6431 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6432 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6433 SET_NETDEV_DEV(dev, &pdev->dev);
6435 err = mvpp2_port_init(port);
6437 dev_err(&pdev->dev, "failed to init port %d\n", id);
6438 goto err_free_stats;
6441 mvpp2_port_periodic_xon_disable(port);
6443 mvpp2_mac_reset_assert(port);
6444 mvpp22_pcs_reset_assert(port);
6446 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6449 goto err_free_txq_pcpu;
6452 if (!port->has_tx_irqs) {
6453 for (thread = 0; thread < priv->nthreads; thread++) {
6454 port_pcpu = per_cpu_ptr(port->pcpu, thread);
6456 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6457 HRTIMER_MODE_REL_PINNED_SOFT);
6458 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6459 port_pcpu->timer_scheduled = false;
6460 port_pcpu->dev = dev;
6464 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6466 dev->features = features | NETIF_F_RXCSUM;
6467 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6468 NETIF_F_HW_VLAN_CTAG_FILTER;
6470 if (mvpp22_rss_is_supported()) {
6471 dev->hw_features |= NETIF_F_RXHASH;
6472 dev->features |= NETIF_F_NTUPLE;
6475 if (!port->priv->percpu_pools)
6476 mvpp2_set_hw_csum(port, port->pool_long->id);
6478 dev->vlan_features |= features;
6479 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
6480 dev->priv_flags |= IFF_UNICAST_FLT;
6482 /* MTU range: 68 - 9704 */
6483 dev->min_mtu = ETH_MIN_MTU;
6484 /* 9704 == 9728 - 20 and rounding to 8 */
6485 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6486 dev->dev.of_node = port_node;
6488 /* Phylink isn't used with ACPI as of now */
6490 port->phylink_config.dev = &dev->dev;
6491 port->phylink_config.type = PHYLINK_NETDEV;
6493 phylink = phylink_create(&port->phylink_config, port_fwnode,
6494 phy_mode, &mvpp2_phylink_ops);
6495 if (IS_ERR(phylink)) {
6496 err = PTR_ERR(phylink);
6497 goto err_free_port_pcpu;
6499 port->phylink = phylink;
6501 port->phylink = NULL;
6504 /* Cycle the comphy to power it down, saving 270mW per port -
6505 * don't worry about an error powering it up. When the comphy
6506 * driver does this, we can remove this code.
6509 err = mvpp22_comphy_init(port);
6511 phy_power_off(port->comphy);
6514 err = register_netdev(dev);
6516 dev_err(&pdev->dev, "failed to register netdev\n");
6519 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6521 priv->port_list[priv->port_count++] = port;
6527 phylink_destroy(port->phylink);
6529 free_percpu(port->pcpu);
6531 for (i = 0; i < port->ntxqs; i++)
6532 free_percpu(port->txqs[i]->pcpu);
6534 free_percpu(port->stats);
6537 irq_dispose_mapping(port->port_irq);
6539 mvpp2_queue_vectors_deinit(port);
6545 /* Ports removal routine */
6546 static void mvpp2_port_remove(struct mvpp2_port *port)
6550 unregister_netdev(port->dev);
6552 phylink_destroy(port->phylink);
6553 free_percpu(port->pcpu);
6554 free_percpu(port->stats);
6555 for (i = 0; i < port->ntxqs; i++)
6556 free_percpu(port->txqs[i]->pcpu);
6557 mvpp2_queue_vectors_deinit(port);
6559 irq_dispose_mapping(port->port_irq);
6560 free_netdev(port->dev);
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
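
/* Editorial illustration, not part of the original driver: a worked
 * example of the window programming above, with hypothetical values.
 * For a single DRAM chip-select with cs->base = 0x00000000,
 * cs->size = 0x40000000 (1GB), cs->mbus_attr = 0x0e and
 * dram->mbus_dram_target_id = 0x0, the second loop would write:
 *
 *   MVPP2_WIN_BASE(0) = (0x00000000 & 0xffff0000) | (0x0e << 8) | 0x0
 *                     = 0x00000e00
 *   MVPP2_WIN_SIZE(0) = (0x40000000 - 1) & 0xffff0000 = 0x3fff0000
 *
 * and then set bit 0 of MVPP2_BASE_ADDR_ENABLE to activate the window.
 */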
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{
	int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
}
/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2.
 * 4kB fixed space must be assigned for the loopback port.
 * Redistribute the remaining available 44kB space among all active ports.
 * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, capable
 * of a 2.5G SGMII link.
 */
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 4kB of the FIFO space assignment. */
	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set RX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_rx_fifo_set_hw(priv, port, 0);

	/* Assign remaining RX FIFO space among all active ports. */
	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
	remaining_ports_count = hweight_long(port_map);

	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = size_remainder;
		else if (port == 0)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
		else if (port == 1)
			size = max(size_remainder / remaining_ports_count,
				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_rx_fifo_set_hw(priv, port, size);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
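
/* Editorial illustration, not part of the original driver: a worked
 * example of the redistribution above for a hypothetical priv->port_map
 * with only ports 0 and 1 active. size_remainder starts at 44kB and
 * remaining_ports_count at 2. Port 0 (10G-capable) gets
 * max(44kB / 2, 32kB) = 32kB, leaving 12kB; port 1 is then the last
 * remaining port and takes the whole 12kB remainder, comfortably above
 * its 8kB guarantee. Together with the fixed 4kB loopback slice, this
 * accounts for the full 48kB RX FIFO.
 */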
static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{
	int threshold = MVPP2_TX_FIFO_THRESHOLD(size);

	mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
	mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
}
/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2.
 * 3kB fixed space must be assigned for the loopback port.
 * Redistribute the remaining available 16kB space among all active ports.
 * The 10G interface should use 10kB (which is the maximum possible size
 * of all interfaces).
 */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int remaining_ports_count;
	unsigned long port_map;
	int size_remainder;
	int port, size;

	/* The loopback requires fixed 3kB of the FIFO space assignment. */
	mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
			      MVPP22_TX_FIFO_DATA_SIZE_3KB);
	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);

	/* Set TX FIFO size to 0 for inactive ports. */
	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
		mvpp22_tx_fifo_set_hw(priv, port, 0);

	/* Assign remaining TX FIFO space among all active ports. */
	size_remainder = MVPP22_TX_FIFO_DATA_SIZE_16KB;
	remaining_ports_count = hweight_long(port_map);

	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
		if (remaining_ports_count == 1)
			size = min(size_remainder,
				   MVPP22_TX_FIFO_DATA_SIZE_10KB);
		else if (port == 0)
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
		else
			size = size_remainder / remaining_ports_count;

		size_remainder -= size;
		remaining_ports_count--;

		mvpp22_tx_fifo_set_hw(priv, port, size);
	}
}
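
/* Editorial illustration, not part of the original driver: a worked
 * example of the TX split above with hypothetical ports 0, 1 and 2
 * active. size_remainder starts at 16kB. Port 0 takes the fixed 10kB
 * (the 10G maximum), leaving 6kB for two ports; port 1 gets
 * 6kB / 2 = 3kB; port 2, as the last port, gets min(3kB, 10kB) = 3kB.
 * With the fixed 3kB loopback slice, this accounts for the full 19kB
 * TX FIFO.
 */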
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(&pdev->dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* multi queue mode isn't supported on PPV2.1, fallback to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by second
			 * region of the network controller, make
			 * sure it is released, before requesting it again.
			 * The care is taken by mvpp2 driver to avoid
			 * concurrent access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	if (priv->hw_version == MVPP22 &&
	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
		priv->percpu_pools = 1;

	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));
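
	/* Editorial note: the hardware exposes at most MVPP2_MAX_THREADS
	 * per-thread register windows. When num_present_cpus() exceeds
	 * priv->nthreads, the surplus CPUs have to share windows with
	 * other CPUs, so the corresponding bits in priv->lock_map are set
	 * and accesses from those threads are serialized. When every CPU
	 * has its own window, shared is 0 and the fast path stays
	 * lock-free.
	 */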
	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}
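
	/* Editorial illustration of the constraint above, with
	 * hypothetical addresses: if one BM pool were allocated at
	 * physical 0x1_0000_0000 and another at 0x2_0000_0000, the single
	 * shared high-address register could not describe both pools at
	 * once. Restricting coherent allocations to DMA_BIT_MASK(32)
	 * keeps the upper 32 bits of every pool's address identical.
	 */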
	/* Map DTS-active ports. Should be done before FIFO mvpp2_init */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
			priv->port_map |= BIT(i);
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	err = mvpp22_tai_probe(&pdev->dev, priv);
	if (err < 0)
		goto err_axi_clk;

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
	 */
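
	/* Editorial back-of-envelope check of the comment above: at
	 * 10Gb/s line rate the octet counters see up to ~1.25GB/s, so a
	 * 32-bit counter (2^32 ~ 4.29e9) wraps in roughly 3.5 seconds.
	 * Minimum-size 64B frames arrive at ~14.88Mpps (84 bytes per
	 * frame on the wire), wrapping a 32-bit packet counter in about
	 * five minutes. Hence the workqueue below periodically folds the
	 * hardware counters into 64-bit software counters before they
	 * can wrap.
	 */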
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
	struct fwnode_handle *port_fwnode;

	mvpp2_dbgfs_cleanup(priv);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	destroy_workqueue(priv->stats_queue);

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	for (i = 0; i < poolnum; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");