/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include "bgmac.h"
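
/* Spin for up to @timeout microseconds (polling every 10 us) until the bits
 * selected by @mask in @reg read back as @value; returns false and logs a
 * timeout otherwise.
 */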
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/
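
/* Reset one TX DMA ring: suspend the engine, wait for it to report a
 * stopped/idle/disabled state, then clear the control register and verify
 * the DISABLED status.
 */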
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of a few values,
	 * so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}
static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}
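
/* Fill TX descriptor @i with the DMA address already stored in the matching
 * slot; the last descriptor in the ring additionally carries the
 * end-of-table flag.
 */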
static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
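
/* Queue one skb for transmission: map the linear part and every fragment,
 * fill one descriptor per buffer and finally tell the hardware the new end
 * of the ring. The queue is stopped once fewer than 8 free slots remain.
 */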
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Advance ring->end to point at an empty slot. We tell hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
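
/* The hardware's current position is reported in the TX STATUS register as
 * a byte offset relative to index_base; it is converted to a slot index
 * before reclaiming finished descriptors.
 */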
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!freed)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}
static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}
static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc skb */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}
static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}
static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}
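
/* NAPI RX path: walk the ring from ring->start up to the slot the hardware
 * reports in the RX STATUS register, replace each consumed buffer with a
 * freshly mapped one and pass the old buffer to the stack via build_skb().
 */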
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}
static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
		unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}
static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}
static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}
static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}
static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}
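
/* Allocate the coherent descriptor rings only. RX buffers are attached
 * later in bgmac_dma_init(); TX slots are filled at transmit time.
 */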
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
			dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
			return -ENOTSUPP;
		}
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}
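
/* Program the ring base addresses, enable the engines and post the initial
 * RX buffers. For rings flagged as unaligned the enable is done only after
 * the base address has been written, hence the ring->unaligned special
 * casing below.
 */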
static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points at the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}

/**************************************************
 * Chip ops
 **************************************************/
/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}
static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}
static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}
#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif
static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
			bgmac_idm_write(bgmac, BCMA_IOCTL,
					bgmac_idm_read(bgmac, BCMA_IOCTL) |
					0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
		}
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
{
	u32 iost;

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
		bgmac_chip_reset_idm_config(bgmac);

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has
	 * to be kept until taking the MAC out of the reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}
static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}
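
/* NAPI poll: ack all interrupt sources, reclaim TX ring 0, receive up to
 * @weight packets from RX ring 0 and re-enable interrupts only when the
 * budget was not exhausted.
 */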
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/
static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* The specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  net_dev->name, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}
static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}
static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	ether_addr_copy(net_dev->dev_addr, sa->sa_data);
	bgmac_write_mac_address(bgmac, net_dev->dev_addr);

	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/
struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)
static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}
static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
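
/* 64-bit MIB counters are spread over two consecutive 32-bit registers;
 * for those the high word is read from offset + 4.
 */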
static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}
static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/
void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);
int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;

	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);
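
/* Common probe path for the bus-specific front-ends. By the time this runs
 * the caller is expected to have used bgmac_alloc() and filled in dev,
 * dma_dev, irq, the MAC address and the feature flags.
 */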
int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	bgmac_chip_intrs_off(bgmac);

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);
	dev_set_drvdata(bgmac->dev, bgmac);

	if (!is_valid_ether_addr(net_dev->dev_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			net_dev->dev_addr);
		eth_hw_addr_random(net_dev);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

	/* This (reset &) enable is not present in the specs or reference
	 * driver, but Broadcom does it in arch PCI code when enabling fake
	 * PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to fix the IRQ by assigning OOB #6 to the core */
	if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
		if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
			bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
	}

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);
void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
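
/* Suspend/resume helpers for the bus glue: suspend tears down the DMA state
 * completely, resume rebuilds it via bgmac_dma_init() and bgmac_chip_init().
 */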
int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");