// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
 * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sort.h>
#include <linux/random.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>
#include <net/rtnetlink.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/fib_rules.h>
#include <net/fib_notifier.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <generated/utsrelease.h>

#include "rocker_hw.h"
#include "rocker.h"
#include "rocker_tlv.h"
static const char rocker_driver_name[] = "rocker";

static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
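/* A rocker_wait is the driver's sleep/wake primitive: one is attached to
 * every command descriptor (via the descriptor cookie) so the submitter
 * can block until the command IRQ handler signals completion; "nowait"
 * marks fire-and-forget commands whose completion nobody sleeps on.
 */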
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;
	bool nowait;
};

static void rocker_wait_reset(struct rocker_wait *wait)
{
	wait->done = false;
	wait->nowait = false;
}

static void rocker_wait_init(struct rocker_wait *wait)
{
	init_waitqueue_head(&wait->wait);
	rocker_wait_reset(wait);
}

static struct rocker_wait *rocker_wait_create(void)
{
	struct rocker_wait *wait;

	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait)
		return NULL;
	return wait;
}

static void rocker_wait_destroy(struct rocker_wait *wait)
{
	kfree(wait);
}

static bool rocker_wait_event_timeout(struct rocker_wait *wait,
				      unsigned long timeout)
{
	wait_event_timeout(wait->wait, wait->done, timeout);
	return wait->done;
}

static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
	return rocker->msix_entries[vector].vector;
}

static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}

static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
	return rocker_msix_vector(rocker_port->rocker,
				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
}

#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
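/* Note: readq()/writeq() come from <linux/io-64-nonatomic-lo-hi.h>, so on
 * 32-bit hosts each 64-bit register access is issued as two 32-bit
 * accesses, low word first.
 */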
/*****************************
 * HW basic testing functions
 *****************************/

static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
static int rocker_dma_test_one(const struct rocker *rocker,
			       struct rocker_wait *wait, u32 test_type,
			       dma_addr_t dma_handle, const unsigned char *buf,
			       const unsigned char *expect, size_t size)
{
	const struct pci_dev *pdev = rocker->pdev;
	int i;

	rocker_wait_reset(wait);
	rocker_write32(rocker, TEST_DMA_CTRL, test_type);

	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		return -EIO;
	}

	for (i = 0; i < size; i++) {
		if (buf[i] != expect[i]) {
			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, %02x expected\n",
				buf[i], i, expect[i]);
			return -EIO;
		}
	}
	return 0;
}

#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
static int rocker_dma_test_offset(const struct rocker *rocker,
				  struct rocker_wait *wait, int offset)
{
	struct pci_dev *pdev = rocker->pdev;
	unsigned char *alloc;
	unsigned char *buf;
	unsigned char *expect;
	dma_addr_t dma_handle;
	int i;
	int err;

	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
			GFP_KERNEL | GFP_DMA);
	if (!alloc)
		return -ENOMEM;
	buf = alloc + offset;
	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;

	dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, dma_handle)) {
		err = -EIO;
		goto free_alloc;
	}

	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);

	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);
	if (err)
		goto unmap;

	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
		expect[i] = ~buf[i];
	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
				  dma_handle, buf, expect,
				  ROCKER_TEST_DMA_BUF_SIZE);

unmap:
	dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
free_alloc:
	kfree(alloc);

	return err;
}

static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int i;
	int err;

	for (i = 0; i < 8; i++) {
		err = rocker_dma_test_offset(rocker, wait, i);
		if (err)
			return err;
	}
	return 0;
}
static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
{
	struct rocker_wait *wait = dev_id;

	rocker_wait_wake_up(wait);

	return IRQ_HANDLED;
}

static int rocker_basic_hw_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_wait wait;
	int err;

	err = rocker_reg_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "reg test failed\n");
		return err;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
			  rocker_test_irq_handler, 0,
			  rocker_driver_name, &wait);
	if (err) {
		dev_err(&pdev->dev, "cannot assign test irq\n");
		return err;
	}

	rocker_wait_init(&wait);
	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);

	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
		err = -EIO;
		goto free_irq;
	}

	err = rocker_dma_test(rocker, &wait);
	if (err)
		dev_err(&pdev->dev, "dma test failed\n");

free_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
	return err;
}
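/* The self test above is run once at probe time, before any netdev is
 * registered: it exercises the scratch registers, the dedicated test
 * MSI-X vector, and DMA fill/clear/invert at eight buffer offsets, so a
 * broken device (or emulation) is rejected early.
 */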
/******************************************
 * DMA rings and descriptors manipulations
 ******************************************/
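/* Each ring is an array of rocker_desc entries shared with the device.
 * The driver produces at head (posting via the DMA_DESC_HEAD register)
 * and consumes completions at tail; a descriptor is complete once the
 * device sets the generation (GEN) bit in comp_err.  Consumed
 * descriptors are handed back in batches through DMA_DESC_CREDITS.
 */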
static u32 __pos_inc(u32 pos, size_t limit)
{
	return ++pos == limit ? 0 : pos;
}

static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	return -EINVAL;
}
static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}

static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
	u32 comp_err = desc_info->desc->comp_err;

	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}

static void *
rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
	return (void *)(uintptr_t)desc_info->desc->cookie;
}

static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
				       void *ptr)
{
	desc_info->desc->cookie = (uintptr_t) ptr;
}

static struct rocker_desc_info *
rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;
	u32 head = __pos_inc(info->head, info->size);

	desc_info = &info->desc_info[info->head];
	if (head == info->tail)
		return NULL; /* ring full */
	desc_info->tlv_size = 0;
	return desc_info;
}

static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
	desc_info->desc->buf_size = desc_info->data_size;
	desc_info->desc->tlv_size = desc_info->tlv_size;
}

static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}

static struct rocker_desc_info *
rocker_desc_tail_get(struct rocker_dma_ring_info *info)
{
	struct rocker_desc_info *desc_info;

	if (info->tail == info->head)
		return NULL; /* nothing to be done between head and tail */
	desc_info = &info->desc_info[info->tail];
	if (!rocker_desc_gen(desc_info))
		return NULL; /* gen bit not set, desc is not ready yet */
	info->tail = __pos_inc(info->tail, info->size);
	desc_info->tlv_size = desc_info->desc->tlv_size;
	return desc_info;
}
static void rocker_dma_ring_credits_set(const struct rocker *rocker,
					const struct rocker_dma_ring_info *info,
					u32 credits)
{
	if (credits)
		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
}

static unsigned long rocker_dma_ring_size_fix(size_t size)
{
	return max(ROCKER_DMA_SIZE_MIN,
		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}

static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	info->desc = dma_alloc_coherent(&rocker->pdev->dev,
					info->size * sizeof(*info->desc),
					&info->mapaddr, GFP_KERNEL);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}

static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	dma_free_coherent(&rocker->pdev->dev,
			  info->size * sizeof(struct rocker_desc), info->desc,
			  info->mapaddr);
	kfree(info->desc_info);
}

static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	rocker_desc_commit(&info->desc_info[i]);
}
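/* Note the last descriptor is only committed, not passed via
 * rocker_desc_head_set(): advancing head onto tail would make a full
 * ring indistinguishable from an empty one, so one slot always stays
 * unused.
 */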
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = dma_map_single(&pdev->dev, buf, buf_size,
					    direction);
		if (dma_mapping_error(&pdev->dev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		dma_unmap_single(&pdev->dev,
				 dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}

static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		desc->buf_addr = 0;
		desc->buf_size = 0;
		dma_unmap_single(&pdev->dev,
				 dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}
static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait;

	wait = rocker_wait_create();
	if (!wait)
		return -ENOMEM;
	rocker_desc_cookie_ptr_set(desc_info, wait);
	return 0;
}

static void
rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info)
{
	struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info);

	rocker_wait_destroy(wait);
}

static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;
	int err;

	for (i = 0; i < cmd_ring->size; i++) {
		err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
	return err;
}

static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker)
{
	const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring;
	int i;

	for (i = 0; i < cmd_ring->size; i++)
		rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]);
}
static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_cmd_ring_waits_alloc(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring waits\n");
		goto err_dma_cmd_ring_waits_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 DMA_FROM_DEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}

static void rocker_dma_rings_fini(struct rocker *rocker)
{
	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
				  DMA_FROM_DEVICE);
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
	rocker_dma_cmd_ring_waits_free(rocker);
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE);
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}

static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}

static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
					 const struct rocker_tlv **attrs)
{
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	size_t len;

	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
		return;
	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
	dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE);
}

static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}

static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}

static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;

	for (i = 0; i < rx_ring->size; i++)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
}
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 DMA_TO_DEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  DMA_TO_DEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}

static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  DMA_TO_DEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
static void rocker_port_set_enable(const struct rocker_port *rocker_port,
				   bool enable)
{
	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

	if (enable)
		val |= 1ULL << rocker_port->pport;
	else
		val &= ~(1ULL << rocker_port->pport);
	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
}
/********************************
 * Interrupt handler and helpers
 ********************************/

static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			rocker_desc_gen_clear(desc_info);
		} else {
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}
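/* Only nowait descriptors are gen-cleared here: for waited commands the
 * submitter in rocker_cmd_exec() still has to read the response TLVs, so
 * it clears the gen bit itself once "process" is done.
 */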
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}

static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}

static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}
static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id);

static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}
static int rocker_event_process(const struct rocker *rocker,
				const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
	const struct rocker_tlv *info;
	u16 type;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
	    !attrs[ROCKER_TLV_EVENT_INFO])
		return -EIO;

	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
	info = attrs[ROCKER_TLV_EVENT_INFO];

	switch (type) {
	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
		return rocker_event_link_change(rocker, info);
	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
		return rocker_event_mac_vlan_seen(rocker, info);
	}

	return -EOPNOTSUPP;
}

static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}

static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
{
	struct rocker_port *rocker_port = dev_id;

	napi_schedule(&rocker_port->napi_rx);
	return IRQ_HANDLED;
}
/********************
 * Command interface
 ********************/
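/* rocker_cmd_exec() is the single entry point for talking to the device:
 * "prepare" fills a command descriptor with TLVs under cmd_ring_lock, the
 * descriptor is posted, and (unless nowait) the caller sleeps on the
 * descriptor's rocker_wait until the command IRQ fires, after which
 * "process" may parse the response from the same descriptor.  A typical
 * caller (cf. rocker_cmd_get_port_settings_ethtool() below) looks like:
 *
 *	err = rocker_cmd_exec(rocker_port, false,
 *			      rocker_cmd_get_port_settings_prep, NULL,
 *			      rocker_cmd_get_port_settings_ethtool_proc,
 *			      ecmd);
 */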
int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait,
		    rocker_cmd_prep_cb_t prepare, void *prepare_priv,
		    rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	unsigned long lock_flags;
	int err;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return -EAGAIN;
	}

	wait = rocker_desc_cookie_ptr_get(desc_info);
	rocker_wait_init(wait);
	wait->nowait = nowait;

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		return err;
	}

	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	if (!rocker_wait_event_timeout(wait, HZ / 10))
		return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
	return err;
}
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
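/* The descriptor payload built above is a flat TLV stream with the
 * command attributes nested under CMD_INFO:
 *
 *	[ CMD_TYPE u16 = GET_PORT_SETTINGS ]
 *	[ CMD_INFO nest ]
 *		[ CMD_PORT_SETTINGS_PPORT u32 ]
 *
 * The device writes its response TLVs into the same buffer, which the
 * *_proc callbacks below walk with rocker_tlv_parse_desc() and
 * rocker_tlv_parse_nested().
 */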
static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_link_ksettings *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	ethtool_link_ksettings_zero_link_mode(ecmd, supported);
	ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);

	ecmd->base.phy_address = 0xff;
	ecmd->base.port = PORT_TP;
	ecmd->base.speed = speed;
	ecmd->base.duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}
static int
rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	u8 *p_mode = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE];
	if (!attr)
		return -EIO;

	*p_mode = rocker_tlv_get_u8(attr);
	return 0;
}
struct port_name {
	char *buf;
	size_t len;
};

static int
rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
					    const struct rocker_desc_info *desc_info,
					    void *priv)
{
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	struct port_name *name = priv;
	const struct rocker_tlv *attr;
	size_t i, j, len;
	const char *str;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
	if (!attr)
		return -EIO;

	len = min_t(size_t, rocker_tlv_len(attr), name->len);
	str = rocker_tlv_data(attr);

	/* make sure name only contains alphanumeric characters */
	for (i = j = 0; i < len; ++i) {
		if (isalnum(str[i])) {
			name->buf[j] = str[i];
			j++;
		}
	}

	if (j == 0)
		return -EIO;

	name->buf[j] = '\0';

	return 0;
}
static int
rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_link_ksettings *ecmd = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
			       ecmd->base.speed))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
			      ecmd->base.duplex))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
			      ecmd->base.autoneg))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
					  struct rocker_desc_info *desc_info,
					  void *priv)
{
	const unsigned char *macaddr = priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
			   ETH_ALEN, macaddr))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      void *priv)
{
	int mtu = *(int *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
			       mtu))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}

static int
rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	bool learning = *(bool *)priv;
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
			      learning))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
static int
rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
				     struct ethtool_link_ksettings *ecmd)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}

static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}

static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port,
					     u8 *p_mode)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_mode_proc, p_mode);
}

static int
rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
				     const struct ethtool_link_ksettings *ecmd)
{
	struct ethtool_link_ksettings copy_ecmd;

	memcpy(&copy_ecmd, ecmd, sizeof(copy_ecmd));

	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       &copy_ecmd, NULL, NULL);
}

static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}

static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}

int rocker_port_set_learning(struct rocker_port *rocker_port,
			     bool learning)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_set_port_learning_prep,
			       &learning, NULL, NULL);
}
/**********************
 * Worlds manipulation
 **********************/
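/* A "world" selects the forwarding pipeline the device exposes; the only
 * world implemented is OF-DPA (rocker_ofdpa_ops).  All ports of a device
 * must report the same mode, which rocker_world_check_init() enforces.
 */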
static struct rocker_world_ops *rocker_world_ops[] = {
	&rocker_ofdpa_ops,
};

#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops)

static struct rocker_world_ops *rocker_world_ops_find(u8 mode)
{
	int i;

	for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++)
		if (rocker_world_ops[i]->mode == mode)
			return rocker_world_ops[i];
	return NULL;
}

static int rocker_world_init(struct rocker *rocker, u8 mode)
{
	struct rocker_world_ops *wops;
	int err;

	wops = rocker_world_ops_find(mode);
	if (!wops) {
		dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n",
			mode);
		return -EINVAL;
	}
	rocker->wops = wops;
	rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL);
	if (!rocker->wpriv)
		return -ENOMEM;
	if (!wops->init)
		return 0;
	err = wops->init(rocker);
	if (err)
		kfree(rocker->wpriv);
	return err;
}

static void rocker_world_fini(struct rocker *rocker)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops || !wops->fini)
		return;
	wops->fini(rocker);
	kfree(rocker->wpriv);
}

static int rocker_world_check_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	u8 mode;
	int err;

	err = rocker_cmd_get_port_settings_mode(rocker_port, &mode);
	if (err) {
		dev_err(&rocker->pdev->dev, "failed to get port mode\n");
		return err;
	}
	if (rocker->wops) {
		if (rocker->wops->mode != mode) {
			dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
			return -EINVAL;
		}
		return 0;
	}
	return rocker_world_init(rocker, mode);
}

static int rocker_world_port_pre_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	int err;

	rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL);
	if (!rocker_port->wpriv)
		return -ENOMEM;
	if (!wops->port_pre_init)
		return 0;
	err = wops->port_pre_init(rocker_port);
	if (err)
		kfree(rocker_port->wpriv);
	return err;
}

static int rocker_world_port_init(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_init)
		return 0;
	return wops->port_init(rocker_port);
}

static void rocker_world_port_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_fini)
		return;
	wops->port_fini(rocker_port);
}

static void rocker_world_port_post_fini(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_post_fini)
		return;
	wops->port_post_fini(rocker_port);
	kfree(rocker_port->wpriv);
}
static int rocker_world_port_open(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_open)
		return 0;
	return wops->port_open(rocker_port);
}

static void rocker_world_port_stop(struct rocker_port *rocker_port)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_stop)
		return;
	wops->port_stop(rocker_port);
}

static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
						u8 state)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_stp_state_set)
		return -EOPNOTSUPP;

	return wops->port_attr_stp_state_set(rocker_port, state);
}

static int
rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
						rocker_port,
						unsigned long *
						p_brport_flags_support)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_support_get)
		return -EOPNOTSUPP;
	return wops->port_attr_bridge_flags_support_get(rocker_port,
							p_brport_flags_support);
}

static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
					    struct switchdev_brport_flags flags)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;
	unsigned long brport_flags_s;
	int err;

	if (!wops->port_attr_bridge_flags_set)
		return -EOPNOTSUPP;

	err = rocker_world_port_attr_bridge_flags_support_get(rocker_port,
							      &brport_flags_s);
	if (err)
		return err;

	if (flags.mask & ~brport_flags_s)
		return -EINVAL;

	return 0;
}

static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					struct switchdev_brport_flags flags)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_flags_set)
		return -EOPNOTSUPP;

	return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
}

static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
					      u32 ageing_time)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_attr_bridge_ageing_time_set)
		return -EOPNOTSUPP;

	return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
}
static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_vlan_add)
		return -EOPNOTSUPP;

	return wops->port_obj_vlan_add(rocker_port, vlan);
}

static int
rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	if (!wops->port_obj_vlan_del)
		return -EOPNOTSUPP;
	return wops->port_obj_vlan_del(rocker_port, vlan);
}

static int
rocker_world_port_fdb_add(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *info)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_add)
		return -EOPNOTSUPP;

	return wops->port_obj_fdb_add(rocker_port, info->vid, info->addr);
}

static int
rocker_world_port_fdb_del(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *info)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_obj_fdb_del)
		return -EOPNOTSUPP;
	return wops->port_obj_fdb_del(rocker_port, info->vid, info->addr);
}

static int rocker_world_port_master_linked(struct rocker_port *rocker_port,
					   struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_linked)
		return -EOPNOTSUPP;
	return wops->port_master_linked(rocker_port, master);
}

static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port,
					     struct net_device *master)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_master_unlinked)
		return -EOPNOTSUPP;
	return wops->port_master_unlinked(rocker_port, master);
}

static int rocker_world_port_neigh_update(struct rocker_port *rocker_port,
					  struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_update)
		return -EOPNOTSUPP;
	return wops->port_neigh_update(rocker_port, n);
}

static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port,
					   struct neighbour *n)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_neigh_destroy)
		return -EOPNOTSUPP;
	return wops->port_neigh_destroy(rocker_port, n);
}

static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port,
					      const unsigned char *addr,
					      __be16 vlan_id)
{
	struct rocker_world_ops *wops = rocker_port->rocker->wops;

	if (!wops->port_ev_mac_vlan_seen)
		return -EOPNOTSUPP;
	return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id);
}

static int rocker_world_fib4_add(struct rocker *rocker,
				 const struct fib_entry_notifier_info *fen_info)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops->fib4_add)
		return -EOPNOTSUPP;
	return wops->fib4_add(rocker, fen_info);
}

static int rocker_world_fib4_del(struct rocker *rocker,
				 const struct fib_entry_notifier_info *fen_info)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (!wops->fib4_del)
		return -EOPNOTSUPP;
	return wops->fib4_del(rocker, fen_info);
}

static void rocker_world_fib4_abort(struct rocker *rocker)
{
	struct rocker_world_ops *wops = rocker->wops;

	if (wops->fib4_abort)
		wops->fib4_abort(rocker);
}
/*****************
 * Net device ops
 *****************/

static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_world_port_open(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot open port in world\n");
		goto err_world_port_open;
	}

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_world_port_open:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}

static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_world_port_stop(rocker_port);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE);
	}
}

static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
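/* The queue is stopped as soon as the *next* tx descriptor cannot be
 * reserved, so under normal operation the ring-full branch at the top of
 * rocker_port_xmit() never triggers; rocker_port_poll_tx() wakes the
 * queue again once completions return credits.
 */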
static int rocker_port_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}

static int rocker_port_get_phys_port_name(struct net_device *dev,
					   char *buf, size_t len)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct port_name name = { .buf = buf, .len = len };
	int err;

	err = rocker_cmd_exec(rocker_port, false,
			      rocker_cmd_get_port_settings_prep, NULL,
			      rocker_cmd_get_port_settings_phys_name_proc,
			      &name);

	return err ? -EOPNOTSUPP : 0;
}

static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}

static void rocker_port_neigh_destroy(struct net_device *dev,
				      struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(n->dev);
	int err;

	err = rocker_world_port_neigh_destroy(rocker_port, n);
	if (err)
		netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n",
			    err);
}

static int rocker_port_get_port_parent_id(struct net_device *dev,
					  struct netdev_phys_item_id *ppid)
{
	const struct rocker_port *rocker_port = netdev_priv(dev);
	const struct rocker *rocker = rocker_port->rocker;

	ppid->id_len = sizeof(rocker->hw.id);
	memcpy(&ppid->id, &rocker->hw.id, ppid->id_len);

	return 0;
}
static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
	.ndo_get_port_parent_id		= rocker_port_get_port_parent_id,
};
/********************
 * swdev ops
 ********************/

static int rocker_port_attr_set(struct net_device *dev,
				const struct switchdev_attr *attr)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = rocker_world_port_attr_stp_state_set(rocker_port,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
								  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = rocker_world_port_attr_bridge_flags_set(rocker_port,
							      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
								    attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_world_port_obj_vlan_add(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int rocker_port_obj_del(struct net_device *dev,
			       const struct switchdev_obj *obj)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = rocker_world_port_obj_vlan_del(rocker_port,
						     SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
struct rocker_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
	};
	struct rocker *rocker;
	unsigned long event;
};

static void rocker_router_fib_event_work(struct work_struct *work)
{
	struct rocker_fib_event_work *fib_work =
		container_of(work, struct rocker_fib_event_work, work);
	struct rocker *rocker = fib_work->rocker;
	struct fib_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE:
		err = rocker_world_fib4_add(rocker, &fib_work->fen_info);
		if (err)
			rocker_world_fib4_abort(rocker);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		rocker_world_fib4_del(rocker, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule))
			rocker_world_fib4_abort(rocker);
		fib_rule_put(rule);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
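/* FIB notifications arrive under rcu_read_lock(), where no command can be
 * issued, so rocker_router_fib_event() below only snapshots the event
 * (taking a reference on the fib_info or fib_rule) and defers the actual
 * programming to the ordered workqueue rocker->rocker_owq.
 */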
/* Called with rcu_read_lock() */
static int rocker_router_fib_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct rocker *rocker = container_of(nb, struct rocker, fib_nb);
	struct rocker_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;

	if (info->family != AF_INET)
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, rocker_router_fib_event_work);
	fib_work->rocker = rocker;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE:
	case FIB_EVENT_ENTRY_DEL:
		if (info->family == AF_INET) {
			struct fib_entry_notifier_info *fen_info = ptr;

			if (fen_info->fi->fib_nh_is_v6) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
				kfree(fib_work);
				return notifier_from_errno(-EINVAL);
			}
			if (fen_info->fi->nh) {
				NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
				kfree(fib_work);
				return notifier_from_errno(-EINVAL);
			}
		}

		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD:
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	}

	queue_work(rocker->rocker_owq, &fib_work->work);

	return NOTIFY_DONE;
}
/********************
 * ethtool interface
 ********************/

static int
rocker_port_get_link_ksettings(struct net_device *dev,
			       struct ethtool_link_ksettings *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
}

static int
rocker_port_set_link_ksettings(struct net_device *dev,
			       const struct ethtool_link_ksettings *ecmd)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
}

static void rocker_port_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}
static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets",	ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",	ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped",	ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",	ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets",	ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",	ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped",	ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",	ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
				    u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}

static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, false,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}

static void rocker_port_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
		int i;

		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
			data[i] = 0;
	}
}

static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ROCKER_PORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
	.get_link_ksettings	= rocker_port_get_link_ksettings,
	.set_link_ksettings	= rocker_port_set_link_ksettings,
};

static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
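
/* Tx completion poll: reclaim finished tx descriptors, unmap their DMA
 * fragments, update tx stats, free the skbs, and hand the consumed
 * credits back to the hardware so the ring entries can be reused. The
 * queue is woken if it was stopped while the ring was full.
 */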
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
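
/* Process one received frame: parse the descriptor's TLVs for the frag
 * length and rx flags, unmap the DMA buffer, size the skb, mark it
 * forward-offloaded when the switch already forwarded it in hardware
 * (so the bridge does not forward it again in software), pass it up
 * the stack, and allocate a fresh skb for the descriptor.
 */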
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = 1;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}

static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}

static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete_done(napi, credits);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}

/*****************
 * PCI driver ops
 *****************/
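
/* Set the initial carrier state from the PORT_PHYS_LINK_STATUS
 * register, which holds a bitmap of link states indexed by pport.
 */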
static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
	bool link_up;

	link_up = link_status & (1 << rocker_port->pport);
	if (link_up)
		netif_carrier_on(rocker_port->dev);
	else
		netif_carrier_off(rocker_port->dev);
}

static void rocker_remove_ports(struct rocker *rocker)
{
	struct rocker_port *rocker_port;
	int i;

	for (i = 0; i < rocker->port_count; i++) {
		rocker_port = rocker->ports[i];
		if (!rocker_port)
			continue;
		rocker_world_port_fini(rocker_port);
		unregister_netdev(rocker_port->dev);
		rocker_world_port_post_fini(rocker_port);
		free_netdev(rocker_port->dev);
	}
	rocker_world_fini(rocker);
	kfree(rocker->ports);
}

static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}
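
/* Allocate and register the netdev for one switch port: query the MAC
 * address from the device, wire up netdev/ethtool ops and NAPI
 * contexts, set the initial carrier state and the MTU range defined
 * below, then run the world (switch model) pre-init/init hooks around
 * register_netdev(). On failure, everything is unwound in reverse
 * order via the error labels at the bottom.
 */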
#define ROCKER_PORT_MIN_MTU	ETH_MIN_MTU
#define ROCKER_PORT_MAX_MTU	9000
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;

	err = rocker_world_check_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "world init failed\n");
		goto err_world_check_init;
	}

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
			  NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	/* MTU range: 68 - 9000 */
	dev->min_mtu = ROCKER_PORT_MIN_MTU;
	dev->max_mtu = ROCKER_PORT_MAX_MTU;

	err = rocker_world_port_pre_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world pre-init failed\n");
		goto err_world_port_pre_init;
	}
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}

	rocker->ports[port_number] = rocker_port;

	err = rocker_world_port_init(rocker_port);
	if (err) {
		dev_err(&pdev->dev, "port world init failed\n");
		goto err_world_port_init;
	}

	return 0;

err_world_port_init:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	rocker_world_port_post_fini(rocker_port);
err_world_port_pre_init:
err_world_check_init:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}
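
/* The device exposes one MSI-X vector for the cmd ring, one for the
 * event ring, and a tx and an rx vector per port (see the
 * ROCKER_MSIX_VEC_* helpers); anything else indicates a misconfigured
 * device, so the exact vector count is required here.
 */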
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int
rocker_switchdev_port_attr_set_event(struct net_device *netdev,
		struct switchdev_notifier_port_attr_info *port_attr_info)
{
	int err;

	err = rocker_port_attr_set(netdev, port_attr_info->attr);

	port_attr_info->handled = true;
	return notifier_from_errno(err);
}

struct rocker_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct rocker_port *rocker_port;
	unsigned long event;
};

static void
rocker_fdb_offload_notify(struct rocker_port *rocker_port,
			  struct switchdev_notifier_fdb_info *recv_info)
{
	struct switchdev_notifier_fdb_info info;

	info.addr = recv_info->addr;
	info.vid = recv_info->vid;
	info.offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
				 rocker_port->dev, &info.info, NULL);
}
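
/* Deferred handler for FDB add/del notifications. The atomic notifier
 * context cannot sleep, so rocker_switchdev_event() queues this work
 * item on the driver's ordered workqueue; here, under RTNL, the FDB
 * entry is programmed into (or removed from) the device, and a
 * successful add is acknowledged back to the bridge via
 * SWITCHDEV_FDB_OFFLOADED.
 */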
static void rocker_switchdev_event_work(struct work_struct *work)
{
	struct rocker_switchdev_event_work *switchdev_work =
		container_of(work, struct rocker_switchdev_event_work, work);
	struct rocker_port *rocker_port = switchdev_work->rocker_port;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = rocker_world_port_fdb_add(rocker_port, fdb_info);
		if (err) {
			netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err);
			break;
		}
		rocker_fdb_offload_notify(rocker_port, fdb_info);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = rocker_world_port_fdb_del(rocker_port, fdb_info);
		if (err)
			netdev_dbg(rocker_port->dev, "fdb del failed err=%d\n", err);
		break;
	}
	rtnl_unlock();

	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(rocker_port->dev);
}

/* called under rcu_read_lock() */
static int rocker_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct rocker_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct rocker_port *rocker_port;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return rocker_switchdev_port_attr_set_event(dev, ptr);

	rocker_port = netdev_priv(dev);
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (WARN_ON(!switchdev_work))
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, rocker_switchdev_event_work);
	switchdev_work->rocker_port = rocker_port;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (unlikely(!switchdev_work->fdb_info.addr)) {
			kfree(switchdev_work);
			return NOTIFY_BAD;
		}

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the rocker device */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(rocker_port->rocker->rocker_owq,
		   &switchdev_work->work);
	return NOTIFY_DONE;
}

static int
rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
			struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = rocker_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = rocker_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}

static int rocker_switchdev_blocking_event(struct notifier_block *unused,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return rocker_switchdev_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return rocker_switchdev_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_switchdev_notifier = {
	.notifier_call = rocker_switchdev_event,
};

static struct notifier_block rocker_switchdev_blocking_notifier = {
	.notifier_call = rocker_switchdev_blocking_event,
};
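
/* Device probe. Ordering matters: PCI/DMA setup and the BAR0 mapping
 * come first, then MSI-X and a basic hardware self-test, then the DMA
 * rings and their irqs, then the ordered workqueue and the per-port
 * netdevs, and finally the FIB and switchdev notifiers that may start
 * delivering events; the error path unwinds in exactly reverse order.
 */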
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct notifier_block *nb;
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->rocker_owq = alloc_ordered_workqueue(rocker_driver_name,
						     WQ_MEM_RECLAIM);
	if (!rocker->rocker_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	/* Only FIBs pointing to our own netdevs are programmed into
	 * the device, so no need to pass a callback.
	 */
	rocker->fib_nb.notifier_call = rocker_router_fib_event;
	err = register_fib_notifier(&init_net, &rocker->fib_nb, NULL, NULL);
	if (err)
		goto err_register_fib_notifier;

	err = register_switchdev_notifier(&rocker_switchdev_notifier);
	if (err) {
		dev_err(&pdev->dev, "Failed to register switchdev notifier\n");
		goto err_register_switchdev_notifier;
	}

	nb = &rocker_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
	unregister_fib_notifier(&init_net, &rocker->fib_nb);
err_register_fib_notifier:
	rocker_remove_ports(rocker);
err_probe_ports:
	destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}
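
/* Device removal mirrors the tail of rocker_probe(): notifiers first so
 * no new events arrive, then the ports, a device reset, and finally the
 * workqueue, irqs, rings, MSI-X and PCI resources.
 */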
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);
	struct notifier_block *nb;

	nb = &rocker_switchdev_blocking_notifier;
	unregister_switchdev_blocking_notifier(nb);

	unregister_switchdev_notifier(&rocker_switchdev_notifier);
	unregister_fib_notifier(&init_net, &rocker->fib_nb);
	rocker_remove_ports(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	destroy_workqueue(rocker->rocker_owq);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check_under(const struct net_device *dev,
					struct rocker *rocker)
{
	struct rocker_port *rocker_port;

	if (!rocker_port_dev_check(dev))
		return false;

	rocker_port = netdev_priv(dev);
	if (rocker_port->rocker != rocker)
		return false;

	return true;
}

struct rocker_walk_data {
	struct rocker *rocker;
	struct rocker_port *port;
};

static int rocker_lower_dev_walk(struct net_device *lower_dev,
				 struct netdev_nested_priv *priv)
{
	struct rocker_walk_data *data = (struct rocker_walk_data *)priv->data;
	int ret = 0;

	if (rocker_port_dev_check_under(lower_dev, data->rocker)) {
		data->port = netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev,
					       struct rocker *rocker)
{
	struct netdev_nested_priv priv;
	struct rocker_walk_data data;

	if (rocker_port_dev_check_under(dev, rocker))
		return netdev_priv(dev);

	data.rocker = rocker;
	data.port = NULL;
	priv.data = (void *)&data;
	netdev_walk_all_lower_dev(dev, rocker_lower_dev_walk, &priv);

	return data.port;
}
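
/* React to a rocker port being enslaved to or released from a master
 * device (e.g. a bridge): on NETDEV_CHANGEUPPER, reflect the link or
 * unlink into the world driver so the switch model can be updated.
 */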
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_world_port_master_linked(rocker_port,
							      info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_world_port_master_unlinked(rocker_port,
								info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct rocker_port *rocker_port;
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		rocker_port = netdev_priv(dev);
		err = rocker_world_port_neigh_update(rocker_port, n);
		if (err)
			netdev_warn(dev, "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);