1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /* Copyright (C) 2018 Microchip Technology Inc. */
4 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/crc32.h>
9 #include <linux/microchipphy.h>
10 #include <linux/net_tstamp.h>
11 #include <linux/phy.h>
12 #include <linux/rtnetlink.h>
13 #include <linux/iopoll.h>
14 #include "lan743x_main.h"
16 static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
18 pci_release_selected_regions(adapter->pdev,
19 pci_select_bars(adapter->pdev,
21 pci_disable_device(adapter->pdev);
24 static int lan743x_pci_init(struct lan743x_adapter *adapter,
27 unsigned long bars = 0;
31 ret = pci_enable_device_mem(pdev);
35 netif_info(adapter, probe, adapter->netdev,
36 "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n",
37 pdev->vendor, pdev->device);
38 bars = pci_select_bars(pdev, IORESOURCE_MEM);
39 if (!test_bit(0, &bars))
42 ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME);
50 pci_disable_device(adapter->pdev);
56 u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset)
58 return ioread32(&adapter->csr.csr_address[offset]);
61 void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data)
63 iowrite32(data, &adapter->csr.csr_address[offset]);
/* Adapter-bound read op for use with readx_poll_timeout(); expects a local
 * variable named 'adapter' to be in scope at each use site.
 */
#define LAN743X_CSR_READ_OP(offset)	lan743x_csr_read(adapter, offset)
68 static int lan743x_csr_light_reset(struct lan743x_adapter *adapter)
72 data = lan743x_csr_read(adapter, HW_CFG);
74 lan743x_csr_write(adapter, HW_CFG, data);
76 return readx_poll_timeout(LAN743X_CSR_READ_OP, HW_CFG, data,
77 !(data & HW_CFG_LRST_), 100000, 10000000);
80 static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter,
81 int offset, u32 bit_mask,
82 int target_value, int usleep_min,
83 int usleep_max, int count)
87 return readx_poll_timeout(LAN743X_CSR_READ_OP, offset, data,
88 target_value == ((data & bit_mask) ? 1 : 0),
89 usleep_max, usleep_min * count);
92 static int lan743x_csr_init(struct lan743x_adapter *adapter)
94 struct lan743x_csr *csr = &adapter->csr;
95 resource_size_t bar_start, bar_length;
98 bar_start = pci_resource_start(adapter->pdev, 0);
99 bar_length = pci_resource_len(adapter->pdev, 0);
100 csr->csr_address = devm_ioremap(&adapter->pdev->dev,
101 bar_start, bar_length);
102 if (!csr->csr_address) {
107 csr->id_rev = lan743x_csr_read(adapter, ID_REV);
108 csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV);
109 netif_info(adapter, probe, adapter->netdev,
110 "ID_REV = 0x%08X, FPGA_REV = %d.%d\n",
111 csr->id_rev, FPGA_REV_GET_MAJOR_(csr->fpga_rev),
112 FPGA_REV_GET_MINOR_(csr->fpga_rev));
113 if (!ID_REV_IS_VALID_CHIP_ID_(csr->id_rev)) {
118 csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
119 switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) {
120 case ID_REV_CHIP_REV_A0_:
121 csr->flags |= LAN743X_CSR_FLAG_IS_A0;
122 csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR;
124 case ID_REV_CHIP_REV_B0_:
125 csr->flags |= LAN743X_CSR_FLAG_IS_B0;
129 result = lan743x_csr_light_reset(adapter);
137 static void lan743x_intr_software_isr(void *context)
139 struct lan743x_adapter *adapter = context;
140 struct lan743x_intr *intr = &adapter->intr;
143 int_sts = lan743x_csr_read(adapter, INT_STS);
144 if (int_sts & INT_BIT_SW_GP_) {
145 lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_);
146 intr->software_isr_flag = 1;
150 static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
152 struct lan743x_tx *tx = context;
153 struct lan743x_adapter *adapter = tx->adapter;
154 bool enable_flag = true;
157 int_en = lan743x_csr_read(adapter, INT_EN_SET);
158 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
159 lan743x_csr_write(adapter, INT_EN_CLR,
160 INT_BIT_DMA_TX_(tx->channel_number));
163 if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) {
164 u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
168 if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
169 dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
171 dmac_int_sts = ioc_bit;
172 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
173 dmac_int_en = lan743x_csr_read(adapter,
176 dmac_int_en = ioc_bit;
178 dmac_int_en &= ioc_bit;
179 dmac_int_sts &= dmac_int_en;
180 if (dmac_int_sts & ioc_bit) {
181 napi_schedule(&tx->napi);
182 enable_flag = false;/* poll func will enable later */
188 lan743x_csr_write(adapter, INT_EN_SET,
189 INT_BIT_DMA_TX_(tx->channel_number));
192 static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags)
194 struct lan743x_rx *rx = context;
195 struct lan743x_adapter *adapter = rx->adapter;
196 bool enable_flag = true;
198 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
199 lan743x_csr_write(adapter, INT_EN_CLR,
200 INT_BIT_DMA_RX_(rx->channel_number));
203 if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) {
204 u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number);
208 if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ)
209 dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
211 dmac_int_sts = rx_frame_bit;
212 if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK)
213 dmac_int_en = lan743x_csr_read(adapter,
216 dmac_int_en = rx_frame_bit;
218 dmac_int_en &= rx_frame_bit;
219 dmac_int_sts &= dmac_int_en;
220 if (dmac_int_sts & rx_frame_bit) {
221 napi_schedule(&rx->napi);
222 enable_flag = false;/* poll funct will enable later */
228 lan743x_csr_write(adapter, INT_EN_SET,
229 INT_BIT_DMA_RX_(rx->channel_number));
233 static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags)
235 struct lan743x_adapter *adapter = context;
236 unsigned int channel;
238 if (int_sts & INT_BIT_ALL_RX_) {
239 for (channel = 0; channel < LAN743X_USED_RX_CHANNELS;
241 u32 int_bit = INT_BIT_DMA_RX_(channel);
243 if (int_sts & int_bit) {
244 lan743x_rx_isr(&adapter->rx[channel],
250 if (int_sts & INT_BIT_ALL_TX_) {
251 for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
253 u32 int_bit = INT_BIT_DMA_TX_(channel);
255 if (int_sts & int_bit) {
256 lan743x_tx_isr(&adapter->tx[channel],
262 if (int_sts & INT_BIT_ALL_OTHER_) {
263 if (int_sts & INT_BIT_SW_GP_) {
264 lan743x_intr_software_isr(adapter);
265 int_sts &= ~INT_BIT_SW_GP_;
269 lan743x_csr_write(adapter, INT_EN_CLR, int_sts);
272 static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr)
274 struct lan743x_vector *vector = ptr;
275 struct lan743x_adapter *adapter = vector->adapter;
276 irqreturn_t result = IRQ_NONE;
280 if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) {
281 int_sts = lan743x_csr_read(adapter, INT_STS);
282 } else if (vector->flags &
283 (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C |
284 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) {
285 int_sts = lan743x_csr_read(adapter, INT_STS_R2C);
287 /* use mask as implied status */
288 int_sts = vector->int_mask | INT_BIT_MAS_;
291 if (!(int_sts & INT_BIT_MAS_))
294 if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR)
295 /* disable vector interrupt */
296 lan743x_csr_write(adapter,
298 INT_VEC_EN_(vector->vector_index));
300 if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR)
301 /* disable master interrupt */
302 lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
304 if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) {
305 int_enables = lan743x_csr_read(adapter, INT_EN_SET);
307 /* use vector mask as implied enable mask */
308 int_enables = vector->int_mask;
311 int_sts &= int_enables;
312 int_sts &= vector->int_mask;
314 if (vector->handler) {
315 vector->handler(vector->context,
316 int_sts, vector->flags);
318 /* disable interrupts on this vector */
319 lan743x_csr_write(adapter, INT_EN_CLR,
322 result = IRQ_HANDLED;
325 if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET)
326 /* enable master interrupt */
327 lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
329 if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET)
330 /* enable vector interrupt */
331 lan743x_csr_write(adapter,
333 INT_VEC_EN_(vector->vector_index));
338 static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
340 struct lan743x_intr *intr = &adapter->intr;
341 int result = -ENODEV;
344 intr->software_isr_flag = 0;
346 /* enable interrupt */
347 lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
349 /* activate interrupt here */
350 lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
351 while ((timeout > 0) && (!(intr->software_isr_flag))) {
352 usleep_range(1000, 20000);
356 if (intr->software_isr_flag)
359 /* disable interrupts */
360 lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
364 static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
365 int vector_index, u32 flags,
367 lan743x_vector_handler handler,
370 struct lan743x_vector *vector = &adapter->intr.vector_list
374 vector->adapter = adapter;
375 vector->flags = flags;
376 vector->vector_index = vector_index;
377 vector->int_mask = int_mask;
378 vector->handler = handler;
379 vector->context = context;
381 ret = request_irq(vector->irq,
382 lan743x_intr_entry_isr,
383 (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ?
384 IRQF_SHARED : 0, DRIVER_NAME, vector);
386 vector->handler = NULL;
387 vector->context = NULL;
388 vector->int_mask = 0;
394 static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter,
397 struct lan743x_vector *vector = &adapter->intr.vector_list
400 free_irq(vector->irq, vector);
401 vector->handler = NULL;
402 vector->context = NULL;
403 vector->int_mask = 0;
407 static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter,
412 for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
413 if (adapter->intr.vector_list[index].int_mask & int_mask)
414 return adapter->intr.vector_list[index].flags;
419 static void lan743x_intr_close(struct lan743x_adapter *adapter)
421 struct lan743x_intr *intr = &adapter->intr;
424 lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
425 lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
427 for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
428 if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
429 lan743x_intr_unregister_isr(adapter, index);
430 intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
434 if (intr->flags & INTR_FLAG_MSI_ENABLED) {
435 pci_disable_msi(adapter->pdev);
436 intr->flags &= ~INTR_FLAG_MSI_ENABLED;
439 if (intr->flags & INTR_FLAG_MSIX_ENABLED) {
440 pci_disable_msix(adapter->pdev);
441 intr->flags &= ~INTR_FLAG_MSIX_ENABLED;
445 static int lan743x_intr_open(struct lan743x_adapter *adapter)
447 struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
448 struct lan743x_intr *intr = &adapter->intr;
449 u32 int_vec_en_auto_clr = 0;
450 u32 int_vec_map0 = 0;
451 u32 int_vec_map1 = 0;
456 intr->number_of_vectors = 0;
458 /* Try to set up MSIX interrupts */
459 memset(&msix_entries[0], 0,
460 sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
461 for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
462 msix_entries[index].entry = index;
463 ret = pci_enable_msix_range(adapter->pdev,
465 1 + LAN743X_USED_TX_CHANNELS +
466 LAN743X_USED_RX_CHANNELS);
469 intr->flags |= INTR_FLAG_MSIX_ENABLED;
470 intr->number_of_vectors = ret;
471 intr->using_vectors = true;
472 for (index = 0; index < intr->number_of_vectors; index++)
473 intr->vector_list[index].irq = msix_entries
475 netif_info(adapter, ifup, adapter->netdev,
476 "using MSIX interrupts, number of vectors = %d\n",
477 intr->number_of_vectors);
480 /* If MSIX failed try to setup using MSI interrupts */
481 if (!intr->number_of_vectors) {
482 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
483 if (!pci_enable_msi(adapter->pdev)) {
484 intr->flags |= INTR_FLAG_MSI_ENABLED;
485 intr->number_of_vectors = 1;
486 intr->using_vectors = true;
487 intr->vector_list[0].irq =
489 netif_info(adapter, ifup, adapter->netdev,
490 "using MSI interrupts, number of vectors = %d\n",
491 intr->number_of_vectors);
496 /* If MSIX, and MSI failed, setup using legacy interrupt */
497 if (!intr->number_of_vectors) {
498 intr->number_of_vectors = 1;
499 intr->using_vectors = false;
500 intr->vector_list[0].irq = intr->irq;
501 netif_info(adapter, ifup, adapter->netdev,
502 "using legacy interrupts\n");
505 /* At this point we must have at least one irq */
506 lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF);
508 /* map all interrupts to vector 0 */
509 lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000);
510 lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000);
511 lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000);
512 flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
513 LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
514 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
515 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
517 if (intr->using_vectors) {
518 flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
519 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
521 flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR |
522 LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET |
523 LAN743X_VECTOR_FLAG_IRQ_SHARED;
526 if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
527 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ;
528 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C;
529 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR;
530 flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK;
531 flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C;
532 flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
535 ret = lan743x_intr_register_isr(adapter, 0, flags,
536 INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
538 lan743x_intr_shared_isr, adapter);
541 intr->flags |= INTR_FLAG_IRQ_REQUESTED(0);
543 if (intr->using_vectors)
544 lan743x_csr_write(adapter, INT_VEC_EN_SET,
547 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
548 lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD);
549 lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD);
550 lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD);
551 lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD);
552 lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD);
553 lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
554 lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
555 lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
556 lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
557 lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
558 lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
561 /* enable interrupts */
562 lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_);
563 ret = lan743x_intr_test_isr(adapter);
567 if (intr->number_of_vectors > 1) {
568 int number_of_tx_vectors = intr->number_of_vectors - 1;
570 if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
571 number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
572 flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
573 LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
574 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
575 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
576 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
577 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
579 if (adapter->csr.flags &
580 LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
581 flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
582 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
583 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
584 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
585 LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
588 for (index = 0; index < number_of_tx_vectors; index++) {
589 u32 int_bit = INT_BIT_DMA_TX_(index);
590 int vector = index + 1;
592 /* map TX interrupt to vector */
593 int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
594 lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
596 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
597 int_vec_en_auto_clr |= INT_VEC_EN_(vector);
598 lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
599 int_vec_en_auto_clr);
602 /* Remove TX interrupt from shared mask */
603 intr->vector_list[0].int_mask &= ~int_bit;
604 ret = lan743x_intr_register_isr(adapter, vector, flags,
605 int_bit, lan743x_tx_isr,
606 &adapter->tx[index]);
609 intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
611 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET))
612 lan743x_csr_write(adapter, INT_VEC_EN_SET,
613 INT_VEC_EN_(vector));
616 if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
617 int number_of_rx_vectors = intr->number_of_vectors -
618 LAN743X_USED_TX_CHANNELS - 1;
620 if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
621 number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
623 flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
624 LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
625 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
626 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR |
627 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR |
628 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET;
630 if (adapter->csr.flags &
631 LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
632 flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
633 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
634 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
635 LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
636 LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
638 for (index = 0; index < number_of_rx_vectors; index++) {
639 int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
640 u32 int_bit = INT_BIT_DMA_RX_(index);
642 /* map RX interrupt to vector */
643 int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector);
644 lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0);
646 LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
647 int_vec_en_auto_clr |= INT_VEC_EN_(vector);
648 lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
649 int_vec_en_auto_clr);
652 /* Remove RX interrupt from shared mask */
653 intr->vector_list[0].int_mask &= ~int_bit;
654 ret = lan743x_intr_register_isr(adapter, vector, flags,
655 int_bit, lan743x_rx_isr,
656 &adapter->rx[index]);
659 intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector);
661 lan743x_csr_write(adapter, INT_VEC_EN_SET,
662 INT_VEC_EN_(vector));
668 lan743x_intr_close(adapter);
672 static int lan743x_dp_write(struct lan743x_adapter *adapter,
673 u32 select, u32 addr, u32 length, u32 *buf)
679 mutex_lock(&adapter->dp_lock);
680 if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
683 dp_sel = lan743x_csr_read(adapter, DP_SEL);
684 dp_sel &= ~DP_SEL_MASK_;
686 lan743x_csr_write(adapter, DP_SEL, dp_sel);
688 for (i = 0; i < length; i++) {
689 lan743x_csr_write(adapter, DP_ADDR, addr + i);
690 lan743x_csr_write(adapter, DP_DATA_0, buf[i]);
691 lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
692 if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
699 mutex_unlock(&adapter->dp_lock);
703 static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
707 ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
708 MAC_MII_ACC_PHY_ADDR_MASK_;
709 ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) &
710 MAC_MII_ACC_MIIRINDA_MASK_;
713 ret |= MAC_MII_ACC_MII_READ_;
715 ret |= MAC_MII_ACC_MII_WRITE_;
716 ret |= MAC_MII_ACC_MII_BUSY_;
721 static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
725 return readx_poll_timeout(LAN743X_CSR_READ_OP, MAC_MII_ACC, data,
726 !(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
729 static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
731 struct lan743x_adapter *adapter = bus->priv;
735 /* comfirm MII not busy */
736 ret = lan743x_mac_mii_wait_till_not_busy(adapter);
740 /* set the address, index & direction (read from PHY) */
741 mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ);
742 lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
743 ret = lan743x_mac_mii_wait_till_not_busy(adapter);
747 val = lan743x_csr_read(adapter, MAC_MII_DATA);
748 return (int)(val & 0xFFFF);
751 static int lan743x_mdiobus_write(struct mii_bus *bus,
752 int phy_id, int index, u16 regval)
754 struct lan743x_adapter *adapter = bus->priv;
758 /* confirm MII not busy */
759 ret = lan743x_mac_mii_wait_till_not_busy(adapter);
763 lan743x_csr_write(adapter, MAC_MII_DATA, val);
765 /* set the address, index & direction (write to PHY) */
766 mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE);
767 lan743x_csr_write(adapter, MAC_MII_ACC, mii_access);
768 ret = lan743x_mac_mii_wait_till_not_busy(adapter);
772 static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
775 u32 addr_lo, addr_hi;
783 lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo);
784 lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi);
786 ether_addr_copy(adapter->mac_address, addr);
787 netif_info(adapter, drv, adapter->netdev,
788 "MAC address set to %pM\n", addr);
791 static int lan743x_mac_init(struct lan743x_adapter *adapter)
793 bool mac_address_valid = true;
794 struct net_device *netdev;
800 netdev = adapter->netdev;
801 lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
802 ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
803 0, 1000, 20000, 100);
807 /* setup auto duplex, and speed detection */
808 data = lan743x_csr_read(adapter, MAC_CR);
809 data |= MAC_CR_ADD_ | MAC_CR_ASD_;
810 data |= MAC_CR_CNTR_RST_;
811 lan743x_csr_write(adapter, MAC_CR, data);
813 mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
814 mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
815 adapter->mac_address[0] = mac_addr_lo & 0xFF;
816 adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
817 adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
818 adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
819 adapter->mac_address[4] = mac_addr_hi & 0xFF;
820 adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
822 if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
823 mac_addr_lo == 0xFFFFFFFF) {
824 mac_address_valid = false;
825 } else if (!is_valid_ether_addr(adapter->mac_address)) {
826 mac_address_valid = false;
829 if (!mac_address_valid)
830 random_ether_addr(adapter->mac_address);
831 lan743x_mac_set_address(adapter, adapter->mac_address);
832 ether_addr_copy(netdev->dev_addr, adapter->mac_address);
836 static int lan743x_mac_open(struct lan743x_adapter *adapter)
841 temp = lan743x_csr_read(adapter, MAC_RX);
842 lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
843 temp = lan743x_csr_read(adapter, MAC_TX);
844 lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
848 static void lan743x_mac_close(struct lan743x_adapter *adapter)
852 temp = lan743x_csr_read(adapter, MAC_TX);
853 temp &= ~MAC_TX_TXEN_;
854 lan743x_csr_write(adapter, MAC_TX, temp);
855 lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_,
856 1, 1000, 20000, 100);
858 temp = lan743x_csr_read(adapter, MAC_RX);
859 temp &= ~MAC_RX_RXEN_;
860 lan743x_csr_write(adapter, MAC_RX, temp);
861 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
862 1, 1000, 20000, 100);
865 static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter,
866 bool tx_enable, bool rx_enable)
868 u32 flow_setting = 0;
870 /* set maximum pause time because when fifo space frees
871 * up a zero value pause frame will be sent to release the pause
873 flow_setting = MAC_FLOW_CR_FCPT_MASK_;
875 flow_setting |= MAC_FLOW_CR_TX_FCEN_;
877 flow_setting |= MAC_FLOW_CR_RX_FCEN_;
878 lan743x_csr_write(adapter, MAC_FLOW, flow_setting);
881 static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
886 mac_rx = lan743x_csr_read(adapter, MAC_RX);
887 if (mac_rx & MAC_RX_RXEN_) {
889 if (mac_rx & MAC_RX_RXD_) {
890 lan743x_csr_write(adapter, MAC_RX, mac_rx);
891 mac_rx &= ~MAC_RX_RXD_;
893 mac_rx &= ~MAC_RX_RXEN_;
894 lan743x_csr_write(adapter, MAC_RX, mac_rx);
895 lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_,
896 1, 1000, 20000, 100);
897 lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_);
900 mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
901 mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
902 MAC_RX_MAX_SIZE_MASK_);
903 lan743x_csr_write(adapter, MAC_RX, mac_rx);
906 mac_rx |= MAC_RX_RXEN_;
907 lan743x_csr_write(adapter, MAC_RX, mac_rx);
913 static int lan743x_phy_reset(struct lan743x_adapter *adapter)
917 /* Only called with in probe, and before mdiobus_register */
919 data = lan743x_csr_read(adapter, PMT_CTL);
920 data |= PMT_CTL_ETH_PHY_RST_;
921 lan743x_csr_write(adapter, PMT_CTL, data);
923 return readx_poll_timeout(LAN743X_CSR_READ_OP, PMT_CTL, data,
924 (!(data & PMT_CTL_ETH_PHY_RST_) &&
925 (data & PMT_CTL_READY_)),
929 static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
930 u8 duplex, u16 local_adv,
933 struct lan743x_phy *phy = &adapter->phy;
937 cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv);
939 cap = phy->fc_request_control;
941 lan743x_mac_flow_ctrl_set_enables(adapter,
946 static int lan743x_phy_init(struct lan743x_adapter *adapter)
948 struct net_device *netdev;
951 netdev = adapter->netdev;
952 ret = lan743x_phy_reset(adapter);
959 static void lan743x_phy_link_status_change(struct net_device *netdev)
961 struct lan743x_adapter *adapter = netdev_priv(netdev);
962 struct phy_device *phydev = netdev->phydev;
964 phy_print_status(phydev);
965 if (phydev->state == PHY_RUNNING) {
966 struct ethtool_link_ksettings ksettings;
967 struct lan743x_phy *phy = NULL;
968 int remote_advertisement = 0;
969 int local_advertisement = 0;
972 memset(&ksettings, 0, sizeof(ksettings));
973 phy_ethtool_get_link_ksettings(netdev, &ksettings);
974 local_advertisement = phy_read(phydev, MII_ADVERTISE);
975 if (local_advertisement < 0)
978 remote_advertisement = phy_read(phydev, MII_LPA);
979 if (remote_advertisement < 0)
982 lan743x_phy_update_flowcontrol(adapter,
983 ksettings.base.duplex,
985 remote_advertisement);
989 static void lan743x_phy_close(struct lan743x_adapter *adapter)
991 struct net_device *netdev = adapter->netdev;
993 phy_stop(netdev->phydev);
994 phy_disconnect(netdev->phydev);
995 netdev->phydev = NULL;
998 static int lan743x_phy_open(struct lan743x_adapter *adapter)
1000 struct lan743x_phy *phy = &adapter->phy;
1001 struct phy_device *phydev;
1002 struct net_device *netdev;
1006 netdev = adapter->netdev;
1007 phydev = phy_find_first(adapter->mdiobus);
1011 ret = phy_connect_direct(netdev, phydev,
1012 lan743x_phy_link_status_change,
1013 PHY_INTERFACE_MODE_GMII);
1017 /* MAC doesn't support 1000T Half */
1018 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1020 /* support both flow controls */
1021 phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1022 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1023 mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control);
1024 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1025 phy->fc_autoneg = phydev->autoneg;
1028 phy_start_aneg(phydev);
1035 static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter)
1038 u32 mac_addr_hi = 0;
1039 u32 mac_addr_lo = 0;
1041 /* Add mac address to perfect Filter */
1042 mac_addr = adapter->mac_address;
1043 mac_addr_lo = ((((u32)(mac_addr[0])) << 0) |
1044 (((u32)(mac_addr[1])) << 8) |
1045 (((u32)(mac_addr[2])) << 16) |
1046 (((u32)(mac_addr[3])) << 24));
1047 mac_addr_hi = ((((u32)(mac_addr[4])) << 0) |
1048 (((u32)(mac_addr[5])) << 8));
1050 lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo);
1051 lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0),
1052 mac_addr_hi | RFE_ADDR_FILT_HI_VALID_);
1055 static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter)
1057 struct net_device *netdev = adapter->netdev;
1058 u32 hash_table[DP_SEL_VHF_HASH_LEN];
1062 rfctl = lan743x_csr_read(adapter, RFE_CTL);
1063 rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ |
1064 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1065 rfctl |= RFE_CTL_AB_;
1066 if (netdev->flags & IFF_PROMISC) {
1067 rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_;
1069 if (netdev->flags & IFF_ALLMULTI)
1070 rfctl |= RFE_CTL_AM_;
1073 memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32));
1074 if (netdev_mc_count(netdev)) {
1075 struct netdev_hw_addr *ha;
1078 rfctl |= RFE_CTL_DA_PERFECT_;
1080 netdev_for_each_mc_addr(ha, netdev) {
1081 /* set first 32 into Perfect Filter */
1083 lan743x_csr_write(adapter,
1084 RFE_ADDR_FILT_HI(i), 0);
1086 data = ha->addr[2] | (data << 8);
1087 data = ha->addr[1] | (data << 8);
1088 data = ha->addr[0] | (data << 8);
1089 lan743x_csr_write(adapter,
1090 RFE_ADDR_FILT_LO(i), data);
1092 data = ha->addr[4] | (data << 8);
1093 data |= RFE_ADDR_FILT_HI_VALID_;
1094 lan743x_csr_write(adapter,
1095 RFE_ADDR_FILT_HI(i), data);
1097 u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >>
1099 hash_table[bitnum / 32] |= (1 << (bitnum % 32));
1100 rfctl |= RFE_CTL_MCAST_HASH_;
1106 lan743x_dp_write(adapter, DP_SEL_RFE_RAM,
1107 DP_SEL_VHF_VLAN_LEN,
1108 DP_SEL_VHF_HASH_LEN, hash_table);
1109 lan743x_csr_write(adapter, RFE_CTL, rfctl);
1112 static int lan743x_dmac_init(struct lan743x_adapter *adapter)
1116 lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_);
1117 lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_,
1118 0, 1000, 20000, 100);
1119 switch (DEFAULT_DMA_DESCRIPTOR_SPACING) {
1120 case DMA_DESCRIPTOR_SPACING_16:
1121 data = DMAC_CFG_MAX_DSPACE_16_;
1123 case DMA_DESCRIPTOR_SPACING_32:
1124 data = DMAC_CFG_MAX_DSPACE_32_;
1126 case DMA_DESCRIPTOR_SPACING_64:
1127 data = DMAC_CFG_MAX_DSPACE_64_;
1129 case DMA_DESCRIPTOR_SPACING_128:
1130 data = DMAC_CFG_MAX_DSPACE_128_;
1135 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1136 data |= DMAC_CFG_COAL_EN_;
1137 data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_;
1138 data |= DMAC_CFG_MAX_READ_REQ_SET_(6);
1139 lan743x_csr_write(adapter, DMAC_CFG, data);
1140 data = DMAC_COAL_CFG_TIMER_LIMIT_SET_(1);
1141 data |= DMAC_COAL_CFG_TIMER_TX_START_;
1142 data |= DMAC_COAL_CFG_FLUSH_INTS_;
1143 data |= DMAC_COAL_CFG_INT_EXIT_COAL_;
1144 data |= DMAC_COAL_CFG_CSR_EXIT_COAL_;
1145 data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A);
1146 data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C);
1147 lan743x_csr_write(adapter, DMAC_COAL_CFG, data);
1148 data = DMAC_OBFF_TX_THRES_SET_(0x08);
1149 data |= DMAC_OBFF_RX_THRES_SET_(0x0A);
1150 lan743x_csr_write(adapter, DMAC_OBFF_CFG, data);
1154 static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter,
1159 dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1160 return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1161 DMAC_CMD_START_T_(tx_channel)),
1163 DMAC_CMD_STOP_T_(tx_channel)));
1166 static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter,
1173 ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) ==
1174 DMAC_CHANNEL_STATE_STOP_PENDING)) {
1175 usleep_range(1000, 20000);
1178 if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
1183 static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter,
1188 dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD);
1189 return DMAC_CHANNEL_STATE_SET((dmac_cmd &
1190 DMAC_CMD_START_R_(rx_channel)),
1192 DMAC_CMD_STOP_R_(rx_channel)));
1195 static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter,
1202 ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) ==
1203 DMAC_CHANNEL_STATE_STOP_PENDING)) {
1204 usleep_range(1000, 20000);
1207 if (result == DMAC_CHANNEL_STATE_STOP_PENDING)
/* Release one TX descriptor and its buffer_info: unmap DMA (page unmap
 * for SKB fragments, single unmap otherwise), free any attached skb, and
 * clear the ACTIVE flag. In the cleanup==true path (tail of the function)
 * both structures are zeroed entirely.
 * Bails out early if the buffer_info is not marked ACTIVE, or if the
 * descriptor type is not a DATA descriptor (only DATA descriptors own
 * DMA mappings).
 * NOTE(review): listing is fragmentary; DMA direction arguments and the
 * exact cleanup/skb branches are partly elided.
 */
1212 static void lan743x_tx_release_desc(struct lan743x_tx *tx,
1213 int descriptor_index, bool cleanup)
1215 struct lan743x_tx_buffer_info *buffer_info = NULL;
1216 struct lan743x_tx_descriptor *descriptor = NULL;
1217 u32 descriptor_type = 0;
1219 descriptor = &tx->ring_cpu_ptr[descriptor_index];
1220 buffer_info = &tx->buffer_info[descriptor_index];
/* Nothing to release if this slot was never activated. */
1221 if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
1224 descriptor_type = (descriptor->data0) &
1225 TX_DESC_DATA0_DTYPE_MASK_;
1226 if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
1227 goto clean_up_data_descriptor;
1231 clean_up_data_descriptor:
1232 if (buffer_info->dma_ptr) {
/* Fragments were mapped with skb_frag_dma_map -> unmap as a page;
 * the head buffer was mapped with dma_map_single.
 */
1233 if (buffer_info->flags &
1234 TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) {
1235 dma_unmap_page(&tx->adapter->pdev->dev,
1236 buffer_info->dma_ptr,
1237 buffer_info->buffer_length,
1240 dma_unmap_single(&tx->adapter->pdev->dev,
1241 buffer_info->dma_ptr,
1242 buffer_info->buffer_length,
1245 buffer_info->dma_ptr = 0;
1246 buffer_info->buffer_length = 0;
1248 if (buffer_info->skb) {
1249 dev_kfree_skb(buffer_info->skb);
1250 buffer_info->skb = NULL;
1254 buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE;
/* Full scrub of both ring entry and bookkeeping record. */
1257 memset(buffer_info, 0, sizeof(*buffer_info));
1258 memset(descriptor, 0, sizeof(*descriptor));
/* Advance a TX ring index by one, wrapping at ring_size (modular ring
 * arithmetic).
 */
1261 static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
1263 return ((++index) % tx->ring_size);
/* Reclaim every descriptor the hardware has consumed: walk last_head
 * forward until it catches up with the device's head writeback value
 * (*tx->head_cpu_ptr), releasing each slot (cleanup=false: normal
 * completion, not teardown).
 */
1266 static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
1268 while ((*tx->head_cpu_ptr) != (tx->last_head)) {
1269 lan743x_tx_release_desc(tx, tx->last_head, false);
1270 tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
/* Teardown helper: release every descriptor in the ring (one full lap
 * from last_head back to itself, cleanup=true), then zero the whole
 * descriptor ring and buffer_info array.
 */
1274 static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx)
1276 u32 original_head = 0;
1278 original_head = tx->last_head;
1280 lan743x_tx_release_desc(tx, tx->last_head, true);
1281 tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
1282 } while (tx->last_head != original_head);
1283 memset(tx->ring_cpu_ptr, 0,
1284 sizeof(*tx->ring_cpu_ptr) * (tx->ring_size));
1285 memset(tx->buffer_info, 0,
1286 sizeof(*tx->buffer_info) * (tx->ring_size));
/* Count how many TX descriptors an skb will need: one for the linear
 * head buffer, one extra extension descriptor if the skb is GSO, plus
 * one per page fragment.
 */
1289 static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx,
1290 struct sk_buff *skb)
1292 int result = 1; /* 1 for the main skb buffer */
1295 if (skb_is_gso(skb))
1296 result++; /* requires an extension descriptor */
1297 nr_frags = skb_shinfo(skb)->nr_frags;
1298 result += nr_frags; /* 1 for each fragment buffer */
/* Number of free TX descriptors between tail and head. One slot is
 * always kept unused (the -1) so a full ring can be distinguished from
 * an empty one.
 */
1302 static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
1304 int last_head = tx->last_head;
1305 int last_tail = tx->last_tail;
/* Tail at or past head: free space wraps around the ring end. */
1307 if (last_tail >= last_head)
1308 return tx->ring_size - last_tail + last_head - 1;
1310 return last_head - last_tail - 1;
/* Begin assembling a TX frame: map the linear head buffer for DMA,
 * populate the first descriptor's address/length words, mark the
 * buffer_info ACTIVE, and stage data0 (written later by the next frame
 * assembler step or lan743x_tx_frame_end). Adds checksum-offload bits
 * to data0 when the caller requests them (ip_summed == CHECKSUM_PARTIAL
 * at the call site).
 * Returns nonzero on DMA mapping failure — TODO confirm exact error
 * value (lines elided). Caller must hold tx->ring_lock.
 */
1313 static int lan743x_tx_frame_start(struct lan743x_tx *tx,
1314 unsigned char *first_buffer,
1315 unsigned int first_buffer_length,
1316 unsigned int frame_length,
1319 /* called only from within lan743x_tx_xmit_frame.
1320 * assuming tx->ring_lock has already been acquired.
1322 struct lan743x_tx_descriptor *tx_descriptor = NULL;
1323 struct lan743x_tx_buffer_info *buffer_info = NULL;
1324 struct lan743x_adapter *adapter = tx->adapter;
1325 struct device *dev = &adapter->pdev->dev;
1328 tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS;
1329 tx->frame_first = tx->last_tail;
1330 tx->frame_tail = tx->frame_first;
1332 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1333 buffer_info = &tx->buffer_info[tx->frame_tail];
1334 dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length,
1336 if (dma_mapping_error(dev, dma_ptr))
/* 64-bit DMA address split across data1 (low) / data2 (high). */
1339 tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
1340 tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
1341 tx_descriptor->data3 = (frame_length << 16) &
1342 TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
1344 buffer_info->skb = NULL;
1345 buffer_info->dma_ptr = dma_ptr;
1346 buffer_info->buffer_length = first_buffer_length;
1347 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1349 tx->frame_data0 = (first_buffer_length &
1350 TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1351 TX_DESC_DATA0_DTYPE_DATA_ |
/* Checksum-offload enable bits (insert checksum / IP / ...). */
1356 tx->frame_data0 |= TX_DESC_DATA0_ICE_ |
1357 TX_DESC_DATA0_IPE_ |
1360 /* data0 will be programmed in one of other frame assembler functions */
/* Insert an LSO extension descriptor into the frame being assembled:
 * finalize the previous descriptor (with the EXT flag set in its data0),
 * advance frame_tail, zero the new descriptor's address words, mark its
 * buffer_info ACTIVE (no DMA buffer attached), and stage data0 for an
 * extension descriptor carrying the total payload length.
 * Caller must hold tx->ring_lock.
 */
1364 static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
1365 unsigned int frame_length)
1367 /* called only from within lan743x_tx_xmit_frame.
1368 * assuming tx->ring_lock has already been acquired.
1370 struct lan743x_tx_descriptor *tx_descriptor = NULL;
1371 struct lan743x_tx_buffer_info *buffer_info = NULL;
1373 /* wrap up previous descriptor */
1374 tx->frame_data0 |= TX_DESC_DATA0_EXT_;
1375 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1376 tx_descriptor->data0 = tx->frame_data0;
1378 /* move to next descriptor */
1379 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1380 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1381 buffer_info = &tx->buffer_info[tx->frame_tail];
1383 /* add extension descriptor */
1384 tx_descriptor->data1 = 0;
1385 tx_descriptor->data2 = 0;
1386 tx_descriptor->data3 = 0;
1388 buffer_info->skb = NULL;
1389 buffer_info->dma_ptr = 0;
1390 buffer_info->buffer_length = 0;
1391 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1393 tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) |
1394 TX_DESC_DATA0_DTYPE_EXT_ |
1395 TX_DESC_DATA0_EXT_LSO_;
1397 /* data0 will be programmed in one of other frame assembler functions */
/* Append one skb page fragment to the frame being assembled: finalize
 * the previous descriptor, advance frame_tail, DMA-map the fragment and
 * fill the new descriptor. Empty fragments are skipped (early return).
 * On DMA mapping failure the entire partially-built frame is unwound
 * (every descriptor from frame_first to frame_tail released) and a
 * nonzero error is returned — TODO confirm exact value (lines elided).
 * Caller must hold tx->ring_lock.
 */
1400 static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
1401 const struct skb_frag_struct *fragment,
1402 unsigned int frame_length)
1404 /* called only from within lan743x_tx_xmit_frame
1405 * assuming tx->ring_lock has already been acquired
1407 struct lan743x_tx_descriptor *tx_descriptor = NULL;
1408 struct lan743x_tx_buffer_info *buffer_info = NULL;
1409 struct lan743x_adapter *adapter = tx->adapter;
1410 struct device *dev = &adapter->pdev->dev;
1411 unsigned int fragment_length = 0;
1414 fragment_length = skb_frag_size(fragment);
1415 if (!fragment_length)
1418 /* wrap up previous descriptor */
1419 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1420 tx_descriptor->data0 = tx->frame_data0;
1422 /* move to next descriptor */
1423 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1424 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1425 buffer_info = &tx->buffer_info[tx->frame_tail];
1426 dma_ptr = skb_frag_dma_map(dev, fragment,
1429 if (dma_mapping_error(dev, dma_ptr)) {
1432 /* cleanup all previously setup descriptors */
1433 desc_index = tx->frame_first;
1434 while (desc_index != tx->frame_tail) {
1435 lan743x_tx_release_desc(tx, desc_index, true);
1436 desc_index = lan743x_tx_next_index(tx, desc_index);
/* Frame assembly aborted: clear in-progress state. */
1439 tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
1440 tx->frame_first = 0;
1441 tx->frame_data0 = 0;
1446 tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
1447 tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
1448 tx_descriptor->data3 = (frame_length << 16) &
1449 TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
1451 buffer_info->skb = NULL;
1452 buffer_info->dma_ptr = dma_ptr;
1453 buffer_info->buffer_length = fragment_length;
/* SKB_FRAGMENT flag tells release path to use dma_unmap_page. */
1454 buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE;
1455 buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT;
1457 tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) |
1458 TX_DESC_DATA0_DTYPE_DATA_ |
1461 /* data0 will be programmed in one of other frame assembler functions */
/* Finish frame assembly: set LAST-segment and interrupt-on-completion
 * bits in data0, attach the skb to the final buffer_info (so it is freed
 * on completion), write the final descriptor, advance last_tail, and
 * ring the doorbell by writing TX_TAIL with interrupt-enable flags
 * derived from the channel's vector_flags.
 * Caller must hold tx->ring_lock.
 */
1465 static void lan743x_tx_frame_end(struct lan743x_tx *tx,
1466 struct sk_buff *skb,
1469 /* called only from within lan743x_tx_xmit_frame
1470 * assuming tx->ring_lock has already been acquired
1472 struct lan743x_tx_descriptor *tx_descriptor = NULL;
1473 struct lan743x_tx_buffer_info *buffer_info = NULL;
1474 struct lan743x_adapter *adapter = tx->adapter;
1475 u32 tx_tail_flags = 0;
1477 /* wrap up previous descriptor */
1478 tx->frame_data0 |= TX_DESC_DATA0_LS_;
1479 tx->frame_data0 |= TX_DESC_DATA0_IOC_;
1481 tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
1482 buffer_info = &tx->buffer_info[tx->frame_tail];
1483 buffer_info->skb = skb;
1485 buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
1487 tx_descriptor->data0 = tx->frame_data0;
1488 tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
1489 tx->last_tail = tx->frame_tail;
/* Fold per-vector auto-set interrupt enables into the tail write. */
1493 if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
1494 tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_;
1495 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)
1496 tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ |
1497 TX_TAIL_SET_TOP_INT_EN_;
1499 lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1500 tx_tail_flags | tx->frame_tail);
1501 tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS;
/* ndo_start_xmit worker for one TX channel. Under tx->ring_lock:
 * compute the descriptor count the skb needs; if the ring lacks space,
 * stash the skb in tx->overflow_skb and stop the queue (frames larger
 * than the whole ring are handled by the elided branch at 1522).
 * Otherwise assemble the frame: start with the linear head (checksum
 * offload when CHECKSUM_PARTIAL), add an LSO extension descriptor for
 * GSO skbs, append each page fragment, and finish with frame_end.
 * Returns NETDEV_TX_OK on the visible path.
 * NOTE(review): listing is fragmentary; the overflow-return and
 * fragment-error unlock paths are partly elided.
 */
1504 static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
1505 struct sk_buff *skb)
1507 int required_number_of_descriptors = 0;
1508 unsigned int start_frame_length = 0;
1509 unsigned int frame_length = 0;
1510 unsigned int head_length = 0;
1511 unsigned long irq_flags = 0;
1512 bool ignore_sync = false;
1517 required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb);
1519 spin_lock_irqsave(&tx->ring_lock, irq_flags);
1520 if (required_number_of_descriptors >
1521 lan743x_tx_get_avail_desc(tx)) {
1522 if (required_number_of_descriptors > (tx->ring_size - 1)) {
1525 /* save to overflow buffer */
1526 tx->overflow_skb = skb;
1527 netif_stop_queue(tx->adapter->netdev);
1532 /* space available, transmit skb */
1533 head_length = skb_headlen(skb);
1534 frame_length = skb_pagelen(skb);
1535 nr_frags = skb_shinfo(skb)->nr_frags;
1536 start_frame_length = frame_length;
1537 gso = skb_is_gso(skb);
/* For GSO the first descriptor's frame length is capped by gso_size. */
1539 start_frame_length = max(skb_shinfo(skb)->gso_size,
1543 if (lan743x_tx_frame_start(tx,
1544 skb->data, head_length,
1546 skb->ip_summed == CHECKSUM_PARTIAL)) {
1552 lan743x_tx_frame_add_lso(tx, frame_length);
1557 for (j = 0; j < nr_frags; j++) {
1558 const struct skb_frag_struct *frag;
1560 frag = &(skb_shinfo(skb)->frags[j]);
1561 if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
1562 /* upon error no need to call
1563 * lan743x_tx_frame_end
1564 * frame assembler clean up was performed inside
1565 * lan743x_tx_frame_add_fragment
1573 lan743x_tx_frame_end(tx, skb, ignore_sync);
1576 spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1577 return NETDEV_TX_OK;
/* NAPI poll handler for TX completion. Acks the per-channel IOC status
 * bit (when the vector uses write-to-clear), reclaims completed
 * descriptors under ring_lock, and — if the queue was stopped on
 * overflow — either retransmits the stashed overflow_skb once room
 * exists or simply wakes the queue. Finally completes NAPI and
 * re-enables the channel's top-level interrupt.
 * NOTE(review): listing is fragmentary; the napi_complete_done early
 * return target is elided.
 */
1580 static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
1582 struct lan743x_tx *tx = container_of(napi, struct lan743x_tx, napi);
1583 struct lan743x_adapter *adapter = tx->adapter;
1584 bool start_transmitter = false;
1585 unsigned long irq_flags = 0;
1589 adapter = tx->adapter;
1590 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
1591 int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
1592 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
1593 lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
1594 spin_lock_irqsave(&tx->ring_lock, irq_flags);
1596 /* clean up tx ring */
1597 lan743x_tx_release_completed_descriptors(tx);
1598 if (netif_queue_stopped(adapter->netdev)) {
1599 if (tx->overflow_skb) {
/* Only restart once the overflow skb fits in the ring. */
1600 if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <=
1601 lan743x_tx_get_avail_desc(tx))
1602 start_transmitter = true;
1604 netif_wake_queue(adapter->netdev);
1607 spin_unlock_irqrestore(&tx->ring_lock, irq_flags);
1609 if (start_transmitter) {
1610 /* space is now available, transmit overflow skb */
1611 lan743x_tx_xmit_frame(tx, tx->overflow_skb);
1612 tx->overflow_skb = NULL;
1613 netif_wake_queue(adapter->netdev);
1616 if (!napi_complete_done(napi, weight))
/* Re-arm the TX DMA interrupt; read INT_STS to flush the write. */
1620 lan743x_csr_write(adapter, INT_EN_SET,
1621 INT_BIT_DMA_TX_(tx->channel_number));
1622 lan743x_csr_read(adapter, INT_STS);
/* Free all TX ring resources in reverse order of allocation: head
 * writeback DMA buffer, buffer_info array, then the descriptor ring
 * itself. Pointers/sizes are reset so the function is safe to call on a
 * partially initialized ring (init error path).
 */
1628 static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
1630 if (tx->head_cpu_ptr) {
1631 pci_free_consistent(tx->adapter->pdev,
1632 sizeof(*tx->head_cpu_ptr),
1633 (void *)(tx->head_cpu_ptr),
1635 tx->head_cpu_ptr = NULL;
1636 tx->head_dma_ptr = 0;
1638 kfree(tx->buffer_info);
1639 tx->buffer_info = NULL;
1641 if (tx->ring_cpu_ptr) {
1642 pci_free_consistent(tx->adapter->pdev,
1643 tx->ring_allocation_size,
1646 tx->ring_allocation_size = 0;
1647 tx->ring_cpu_ptr = NULL;
1648 tx->ring_dma_ptr = 0;
/* Allocate TX ring resources: coherent descriptor ring (size validated
 * against TX_CFG_B_TX_RING_LEN_MASK_ and rounded up with ALIGN), the
 * buffer_info bookkeeping array, and a coherent head-writeback word.
 * The writeback address must be 4-byte aligned (checked at 1696).
 * On any failure the error path (1704) unwinds via
 * lan743x_tx_ring_cleanup().
 * NOTE(review): listing is fragmentary; error codes and some checks are
 * elided.
 */
1653 static int lan743x_tx_ring_init(struct lan743x_tx *tx)
1655 size_t ring_allocation_size = 0;
1656 void *cpu_ptr = NULL;
1660 tx->ring_size = LAN743X_TX_RING_SIZE;
1661 if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) {
1665 ring_allocation_size = ALIGN(tx->ring_size *
1666 sizeof(struct lan743x_tx_descriptor),
1669 cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
1670 ring_allocation_size, &dma_ptr);
1676 tx->ring_allocation_size = ring_allocation_size;
1677 tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr;
1678 tx->ring_dma_ptr = dma_ptr;
1680 cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL);
1685 tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
1687 cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
1688 sizeof(*tx->head_cpu_ptr), &dma_ptr);
1694 tx->head_cpu_ptr = cpu_ptr;
1695 tx->head_dma_ptr = dma_ptr;
/* Hardware requires a 4-byte-aligned writeback address. */
1696 if (tx->head_dma_ptr & 0x3) {
1704 lan743x_tx_ring_cleanup(tx);
/* Shut down one TX channel: stop the DMAC channel and wait for it,
 * mask its DMAC and top-level interrupts, tear down NAPI, disable the
 * TX FIFO and wait for its enable bit to clear, release all ring
 * descriptors, drop any stashed overflow skb, and free ring memory.
 */
1708 static void lan743x_tx_close(struct lan743x_tx *tx)
1710 struct lan743x_adapter *adapter = tx->adapter;
1712 lan743x_csr_write(adapter,
1714 DMAC_CMD_STOP_T_(tx->channel_number));
1715 lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number);
1717 lan743x_csr_write(adapter,
1719 DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1720 lan743x_csr_write(adapter, INT_EN_CLR,
1721 INT_BIT_DMA_TX_(tx->channel_number));
1722 napi_disable(&tx->napi);
1723 netif_napi_del(&tx->napi);
1725 lan743x_csr_write(adapter, FCT_TX_CTL,
1726 FCT_TX_CTL_DIS_(tx->channel_number));
1727 lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1728 FCT_TX_CTL_EN_(tx->channel_number),
1729 0, 1000, 20000, 100);
1731 lan743x_tx_release_all_descriptors(tx);
1733 if (tx->overflow_skb) {
1734 dev_kfree_skb(tx->overflow_skb);
1735 tx->overflow_skb = NULL;
1738 lan743x_tx_ring_cleanup(tx);
/* Bring up one TX channel: allocate the ring, reset and enable the TX
 * FIFO, soft-reset the DMAC channel, program ring base / ring length /
 * head-writeback addresses, configure TX_CFG_A/B/C (with post-A0
 * silicon extras gated on LAN743X_CSR_FLAG_IS_A0), register and enable
 * NAPI, unmask interrupts, and finally start the DMAC channel.
 * NOTE(review): listing is fragmentary; error handling after
 * lan743x_tx_ring_init() is elided.
 */
1741 static int lan743x_tx_open(struct lan743x_tx *tx)
1743 struct lan743x_adapter *adapter = NULL;
1747 adapter = tx->adapter;
1748 ret = lan743x_tx_ring_init(tx);
1752 /* initialize fifo */
1753 lan743x_csr_write(adapter, FCT_TX_CTL,
1754 FCT_TX_CTL_RESET_(tx->channel_number));
1755 lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL,
1756 FCT_TX_CTL_RESET_(tx->channel_number),
1757 0, 1000, 20000, 100);
1760 lan743x_csr_write(adapter, FCT_TX_CTL,
1761 FCT_TX_CTL_EN_(tx->channel_number));
1763 /* reset tx channel */
1764 lan743x_csr_write(adapter, DMAC_CMD,
1765 DMAC_CMD_TX_SWR_(tx->channel_number));
1766 lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
1767 DMAC_CMD_TX_SWR_(tx->channel_number),
1768 0, 1000, 20000, 100);
1770 /* Write TX_BASE_ADDR */
1771 lan743x_csr_write(adapter,
1772 TX_BASE_ADDRH(tx->channel_number),
1773 DMA_ADDR_HIGH32(tx->ring_dma_ptr));
1774 lan743x_csr_write(adapter,
1775 TX_BASE_ADDRL(tx->channel_number),
1776 DMA_ADDR_LOW32(tx->ring_dma_ptr));
1778 /* Write TX_CFG_B */
1779 data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number));
1780 data &= ~TX_CFG_B_TX_RING_LEN_MASK_;
1781 data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_);
/* 512-byte DMA burst only on silicon revisions after A0. */
1782 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
1783 data |= TX_CFG_B_TDMABL_512_;
1784 lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data);
1786 /* Write TX_CFG_A */
1787 data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_;
1788 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
1789 data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_;
1790 data |= TX_CFG_A_TX_PF_THRES_SET_(0x10);
1791 data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04);
1792 data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07);
1794 lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data);
1796 /* Write TX_HEAD_WRITEBACK_ADDR */
1797 lan743x_csr_write(adapter,
1798 TX_HEAD_WRITEBACK_ADDRH(tx->channel_number),
1799 DMA_ADDR_HIGH32(tx->head_dma_ptr));
1800 lan743x_csr_write(adapter,
1801 TX_HEAD_WRITEBACK_ADDRL(tx->channel_number),
1802 DMA_ADDR_LOW32(tx->head_dma_ptr));
/* Seed software head/tail from the hardware's current head. */
1805 tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number));
1809 lan743x_csr_write(adapter, TX_TAIL(tx->channel_number),
1810 (u32)(tx->last_tail));
1811 tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
1813 (tx->channel_number));
1814 netif_napi_add(adapter->netdev,
1815 &tx->napi, lan743x_tx_napi_poll,
1817 napi_enable(&tx->napi);
/* TX_CFG_C: interrupt auto-clear / read-to-clear behavior per vector. */
1820 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
1821 data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_;
1822 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
1823 data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_;
1824 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
1825 data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_;
1826 if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
1827 data |= TX_CFG_C_TX_INT_EN_R2C_;
1828 lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data);
1830 if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET))
1831 lan743x_csr_write(adapter, INT_EN_SET,
1832 INT_BIT_DMA_TX_(tx->channel_number));
1833 lan743x_csr_write(adapter, DMAC_INT_EN_SET,
1834 DMAC_INT_BIT_TX_IOC_(tx->channel_number));
1836 /* start dmac channel */
1837 lan743x_csr_write(adapter, DMAC_CMD,
1838 DMAC_CMD_START_T_(tx->channel_number));
/* Advance an RX ring index by one, wrapping at ring_size. */
1842 static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
1844 return ((++index) % rx->ring_size);
/* Allocate and install a fresh receive buffer at ring slot @index:
 * allocate an skb sized for a max frame (+ETH_HLEN, +4 for FCS,
 * +RX_HEAD_PADDING), DMA-map its data, fill the descriptor's address
 * words, and hand the descriptor to hardware by setting the OWN bit.
 * Reserves RX_HEAD_PADDING so the IP header ends up aligned.
 * Returns nonzero on allocation/mapping failure — TODO confirm exact
 * error values (lines elided).
 */
1847 static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
1849 struct lan743x_rx_buffer_info *buffer_info;
1850 struct lan743x_rx_descriptor *descriptor;
1853 length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
1854 descriptor = &rx->ring_cpu_ptr[index];
1855 buffer_info = &rx->buffer_info[index];
1856 buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
1858 GFP_ATOMIC | GFP_DMA);
1859 if (!(buffer_info->skb))
1861 buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
1862 buffer_info->skb->data,
1865 if (dma_mapping_error(&rx->adapter->pdev->dev,
1866 buffer_info->dma_ptr)) {
1867 buffer_info->dma_ptr = 0;
1871 buffer_info->buffer_length = length;
1872 descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
1873 descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
1874 descriptor->data3 = 0;
/* OWN bit gives the descriptor to the hardware. */
1875 descriptor->data0 = (RX_DESC_DATA0_OWN_ |
1876 (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
1877 skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
/* Re-arm ring slot @index with its existing, already-mapped buffer:
 * rewrite the descriptor address/length words from buffer_info and set
 * OWN to give it back to hardware. No allocation or mapping is done.
 */
1882 static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
1884 struct lan743x_rx_buffer_info *buffer_info;
1885 struct lan743x_rx_descriptor *descriptor;
1887 descriptor = &rx->ring_cpu_ptr[index];
1888 buffer_info = &rx->buffer_info[index];
1890 descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
1891 descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
1892 descriptor->data3 = 0;
1893 descriptor->data0 = (RX_DESC_DATA0_OWN_ |
1894 ((buffer_info->buffer_length) &
1895 RX_DESC_DATA0_BUF_LENGTH_MASK_));
/* Fully release ring slot @index: zero the descriptor (revoking it from
 * hardware), unmap the DMA buffer if mapped, free the skb if present,
 * and zero the buffer_info record.
 */
1898 static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
1900 struct lan743x_rx_buffer_info *buffer_info;
1901 struct lan743x_rx_descriptor *descriptor;
1903 descriptor = &rx->ring_cpu_ptr[index];
1904 buffer_info = &rx->buffer_info[index];
1906 memset(descriptor, 0, sizeof(*descriptor));
1908 if (buffer_info->dma_ptr) {
1909 dma_unmap_single(&rx->adapter->pdev->dev,
1910 buffer_info->dma_ptr,
1911 buffer_info->buffer_length,
1913 buffer_info->dma_ptr = 0;
1916 if (buffer_info->skb) {
1917 dev_kfree_skb(buffer_info->skb);
1918 buffer_info->skb = NULL;
1921 memset(buffer_info, 0, sizeof(*buffer_info));
/* Process at most one received packet from the RX ring.
 * Walks descriptors from rx->last_head toward the hardware head
 * writeback index, locating the FS (first segment) and LS (last
 * segment) descriptors, plus an optional trailing extension descriptor
 * carrying a hardware timestamp (seconds in data1, nanoseconds masked
 * from data2).
 * Single-buffer packets: unmap the buffer, trim 4 bytes (FCS) off the
 * reported frame length, set the protocol, replace the ring slot with a
 * fresh buffer, attach the hw timestamp if present, and hand the skb to
 * napi_gro_receive(). Multi-buffer packets are not supported (buffers
 * are allocated at jumbo size) and are dropped with their slots
 * released and re-allocated — both the contiguous and wrapped index
 * ranges are handled.
 * Advances rx->last_tail / rx->last_head past everything consumed.
 * Returns RX_PROCESS_RESULT_NOTHING_TO_DO, _PACKET_DROPPED, or
 * _PACKET_RECEIVED.
 * NOTE(review): listing is fragmentary; several bail-out branches and
 * declarations are elided, and the bounds checks at 1936/1939
 * presumably return NOTHING_TO_DO — TODO confirm.
 */
1924 static int lan743x_rx_process_packet(struct lan743x_rx *rx)
1926 struct skb_shared_hwtstamps *hwtstamps = NULL;
1927 int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
1928 struct lan743x_rx_buffer_info *buffer_info;
1929 struct lan743x_rx_descriptor *descriptor;
1930 int current_head_index = -1;
1931 int extension_index = -1;
1932 int first_index = -1;
1933 int last_index = -1;
1935 current_head_index = *rx->head_cpu_ptr;
/* Sanity-check both hardware and software indices before use. */
1936 if (current_head_index < 0 || current_head_index >= rx->ring_size)
1939 if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
1942 if (rx->last_head != current_head_index) {
1943 descriptor = &rx->ring_cpu_ptr[rx->last_head];
1944 if (descriptor->data0 & RX_DESC_DATA0_OWN_)
1947 if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
1950 first_index = rx->last_head;
1951 if (descriptor->data0 & RX_DESC_DATA0_LS_) {
1952 last_index = rx->last_head;
/* FS without LS: scan forward for the last segment. */
1956 index = lan743x_rx_next_index(rx, first_index);
1957 while (index != current_head_index) {
1958 descriptor = &rx->ring_cpu_ptr[index];
1959 if (descriptor->data0 & RX_DESC_DATA0_OWN_)
1962 if (descriptor->data0 & RX_DESC_DATA0_LS_) {
1966 index = lan743x_rx_next_index(rx, index);
1969 if (last_index >= 0) {
1970 descriptor = &rx->ring_cpu_ptr[last_index];
1971 if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
1972 /* extension is expected to follow */
1973 int index = lan743x_rx_next_index(rx,
1975 if (index != current_head_index) {
1976 descriptor = &rx->ring_cpu_ptr[index];
1977 if (descriptor->data0 &
1978 RX_DESC_DATA0_OWN_) {
1981 if (descriptor->data0 &
1982 RX_DESC_DATA0_EXT_) {
1983 extension_index = index;
1988 /* extension is not yet available */
1989 /* prevent processing of this packet */
1996 if (first_index >= 0 && last_index >= 0) {
1997 int real_last_index = last_index;
1998 struct sk_buff *skb = NULL;
2002 /* packet is available */
2003 if (first_index == last_index) {
2004 /* single buffer packet */
2007 buffer_info = &rx->buffer_info[first_index];
2008 skb = buffer_info->skb;
2009 descriptor = &rx->ring_cpu_ptr[first_index];
2011 /* unmap from dma */
2012 if (buffer_info->dma_ptr) {
2013 dma_unmap_single(&rx->adapter->pdev->dev,
2014 buffer_info->dma_ptr,
2015 buffer_info->buffer_length,
2017 buffer_info->dma_ptr = 0;
2018 buffer_info->buffer_length = 0;
2020 buffer_info->skb = NULL;
2021 packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
2022 (descriptor->data0);
/* Drop the trailing 4-byte FCS from the reported length. */
2023 skb_put(skb, packet_length - 4);
2024 skb->protocol = eth_type_trans(skb,
2025 rx->adapter->netdev);
2026 lan743x_rx_allocate_ring_element(rx, first_index);
2028 int index = first_index;
2030 /* multi buffer packet not supported */
2031 /* this should not happen since
2032 * buffers are allocated to be at least jumbo size
2035 /* clean up buffers */
2036 if (first_index <= last_index) {
2037 while ((index >= first_index) &&
2038 (index <= last_index)) {
2039 lan743x_rx_release_ring_element(rx,
2041 lan743x_rx_allocate_ring_element(rx,
2043 index = lan743x_rx_next_index(rx,
/* Wrapped range: release both [first..end] and [0..last]. */
2047 while ((index >= first_index) ||
2048 (index <= last_index)) {
2049 lan743x_rx_release_ring_element(rx,
2051 lan743x_rx_allocate_ring_element(rx,
2053 index = lan743x_rx_next_index(rx,
2059 if (extension_index >= 0) {
2060 descriptor = &rx->ring_cpu_ptr[extension_index];
2061 buffer_info = &rx->buffer_info[extension_index];
/* Hardware RX timestamp: seconds in data1, ns field in data2. */
2063 ts_sec = descriptor->data1;
2064 ts_nsec = (descriptor->data2 &
2065 RX_DESC_DATA2_TS_NS_MASK_);
2066 lan743x_rx_reuse_ring_element(rx, extension_index);
2067 real_last_index = extension_index;
2071 result = RX_PROCESS_RESULT_PACKET_DROPPED;
2075 if (extension_index < 0)
2076 goto pass_packet_to_os;
2077 hwtstamps = skb_hwtstamps(skb);
2079 hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);
2082 /* pass packet to OS */
2083 napi_gro_receive(&rx->napi, skb);
2084 result = RX_PROCESS_RESULT_PACKET_RECEIVED;
2087 /* push tail and head forward */
2088 rx->last_tail = real_last_index;
2089 rx->last_head = lan743x_rx_next_index(rx, real_last_index);
/* NAPI poll handler for RX. Acks the per-channel RXFRM status bit
 * (write-to-clear vectors only), then processes packets until either
 * the budget (@weight) is consumed, the ring is empty, or a drop
 * occurs. When the budget is not exhausted, completes NAPI, re-enables
 * the channel interrupt per vector_flags, and writes RX_TAIL to re-arm
 * the ring.
 * NOTE(review): listing is fragmentary; the break/continue bodies of
 * the result switch and the return value lines are elided.
 */
2095 static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
2097 struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
2098 struct lan743x_adapter *adapter = rx->adapter;
2099 u32 rx_tail_flags = 0;
2102 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) {
2103 /* clear int status bit before reading packet */
2104 lan743x_csr_write(adapter, DMAC_INT_STS,
2105 DMAC_INT_BIT_RXFRM_(rx->channel_number));
2108 while (count < weight) {
2109 int rx_process_result = -1;
2111 rx_process_result = lan743x_rx_process_packet(rx);
2112 if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
2114 } else if (rx_process_result ==
2115 RX_PROCESS_RESULT_NOTHING_TO_DO) {
2117 } else if (rx_process_result ==
2118 RX_PROCESS_RESULT_PACKET_DROPPED) {
2122 rx->frame_count += count;
/* Budget exhausted: stay in polling mode, do not re-enable IRQs. */
2123 if (count == weight)
2126 if (!napi_complete_done(napi, count))
2129 if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
2130 rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
2131 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
2132 rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_;
2134 lan743x_csr_write(adapter, INT_EN_SET,
2135 INT_BIT_DMA_RX_(rx->channel_number));
2138 /* update RX_TAIL */
2139 lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2140 rx_tail_flags | rx->last_tail);
/* Free all RX ring resources: release every ring element (unmapping and
 * freeing its skb), then the head writeback buffer, the buffer_info
 * array, and the descriptor ring. Safe on a partially initialized ring
 * (init error path) because each free is guarded by its pointer.
 */
2145 static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
2147 if (rx->buffer_info && rx->ring_cpu_ptr) {
2150 for (index = 0; index < rx->ring_size; index++)
2151 lan743x_rx_release_ring_element(rx, index);
2154 if (rx->head_cpu_ptr) {
2155 pci_free_consistent(rx->adapter->pdev,
2156 sizeof(*rx->head_cpu_ptr),
2159 rx->head_cpu_ptr = NULL;
2160 rx->head_dma_ptr = 0;
2163 kfree(rx->buffer_info);
2164 rx->buffer_info = NULL;
2166 if (rx->ring_cpu_ptr) {
2167 pci_free_consistent(rx->adapter->pdev,
2168 rx->ring_allocation_size,
2171 rx->ring_allocation_size = 0;
2172 rx->ring_cpu_ptr = NULL;
2173 rx->ring_dma_ptr = 0;
/* Allocate RX ring resources: validate ring size (>1 and within
 * RX_CFG_B_RX_RING_LEN_MASK_), allocate the coherent descriptor ring,
 * the buffer_info array, and the coherent head-writeback word (must be
 * 4-byte aligned, checked at 2228), then populate every slot with a
 * receive buffer. Errors unwind via lan743x_rx_ring_cleanup() (2242).
 * NOTE(review): listing is fragmentary; error codes are elided.
 */
2180 static int lan743x_rx_ring_init(struct lan743x_rx *rx)
2182 size_t ring_allocation_size = 0;
2183 dma_addr_t dma_ptr = 0;
2184 void *cpu_ptr = NULL;
2188 rx->ring_size = LAN743X_RX_RING_SIZE;
2189 if (rx->ring_size <= 1) {
2193 if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) {
2197 ring_allocation_size = ALIGN(rx->ring_size *
2198 sizeof(struct lan743x_rx_descriptor),
2201 cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
2202 ring_allocation_size, &dma_ptr);
2207 rx->ring_allocation_size = ring_allocation_size;
2208 rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr;
2209 rx->ring_dma_ptr = dma_ptr;
2211 cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info),
2217 rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
2219 cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
2220 sizeof(*rx->head_cpu_ptr), &dma_ptr);
2226 rx->head_cpu_ptr = cpu_ptr;
2227 rx->head_dma_ptr = dma_ptr;
/* Hardware requires a 4-byte-aligned writeback address. */
2228 if (rx->head_dma_ptr & 0x3) {
2234 for (index = 0; index < rx->ring_size; index++) {
2235 ret = lan743x_rx_allocate_ring_element(rx, index);
2242 lan743x_rx_ring_cleanup(rx);
/* Shut down one RX channel: disable the RX FIFO and wait for its
 * enable bit to clear, stop the DMAC channel and wait, mask the
 * channel's DMAC and top-level interrupts, tear down NAPI, and free the
 * ring.
 */
2246 static void lan743x_rx_close(struct lan743x_rx *rx)
2248 struct lan743x_adapter *adapter = rx->adapter;
2250 lan743x_csr_write(adapter, FCT_RX_CTL,
2251 FCT_RX_CTL_DIS_(rx->channel_number));
2252 lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2253 FCT_RX_CTL_EN_(rx->channel_number),
2254 0, 1000, 20000, 100);
2256 lan743x_csr_write(adapter, DMAC_CMD,
2257 DMAC_CMD_STOP_R_(rx->channel_number));
2258 lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number);
2260 lan743x_csr_write(adapter, DMAC_INT_EN_CLR,
2261 DMAC_INT_BIT_RXFRM_(rx->channel_number));
2262 lan743x_csr_write(adapter, INT_EN_CLR,
2263 INT_BIT_DMA_RX_(rx->channel_number));
2264 napi_disable(&rx->napi);
2266 netif_napi_del(&rx->napi);
2268 lan743x_rx_ring_cleanup(rx);
/* Bring up one RX channel: allocate the ring, register NAPI, soft-reset
 * the DMAC channel, program ring base / writeback addresses, configure
 * RX_CFG_A/B/C (head padding, ring length, timestamp-all, post-A0
 * burst/threshold extras), set the initial tail to ring_size-1, verify
 * the hardware head starts at 0, enable NAPI and interrupts, start the
 * DMAC channel, then reset and enable the RX FIFO with flow-control
 * thresholds. The error path (2386-2387) unregisters NAPI and frees the
 * ring.
 * NOTE(review): listing is fragmentary; several intermediate lines
 * (error returns, the RX_TAIL initial write argument) are elided.
 */
2271 static int lan743x_rx_open(struct lan743x_rx *rx)
2273 struct lan743x_adapter *adapter = rx->adapter;
2277 rx->frame_count = 0;
2278 ret = lan743x_rx_ring_init(rx);
2282 netif_napi_add(adapter->netdev,
2283 &rx->napi, lan743x_rx_napi_poll,
2286 lan743x_csr_write(adapter, DMAC_CMD,
2287 DMAC_CMD_RX_SWR_(rx->channel_number));
2288 lan743x_csr_wait_for_bit(adapter, DMAC_CMD,
2289 DMAC_CMD_RX_SWR_(rx->channel_number),
2290 0, 1000, 20000, 100);
2292 /* set ring base address */
2293 lan743x_csr_write(adapter,
2294 RX_BASE_ADDRH(rx->channel_number),
2295 DMA_ADDR_HIGH32(rx->ring_dma_ptr));
2296 lan743x_csr_write(adapter,
2297 RX_BASE_ADDRL(rx->channel_number),
2298 DMA_ADDR_LOW32(rx->ring_dma_ptr));
2300 /* set rx write back address */
2301 lan743x_csr_write(adapter,
2302 RX_HEAD_WRITEBACK_ADDRH(rx->channel_number),
2303 DMA_ADDR_HIGH32(rx->head_dma_ptr));
2304 lan743x_csr_write(adapter,
2305 RX_HEAD_WRITEBACK_ADDRL(rx->channel_number),
2306 DMA_ADDR_LOW32(rx->head_dma_ptr));
2307 data = RX_CFG_A_RX_HP_WB_EN_;
/* Extra writeback/prefetch thresholds only on post-A0 silicon. */
2308 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) {
2309 data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ |
2310 RX_CFG_A_RX_WB_THRES_SET_(0x7) |
2311 RX_CFG_A_RX_PF_THRES_SET_(16) |
2312 RX_CFG_A_RX_PF_PRI_THRES_SET_(4));
2316 lan743x_csr_write(adapter,
2317 RX_CFG_A(rx->channel_number), data);
2320 data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number));
2321 data &= ~RX_CFG_B_RX_PAD_MASK_;
/* Pad setting must match the RX_HEAD_PADDING used at buffer alloc. */
2322 if (!RX_HEAD_PADDING)
2323 data |= RX_CFG_B_RX_PAD_0_;
2325 data |= RX_CFG_B_RX_PAD_2_;
2326 data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
2327 data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
2328 data |= RX_CFG_B_TS_ALL_RX_;
2329 if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
2330 data |= RX_CFG_B_RDMABL_512_;
2332 lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data);
2333 rx->vector_flags = lan743x_intr_get_vector_flags(adapter,
2335 (rx->channel_number));
/* RX_CFG_C: interrupt auto-clear / read-to-clear behavior per vector. */
2339 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR)
2340 data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_;
2341 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR)
2342 data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_;
2343 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C)
2344 data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_;
2345 if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)
2346 data |= RX_CFG_C_RX_INT_EN_R2C_;
2347 lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data);
2349 rx->last_tail = ((u32)(rx->ring_size - 1));
2350 lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
2352 rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number));
/* A nonzero head at startup is unexpected — error branch elided. */
2353 if (rx->last_head) {
2358 napi_enable(&rx->napi);
2360 lan743x_csr_write(adapter, INT_EN_SET,
2361 INT_BIT_DMA_RX_(rx->channel_number));
2362 lan743x_csr_write(adapter, DMAC_INT_STS,
2363 DMAC_INT_BIT_RXFRM_(rx->channel_number));
2364 lan743x_csr_write(adapter, DMAC_INT_EN_SET,
2365 DMAC_INT_BIT_RXFRM_(rx->channel_number));
2366 lan743x_csr_write(adapter, DMAC_CMD,
2367 DMAC_CMD_START_R_(rx->channel_number));
2369 /* initialize fifo */
2370 lan743x_csr_write(adapter, FCT_RX_CTL,
2371 FCT_RX_CTL_RESET_(rx->channel_number));
2372 lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL,
2373 FCT_RX_CTL_RESET_(rx->channel_number),
2374 0, 1000, 20000, 100);
2375 lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number),
2376 FCT_FLOW_CTL_REQ_EN_ |
2377 FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) |
2378 FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA));
2381 lan743x_csr_write(adapter, FCT_RX_CTL,
2382 FCT_RX_CTL_EN_(rx->channel_number));
2386 netif_napi_del(&rx->napi);
2387 lan743x_rx_ring_cleanup(rx);
/* ndo_stop: tear down in reverse of open — TX channel 0, every RX
 * channel, then PHY, MAC, and interrupts.
 */
2393 static int lan743x_netdev_close(struct net_device *netdev)
2395 struct lan743x_adapter *adapter = netdev_priv(netdev);
2398 lan743x_tx_close(&adapter->tx[0]);
2400 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
2401 lan743x_rx_close(&adapter->rx[index]);
2403 lan743x_phy_close(adapter);
2405 lan743x_mac_close(adapter);
2407 lan743x_intr_close(adapter);
/* ndo_open: bring up interrupts, MAC, PHY, all RX channels, and TX
 * channel 0 — in that order. On failure, unwind whatever was opened
 * (RX channels with an allocated ring, PHY, MAC, interrupts), warn, and
 * return the error.
 * NOTE(review): listing is fragmentary; the goto targets between stages
 * are elided.
 */
2412 static int lan743x_netdev_open(struct net_device *netdev)
2414 struct lan743x_adapter *adapter = netdev_priv(netdev);
2418 ret = lan743x_intr_open(adapter);
2422 ret = lan743x_mac_open(adapter);
2426 ret = lan743x_phy_open(adapter);
2430 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2431 ret = lan743x_rx_open(&adapter->rx[index]);
2436 ret = lan743x_tx_open(&adapter->tx[0]);
/* Error unwind: close only RX channels that actually initialized. */
2443 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2444 if (adapter->rx[index].ring_cpu_ptr)
2445 lan743x_rx_close(&adapter->rx[index]);
2447 lan743x_phy_close(adapter);
2450 lan743x_mac_close(adapter);
2453 lan743x_intr_close(adapter);
2456 netif_warn(adapter, ifup, adapter->netdev,
2457 "Error opening LAN743x\n");
/* ndo_start_xmit: forward the skb to TX channel 0's xmit path. */
2461 static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb,
2462 struct net_device *netdev)
2464 struct lan743x_adapter *adapter = netdev_priv(netdev);
2466 return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
/* ndo_do_ioctl: delegate MII ioctls to the attached PHY; the elided
 * branch after the netif_running() check presumably rejects ioctls on a
 * down interface — TODO confirm (line elided).
 */
2469 static int lan743x_netdev_ioctl(struct net_device *netdev,
2470 struct ifreq *ifr, int cmd)
2472 if (!netif_running(netdev))
2474 return phy_mii_ioctl(netdev->phydev, ifr, cmd);
/* ndo_set_rx_mode: push the netdev's multicast/promisc state into the
 * receive filtering engine.
 */
2477 static void lan743x_netdev_set_multicast(struct net_device *netdev)
2479 struct lan743x_adapter *adapter = netdev_priv(netdev);
2481 lan743x_rfe_set_multicast(adapter);
/* ndo_change_mtu: program the MAC for the new MTU, and record it in the
 * netdev (the elided line 2490 presumably gates this on success — TODO
 * confirm).
 */
2484 static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
2486 struct lan743x_adapter *adapter = netdev_priv(netdev);
2489 ret = lan743x_mac_set_mtu(adapter, new_mtu);
2491 netdev->mtu = new_mtu;
/* ndo_get_stats64: assemble rtnl_link_stats64 directly from the
 * hardware statistics registers (each read is a separate CSR access;
 * byte/error totals are sums of the per-category counters).
 */
2495 static void lan743x_netdev_get_stats64(struct net_device *netdev,
2496 struct rtnl_link_stats64 *stats)
2498 struct lan743x_adapter *adapter = netdev_priv(netdev);
2500 stats->rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES);
2501 stats->tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES);
2502 stats->rx_bytes = lan743x_csr_read(adapter,
2503 STAT_RX_UNICAST_BYTE_COUNT) +
2504 lan743x_csr_read(adapter,
2505 STAT_RX_BROADCAST_BYTE_COUNT) +
2506 lan743x_csr_read(adapter,
2507 STAT_RX_MULTICAST_BYTE_COUNT);
2508 stats->tx_bytes = lan743x_csr_read(adapter,
2509 STAT_TX_UNICAST_BYTE_COUNT) +
2510 lan743x_csr_read(adapter,
2511 STAT_TX_BROADCAST_BYTE_COUNT) +
2512 lan743x_csr_read(adapter,
2513 STAT_TX_MULTICAST_BYTE_COUNT);
2514 stats->rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) +
2515 lan743x_csr_read(adapter,
2516 STAT_RX_ALIGNMENT_ERRORS) +
2517 lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) +
2518 lan743x_csr_read(adapter,
2519 STAT_RX_UNDERSIZE_FRAME_ERRORS) +
2520 lan743x_csr_read(adapter,
2521 STAT_RX_OVERSIZE_FRAME_ERRORS);
2522 stats->tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) +
2523 lan743x_csr_read(adapter,
2524 STAT_TX_EXCESS_DEFERRAL_ERRORS) +
2525 lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS);
2526 stats->rx_dropped = lan743x_csr_read(adapter,
2527 STAT_RX_DROPPED_FRAMES);
2528 stats->tx_dropped = lan743x_csr_read(adapter,
2529 STAT_TX_EXCESSIVE_COLLISION);
2530 stats->multicast = lan743x_csr_read(adapter,
2531 STAT_RX_MULTICAST_FRAMES) +
2532 lan743x_csr_read(adapter,
2533 STAT_TX_MULTICAST_FRAMES);
2534 stats->collisions = lan743x_csr_read(adapter,
2535 STAT_TX_SINGLE_COLLISIONS) +
2536 lan743x_csr_read(adapter,
2537 STAT_TX_MULTIPLE_COLLISIONS) +
2538 lan743x_csr_read(adapter,
2539 STAT_TX_LATE_COLLISIONS);
2542 static int lan743x_netdev_set_mac_address(struct net_device *netdev,
2545 struct lan743x_adapter *adapter = netdev_priv(netdev);
2546 struct sockaddr *sock_addr = addr;
2549 ret = eth_prepare_mac_addr_change(netdev, sock_addr);
2552 ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
2553 lan743x_mac_set_address(adapter, sock_addr->sa_data);
2554 lan743x_rfe_update_mac_address(adapter);
2558 static const struct net_device_ops lan743x_netdev_ops = {
2559 .ndo_open = lan743x_netdev_open,
2560 .ndo_stop = lan743x_netdev_close,
2561 .ndo_start_xmit = lan743x_netdev_xmit_frame,
2562 .ndo_do_ioctl = lan743x_netdev_ioctl,
2563 .ndo_set_rx_mode = lan743x_netdev_set_multicast,
2564 .ndo_change_mtu = lan743x_netdev_change_mtu,
2565 .ndo_get_stats64 = lan743x_netdev_get_stats64,
2566 .ndo_set_mac_address = lan743x_netdev_set_mac_address,
2569 static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter)
2571 lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2574 static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter)
2576 mdiobus_unregister(adapter->mdiobus);
2579 static void lan743x_full_cleanup(struct lan743x_adapter *adapter)
2581 unregister_netdev(adapter->netdev);
2583 lan743x_mdiobus_cleanup(adapter);
2584 lan743x_hardware_cleanup(adapter);
2585 lan743x_pci_cleanup(adapter);
2588 static int lan743x_hardware_init(struct lan743x_adapter *adapter,
2589 struct pci_dev *pdev)
2591 struct lan743x_tx *tx;
2595 adapter->intr.irq = adapter->pdev->irq;
2596 lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
2597 mutex_init(&adapter->dp_lock);
2598 ret = lan743x_mac_init(adapter);
2602 ret = lan743x_phy_init(adapter);
2606 lan743x_rfe_update_mac_address(adapter);
2608 ret = lan743x_dmac_init(adapter);
2612 for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
2613 adapter->rx[index].adapter = adapter;
2614 adapter->rx[index].channel_number = index;
2617 tx = &adapter->tx[0];
2618 tx->adapter = adapter;
2619 tx->channel_number = 0;
2620 spin_lock_init(&tx->ring_lock);
2624 static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
2628 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
2629 if (!(adapter->mdiobus)) {
2634 adapter->mdiobus->priv = (void *)adapter;
2635 adapter->mdiobus->read = lan743x_mdiobus_read;
2636 adapter->mdiobus->write = lan743x_mdiobus_write;
2637 adapter->mdiobus->name = "lan743x-mdiobus";
2638 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
2639 "pci-%s", pci_name(adapter->pdev));
2641 /* set to internal PHY id */
2642 adapter->mdiobus->phy_mask = ~(u32)BIT(1);
2644 /* register mdiobus */
2645 ret = mdiobus_register(adapter->mdiobus);
/* lan743x_pcidev_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @id: entry in lan743x_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
2664 static int lan743x_pcidev_probe(struct pci_dev *pdev,
2665 const struct pci_device_id *id)
2667 struct lan743x_adapter *adapter = NULL;
2668 struct net_device *netdev = NULL;
2671 netdev = devm_alloc_etherdev(&pdev->dev,
2672 sizeof(struct lan743x_adapter));
2676 SET_NETDEV_DEV(netdev, &pdev->dev);
2677 pci_set_drvdata(pdev, netdev);
2678 adapter = netdev_priv(netdev);
2679 adapter->netdev = netdev;
2680 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
2681 NETIF_MSG_LINK | NETIF_MSG_IFUP |
2682 NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
2683 netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
2685 ret = lan743x_pci_init(adapter, pdev);
2689 ret = lan743x_csr_init(adapter);
2693 ret = lan743x_hardware_init(adapter, pdev);
2697 ret = lan743x_mdiobus_init(adapter);
2699 goto cleanup_hardware;
2701 adapter->netdev->netdev_ops = &lan743x_netdev_ops;
2702 adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2703 adapter->netdev->hw_features = adapter->netdev->features;
2705 /* carrier off reporting is important to ethtool even BEFORE open */
2706 netif_carrier_off(netdev);
2708 ret = register_netdev(adapter->netdev);
2710 goto cleanup_mdiobus;
2714 lan743x_mdiobus_cleanup(adapter);
2717 lan743x_hardware_cleanup(adapter);
2720 lan743x_pci_cleanup(adapter);
2723 pr_warn("Initialization failed\n");
/**
 * lan743x_pcidev_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * this is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void lan743x_pcidev_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	/* unregister netdev, then unwind mdiobus/hardware/PCI state */
	lan743x_full_cleanup(adapter);
}
/* lan743x_pcidev_shutdown - quiesce the device for system shutdown
 * @pdev: PCI device information struct
 *
 * Detaches and (if running) closes the netdev under the RTNL lock —
 * ndo_stop paths assume RTNL is held — then masks all interrupts.
 */
static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct lan743x_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	netif_device_detach(netdev);

	/* close netdev when netdev is at running state.
	 * For instance, it is true when system goes to sleep by pm-suspend
	 * However, it is false when system goes to sleep by suspend GUI menu
	 */
	if (netif_running(netdev))
		lan743x_netdev_close(netdev);
	rtnl_unlock();

	/* clean up lan743x portion */
	lan743x_hardware_cleanup(adapter);
}
2764 static const struct pci_device_id lan743x_pcidev_tbl[] = {
2765 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
2769 static struct pci_driver lan743x_pcidev_driver = {
2770 .name = DRIVER_NAME,
2771 .id_table = lan743x_pcidev_tbl,
2772 .probe = lan743x_pcidev_probe,
2773 .remove = lan743x_pcidev_remove,
2774 .shutdown = lan743x_pcidev_shutdown,
2777 module_pci_driver(lan743x_pcidev_driver);
2779 MODULE_AUTHOR(DRIVER_AUTHOR);
2780 MODULE_DESCRIPTION(DRIVER_DESC);
2781 MODULE_LICENSE("GPL");