/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

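/* Pre-format each 16-byte descriptor of a buffer pool ring with its slot
 * index and the destination free-pool queue number, in the little-endian
 * layout the ring hardware expects.
 */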
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

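/* Replenish @nbuf slots of the hardware free-buffer pool: allocate and
 * DMA-map one skb per slot, publish its address and length code in the
 * 16-byte descriptor, then credit all new slots to the ring engine with
 * a single ring-command write.
 */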
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

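/* Reclaim one transmitted skb: unmap the linear part and every fragment,
 * release the TSO MSS register reference taken at transmit time (if the
 * ET bit is set), report any hardware error code, and free the skb.
 */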
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb),
			 DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
	}

	return 0;
}

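/* The MAC exposes only NUM_MSS_REG MSS registers, shared by all TSO
 * flows.  Reuse a slot whose programmed MSS already matches, otherwise
 * claim one with a zero refcount; returns the slot index, or -EBUSY when
 * every slot is pinned by another MSS value.
 */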
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	bool mss_index_found = false;
	int mss_index;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
			mss_index_found = true;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
			mss_index_found = true;
		}
	}

	spin_unlock(&pdata->mss_lock);

	/* No slots with ref_count = 0 available, return busy */
	if (!mss_index_found)
		return -EBUSY;

	return mss_index;
}

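/* Build the offload portion of the work message for an egress frame:
 * ethernet/IP/L4 header lengths, checksum-enable flags and, for TSO,
 * the MSS register index.  Non-IP traffic falls through to "out" with
 * checksum offload disabled.
 */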
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires header must reside in 3 buffer */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}

static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

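/* Fill transmit descriptor(s) for an skb.  The linear part occupies the
 * primary descriptor; fragments go in the expanded descriptor, and any
 * overflow spills into a per-ring out-of-line (link-list) buffer array.
 * Fragments larger than 16KB are split, since the hardware length field
 * encodes at most BUFLEN_16K bytes per buffer.  Returns the number of
 * descriptor slots consumed.
 */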
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4 ; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes = len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

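/* Transmit entry point.  tx_level/txc_level are free-running counters of
 * descriptors queued vs. completed; their difference (with wraparound
 * correction) gauges ring occupancy, and the queue is stopped once it
 * crosses tx_qcnt_hi.
 */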
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == -EBUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

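/* Deliver one received frame: unmap its pool buffer, validate the
 * combined ELERR/LERR status, trim the trailing CRC that the hardware
 * leaves in place, and hand the skb to GRO.  The buffer pool is refilled
 * in batches of NUM_BUFPOOL frames.
 */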
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

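/* Drain up to @budget descriptors from a ring, dispatching each either
 * to the rx path or to tx completion.  An NV (next-valid) descriptor
 * spans two slots, so the expanded slot must also be non-empty before
 * either is consumed.  The negative ring-command write at the end
 * returns the processed slots to the hardware.
 */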
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}

static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	return 0;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);
			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			if (ring->cp_ring && ring->cp_ring->cp_skb)
				devm_kfree(dev, ring->cp_ring->cp_skb);
			if (ring->cp_ring && pdata->cq_cnt)
				xgene_enet_free_desc_ring(ring->cp_ring);
			xgene_enet_free_desc_ring(ring);
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			if (ring->buf_pool) {
				if (ring->buf_pool->rx_skb)
					devm_kfree(dev, ring->buf_pool->rx_skb);
				xgene_enet_free_desc_ring(ring->buf_pool);
			}
			xgene_enet_free_desc_ring(ring);
		}
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

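/* Allocate the whole ring set: an rx ring plus its buffer pool per rx
 * queue, and a tx ring (with its expanded-buffer area) plus a completion
 * ring per tx queue.  When no dedicated completion rings exist
 * (cq_cnt == 0), tx completions are taken on the rx ring instead.
 */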
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	__le64 *exp_bufs;
	u16 ring_id;
	int i, ret, size;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_2KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->buf_pool = buf_pool;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));

	return storage;
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

#ifdef CONFIG_ACPI
static void xgene_get_port_id_acpi(struct device *dev,
				  struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}
}
#endif

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	int delay, ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

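/* Discover the interrupt lines: one for RGMII, two for SGMII, and up to
 * XGENE_MAX_ENET_IRQ for XGMII.  For XGMII the rx/tx/completion queue
 * counts are scaled down to match however many IRQs the platform
 * actually provides.
 */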
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return 0;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return 0;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;

	return 0;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_check_phy_handle(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}

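/* Bind the MAC, port, classifier and ring callbacks, the default queue
 * counts, and the per-port buffer/ring numbering for the probed
 * interface type and SoC generation (ENET1 vs ENET2).
 */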
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		pdata->rm = RM0;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);
	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err2;
	}

	return 0;

err2:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */
	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
err1:
	xgene_enet_delete_desc_rings(pdata);
err:
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	xgene_enet_delete_desc_rings(pdata);
	free_netdev(ndev);

	return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");