/* Synopsys DesignWare Core Enterprise Ethernet (XLGMAC) Driver
 *
 * Copyright (c) 2017 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is dual-licensed; you may select either version 2 of
 * the GNU General Public License ("GPL") or BSD license ("BSD").
 *
 * This Synopsys DWC XLGMAC software driver and associated documentation
 * (hereinafter the "Software") is an unsupported proprietary work of
 * Synopsys, Inc. unless otherwise expressly agreed to in writing between
 * Synopsys and you. The Software IS NOT an item of Licensed Software or a
 * Licensed Product under any End User Software License Agreement or
 * Agreement for Licensed Products with Synopsys or any supplement thereto.
 * Synopsys is a registered trademark of Synopsys, Inc. Other names included
 * in the SOFTWARE may be the trademarks of their respective owners.
 */

#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"

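/* Descriptor-ring management for the DWC XLGMAC driver. Each DMA channel
 * owns a Tx and/or Rx ring; a ring pairs a coherent array of hardware
 * descriptors (dma_desc_head) with a driver-side array of per-descriptor
 * bookkeeping entries (desc_data_head) that tracks skbs, DMA mappings and
 * Rx page allocations.
 */
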
static void xlgmac_unmap_desc_data(struct xlgmac_pdata *pdata,
                                   struct xlgmac_desc_data *desc_data)
{
        if (desc_data->skb_dma) {
                if (desc_data->mapped_as_page) {
                        dma_unmap_page(pdata->dev, desc_data->skb_dma,
                                       desc_data->skb_dma_len, DMA_TO_DEVICE);
                } else {
                        dma_unmap_single(pdata->dev, desc_data->skb_dma,
                                         desc_data->skb_dma_len, DMA_TO_DEVICE);
                }
                desc_data->skb_dma = 0;
                desc_data->skb_dma_len = 0;
        }

        if (desc_data->skb) {
                dev_kfree_skb_any(desc_data->skb);
                desc_data->skb = NULL;
        }

        if (desc_data->rx.hdr.pa.pages)
                put_page(desc_data->rx.hdr.pa.pages);

        if (desc_data->rx.hdr.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma,
                               desc_data->rx.hdr.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(desc_data->rx.hdr.pa_unmap.pages);
        }

        if (desc_data->rx.buf.pa.pages)
                put_page(desc_data->rx.buf.pa.pages);

        if (desc_data->rx.buf.pa_unmap.pages) {
                dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma,
                               desc_data->rx.buf.pa_unmap.pages_len,
                               DMA_FROM_DEVICE);
                put_page(desc_data->rx.buf.pa_unmap.pages);
        }

        memset(&desc_data->tx, 0, sizeof(desc_data->tx));
        memset(&desc_data->rx, 0, sizeof(desc_data->rx));

        desc_data->mapped_as_page = 0;

        if (desc_data->state_saved) {
                desc_data->state_saved = 0;
                desc_data->state.skb = NULL;
                desc_data->state.len = 0;
                desc_data->state.error = 0;
        }
}

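/* Release everything a ring holds: per-descriptor mappings and skbs, any
 * cached Rx header/buffer pages, and the coherent descriptor memory.
 */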
static void xlgmac_free_ring(struct xlgmac_pdata *pdata,
                             struct xlgmac_ring *ring)
{
        struct xlgmac_desc_data *desc_data;
        unsigned int i;

        if (!ring)
                return;

        if (ring->desc_data_head) {
                for (i = 0; i < ring->dma_desc_count; i++) {
                        desc_data = XLGMAC_GET_DESC_DATA(ring, i);
                        xlgmac_unmap_desc_data(pdata, desc_data);
                }

                kfree(ring->desc_data_head);
                ring->desc_data_head = NULL;
        }

        if (ring->rx_hdr_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
                               ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_hdr_pa.pages);

                ring->rx_hdr_pa.pages = NULL;
                ring->rx_hdr_pa.pages_len = 0;
                ring->rx_hdr_pa.pages_offset = 0;
                ring->rx_hdr_pa.pages_dma = 0;
        }

        if (ring->rx_buf_pa.pages) {
                dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
                               ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
                put_page(ring->rx_buf_pa.pages);

                ring->rx_buf_pa.pages = NULL;
                ring->rx_buf_pa.pages_len = 0;
                ring->rx_buf_pa.pages_offset = 0;
                ring->rx_buf_pa.pages_dma = 0;
        }

        if (ring->dma_desc_head) {
                dma_free_coherent(pdata->dev,
                                  (sizeof(struct xlgmac_dma_desc) *
                                   ring->dma_desc_count),
                                  ring->dma_desc_head,
                                  ring->dma_desc_head_addr);
                ring->dma_desc_head = NULL;
        }
}

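/* Allocate one ring: a coherent block of hardware descriptors plus the
 * matching array of driver-side descriptor data.
 */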
static int xlgmac_init_ring(struct xlgmac_pdata *pdata,
                            struct xlgmac_ring *ring,
                            unsigned int dma_desc_count)
{
        if (!ring)
                return 0;

        /* Descriptors */
        ring->dma_desc_count = dma_desc_count;
        ring->dma_desc_head = dma_alloc_coherent(pdata->dev,
                                        (sizeof(struct xlgmac_dma_desc) *
                                         dma_desc_count),
                                        &ring->dma_desc_head_addr,
                                        GFP_KERNEL);
        if (!ring->dma_desc_head)
                return -ENOMEM;

        /* Array of descriptor data */
        ring->desc_data_head = kcalloc(dma_desc_count,
                                       sizeof(struct xlgmac_desc_data),
                                       GFP_KERNEL);
        if (!ring->desc_data_head)
                return -ENOMEM;

        netif_dbg(pdata, drv, pdata->netdev,
                  "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n",
                  ring->dma_desc_head,
                  &ring->dma_desc_head_addr,
                  ring->desc_data_head);

        return 0;
}

static void xlgmac_free_rings(struct xlgmac_pdata *pdata)
{
        struct xlgmac_channel *channel;
        unsigned int i;

        if (!pdata->channel_head)
                return;

        channel = pdata->channel_head;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                xlgmac_free_ring(pdata, channel->tx_ring);
                xlgmac_free_ring(pdata, channel->rx_ring);
        }
}

static int xlgmac_alloc_rings(struct xlgmac_pdata *pdata)
{
        struct xlgmac_channel *channel;
        unsigned int i;
        int ret;

        channel = pdata->channel_head;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
                          channel->name);

                ret = xlgmac_init_ring(pdata, channel->tx_ring,
                                       pdata->tx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Tx ring\n");
                        goto err_init_ring;
                }

                netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
                          channel->name);

                ret = xlgmac_init_ring(pdata, channel->rx_ring,
                                       pdata->rx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
                                     "error initializing Rx ring\n");
                        goto err_init_ring;
                }
        }

        return 0;

err_init_ring:
        xlgmac_free_rings(pdata);

        return ret;
}

static void xlgmac_free_channels(struct xlgmac_pdata *pdata)
{
        if (!pdata->channel_head)
                return;

        kfree(pdata->channel_head->tx_ring);
        pdata->channel_head->tx_ring = NULL;

        kfree(pdata->channel_head->rx_ring);
        pdata->channel_head->rx_ring = NULL;

        kfree(pdata->channel_head);

        pdata->channel_head = NULL;
        pdata->channel_count = 0;
}

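/* Channels and their Tx/Rx rings are carved out of three flat arrays; each
 * channel points into the ring arrays, which is why xlgmac_free_channels()
 * above only frees the head pointers.
 */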
static int xlgmac_alloc_channels(struct xlgmac_pdata *pdata)
{
        struct xlgmac_channel *channel_head, *channel;
        struct xlgmac_ring *tx_ring, *rx_ring;
        int ret = -ENOMEM;
        unsigned int i;

        channel_head = kcalloc(pdata->channel_count,
                               sizeof(struct xlgmac_channel), GFP_KERNEL);
        if (!channel_head)
                return ret;

        netif_dbg(pdata, drv, pdata->netdev,
                  "channel_head=%p\n", channel_head);

        tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xlgmac_ring),
                          GFP_KERNEL);
        if (!tx_ring)
                goto err_tx_ring;

        rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xlgmac_ring),
                          GFP_KERNEL);
        if (!rx_ring)
                goto err_rx_ring;

        for (i = 0, channel = channel_head; i < pdata->channel_count;
             i++, channel++) {
                snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
                channel->pdata = pdata;
                channel->queue_index = i;
                channel->dma_regs = pdata->mac_regs + DMA_CH_BASE +
                                    (DMA_CH_INC * i);

                if (pdata->per_channel_irq) {
                        /* Get the per DMA interrupt */
                        ret = pdata->channel_irq[i];
                        if (ret < 0) {
                                netdev_err(pdata->netdev,
                                           "get_irq %u failed\n",
                                           i + 1);
                                goto err_irq;
                        }
                        channel->dma_irq = ret;
                }

                /* Index into the ring arrays so the error labels below can
                 * free the array bases.
                 */
                if (i < pdata->tx_ring_count)
                        channel->tx_ring = &tx_ring[i];

                if (i < pdata->rx_ring_count)
                        channel->rx_ring = &rx_ring[i];

                netif_dbg(pdata, drv, pdata->netdev,
                          "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n",
                          channel->name, channel->dma_regs,
                          channel->tx_ring, channel->rx_ring);
        }

        pdata->channel_head = channel_head;

        return 0;

err_irq:
        kfree(rx_ring);

err_rx_ring:
        kfree(tx_ring);

err_tx_ring:
        kfree(channel_head);

        return ret;
}

static void xlgmac_free_channels_and_rings(struct xlgmac_pdata *pdata)
{
        xlgmac_free_rings(pdata);

        xlgmac_free_channels(pdata);
}

static int xlgmac_alloc_channels_and_rings(struct xlgmac_pdata *pdata)
{
        int ret;

        ret = xlgmac_alloc_channels(pdata);
        if (ret)
                goto err_alloc;

        ret = xlgmac_alloc_rings(pdata);
        if (ret)
                goto err_alloc;

        return 0;

err_alloc:
        xlgmac_free_channels_and_rings(pdata);

        return ret;
}

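/* Allocate and DMA-map a block of pages for Rx buffers, falling back to
 * smaller orders if a high-order allocation fails.
 */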
static int xlgmac_alloc_pages(struct xlgmac_pdata *pdata,
                              struct xlgmac_page_alloc *pa,
                              gfp_t gfp, int order)
{
        struct page *pages = NULL;
        dma_addr_t pages_dma;

        /* Try to obtain pages, decreasing order if necessary */
        gfp |= __GFP_COMP | __GFP_NOWARN;
        while (order >= 0) {
                pages = alloc_pages(gfp, order);
                if (pages)
                        break;

                order--;
        }
        if (!pages)
                return -ENOMEM;

        /* Map the pages */
        pages_dma = dma_map_page(pdata->dev, pages, 0,
                                 PAGE_SIZE << order, DMA_FROM_DEVICE);
        if (dma_mapping_error(pdata->dev, pages_dma)) {
                put_page(pages);
                return -ENOMEM;
        }

        pa->pages = pages;
        pa->pages_len = PAGE_SIZE << order;
        pa->pages_offset = 0;
        pa->pages_dma = pages_dma;

        return 0;
}

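/* Carve a slice of 'len' bytes out of the current page allocation and
 * record it in the buffer descriptor, taking a page reference for each
 * slice handed out. Once the allocation is exhausted, this descriptor
 * becomes responsible for the eventual unmap.
 */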
static void xlgmac_set_buffer_data(struct xlgmac_buffer_data *bd,
                                   struct xlgmac_page_alloc *pa,
                                   unsigned int len)
{
        get_page(pa->pages);
        bd->pa = *pa;

        bd->dma_base = pa->pages_dma;
        bd->dma_off = pa->pages_offset;
        bd->dma_len = len;

        pa->pages_offset += len;
        if ((pa->pages_offset + len) > pa->pages_len) {
                /* This data descriptor is responsible for unmapping page(s) */
                bd->pa_unmap = *pa;

                /* Get a new allocation next time */
                pa->pages = NULL;
                pa->pages_len = 0;
                pa->pages_offset = 0;
                pa->pages_dma = 0;
        }
}

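/* Attach Rx buffers to a descriptor: a single page shared between packet
 * headers and a larger, higher-order allocation for packet payload.
 */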
static int xlgmac_map_rx_buffer(struct xlgmac_pdata *pdata,
                                struct xlgmac_ring *ring,
                                struct xlgmac_desc_data *desc_data)
{
        int order, ret;

        if (!ring->rx_hdr_pa.pages) {
                ret = xlgmac_alloc_pages(pdata, &ring->rx_hdr_pa,
                                         GFP_ATOMIC, 0);
                if (ret)
                        return ret;
        }

        if (!ring->rx_buf_pa.pages) {
                order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
                ret = xlgmac_alloc_pages(pdata, &ring->rx_buf_pa,
                                         GFP_ATOMIC, order);
                if (ret)
                        return ret;
        }

        /* Set up the header page info */
        xlgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa,
                               XLGMAC_SKB_ALLOC_SIZE);

        /* Set up the buffer page info */
        xlgmac_set_buffer_data(&desc_data->rx.buf, &ring->rx_buf_pa,
                               pdata->rx_buf_size);

        return 0;
}

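/* Point every Tx descriptor-data entry at its hardware descriptor and reset
 * ring state before handing the ring to the hardware layer.
 */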
static void xlgmac_tx_desc_init(struct xlgmac_pdata *pdata)
{
        struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
        struct xlgmac_desc_data *desc_data;
        struct xlgmac_dma_desc *dma_desc;
        struct xlgmac_channel *channel;
        struct xlgmac_ring *ring;
        dma_addr_t dma_desc_addr;
        unsigned int i, j;

        channel = pdata->channel_head;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->tx_ring;
                if (!ring)
                        break;

                dma_desc = ring->dma_desc_head;
                dma_desc_addr = ring->dma_desc_head_addr;

                for (j = 0; j < ring->dma_desc_count; j++) {
                        desc_data = XLGMAC_GET_DESC_DATA(ring, j);

                        desc_data->dma_desc = dma_desc;
                        desc_data->dma_desc_addr = dma_desc_addr;

                        dma_desc++;
                        dma_desc_addr += sizeof(struct xlgmac_dma_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;
                memset(&ring->tx, 0, sizeof(ring->tx));

                hw_ops->tx_desc_init(channel);
        }
}

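/* Same as the Tx variant, except each Rx descriptor is also given header
 * and payload buffers via xlgmac_map_rx_buffer().
 */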
static void xlgmac_rx_desc_init(struct xlgmac_pdata *pdata)
{
        struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops;
        struct xlgmac_desc_data *desc_data;
        struct xlgmac_dma_desc *dma_desc;
        struct xlgmac_channel *channel;
        struct xlgmac_ring *ring;
        dma_addr_t dma_desc_addr;
        unsigned int i, j;

        channel = pdata->channel_head;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->rx_ring;
                if (!ring)
                        break;

                dma_desc = ring->dma_desc_head;
                dma_desc_addr = ring->dma_desc_head_addr;

                for (j = 0; j < ring->dma_desc_count; j++) {
                        desc_data = XLGMAC_GET_DESC_DATA(ring, j);

                        desc_data->dma_desc = dma_desc;
                        desc_data->dma_desc_addr = dma_desc_addr;

                        if (xlgmac_map_rx_buffer(pdata, ring, desc_data))
                                break;

                        dma_desc++;
                        dma_desc_addr += sizeof(struct xlgmac_dma_desc);
                }

                ring->cur = 0;
                ring->dirty = 0;

                hw_ops->rx_desc_init(channel);
        }
}

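/* Map an skb for transmission. Descriptor entries are consumed in order:
 * an optional context descriptor (when the MSS or VLAN tag changes), the
 * TSO header, the linear data in XLGMAC_TX_MAX_BUF_SIZE chunks, then each
 * page fragment. Returns the number of descriptors used, or 0 on failure
 * with all mappings undone.
 */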
static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
                             struct sk_buff *skb)
{
        struct xlgmac_pdata *pdata = channel->pdata;
        struct xlgmac_ring *ring = channel->tx_ring;
        unsigned int start_index, cur_index;
        struct xlgmac_desc_data *desc_data;
        unsigned int offset, datalen, len;
        struct xlgmac_pkt_info *pkt_info;
        skb_frag_t *frag;
        unsigned int tso, vlan;
        dma_addr_t skb_dma;
        unsigned int i;

        offset = 0;
        start_index = ring->cur;
        cur_index = ring->cur;

        pkt_info = &ring->pkt_info;
        pkt_info->desc_count = 0;
        pkt_info->length = 0;

        tso = XLGMAC_GET_REG_BITS(pkt_info->attributes,
                                  TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS,
                                  TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN);
        vlan = XLGMAC_GET_REG_BITS(pkt_info->attributes,
                                   TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
                                   TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN);

        /* Save space for a context descriptor if needed */
        if ((tso && (pkt_info->mss != ring->tx.cur_mss)) ||
            (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
        desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);

        if (tso) {
                /* Map the TSO header */
                skb_dma = dma_map_single(pdata->dev, skb->data,
                                         pkt_info->header_len, DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                desc_data->skb_dma = skb_dma;
                desc_data->skb_dma_len = pkt_info->header_len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb header: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, pkt_info->header_len);

                offset = pkt_info->header_len;

                pkt_info->length += pkt_info->header_len;

                cur_index++;
                desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
        }

        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
                len = min_t(unsigned int, datalen, XLGMAC_TX_MAX_BUF_SIZE);

                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
                if (dma_mapping_error(pdata->dev, skb_dma)) {
                        netdev_alert(pdata->netdev, "dma_map_single failed\n");
                        goto err_out;
                }
                desc_data->skb_dma = skb_dma;
                desc_data->skb_dma_len = len;
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "skb data: index=%u, dma=%pad, len=%u\n",
                          cur_index, &skb_dma, len);

                datalen -= len;
                offset += len;
                pkt_info->length += len;

                cur_index++;
                desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                netif_dbg(pdata, tx_queued, pdata->netdev,
                          "mapping frag %u\n", i);

                frag = &skb_shinfo(skb)->frags[i];
                offset = 0;

                for (datalen = skb_frag_size(frag); datalen; ) {
                        len = min_t(unsigned int, datalen,
                                    XLGMAC_TX_MAX_BUF_SIZE);

                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
                        if (dma_mapping_error(pdata->dev, skb_dma)) {
                                netdev_alert(pdata->netdev,
                                             "skb_frag_dma_map failed\n");
                                goto err_out;
                        }
                        desc_data->skb_dma = skb_dma;
                        desc_data->skb_dma_len = len;
                        desc_data->mapped_as_page = 1;
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "skb frag: index=%u, dma=%pad, len=%u\n",
                                  cur_index, &skb_dma, len);

                        datalen -= len;
                        offset += len;
                        pkt_info->length += len;

                        cur_index++;
                        desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index);
                }
        }

        /* Save the skb address in the last entry. We always have some data
         * that has been mapped so desc_data is always advanced past the last
         * piece of mapped data - use the entry pointed to by cur_index - 1.
         */
        desc_data = XLGMAC_GET_DESC_DATA(ring, cur_index - 1);
        desc_data->skb = skb;

        /* Save the number of descriptor entries used */
        pkt_info->desc_count = cur_index - start_index;

        return pkt_info->desc_count;

err_out:
        while (start_index < cur_index) {
                desc_data = XLGMAC_GET_DESC_DATA(ring, start_index++);
                xlgmac_unmap_desc_data(pdata, desc_data);
        }

        return 0;
}

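/* Publish the descriptor operations used by the rest of the driver. */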
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
        desc_ops->alloc_channels_and_rings = xlgmac_alloc_channels_and_rings;
        desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
        desc_ops->map_tx_skb = xlgmac_map_tx_skb;
        desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
        desc_ops->unmap_desc_data = xlgmac_unmap_desc_data;
        desc_ops->tx_desc_init = xlgmac_tx_desc_init;
        desc_ops->rx_desc_init = xlgmac_rx_desc_init;
}