1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *          Ravi Patel <rapatel@apm.com>
6  *          Keyur Chudgar <kchudgar@apm.com>
7  *
8  * This program is free software; you can redistribute  it and/or modify it
9  * under  the terms of  the GNU General  Public License as published by the
10  * Free Software Foundation;  either version 2 of the  License, or (at your
11  * option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
20  */
21
22 #include <linux/gpio.h>
23 #include "xgene_enet_main.h"
24 #include "xgene_enet_hw.h"
25 #include "xgene_enet_sgmac.h"
26 #include "xgene_enet_xgmac.h"
27
28 #define RES_ENET_CSR    0
29 #define RES_RING_CSR    1
30 #define RES_RING_CMD    2
31
32 static const struct of_device_id xgene_enet_of_match[];
33 static const struct acpi_device_id xgene_enet_acpi_match[];
34
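/*
 * Seed every 16-byte descriptor in the buffer pool ring with its slot
 * index, the destination free-pool queue number and the STASH field; the
 * buffer addresses themselves are filled in later by
 * xgene_enet_refill_bufpool().
 */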
35 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
36 {
37         struct xgene_enet_raw_desc16 *raw_desc;
38         int i;
39
40         for (i = 0; i < buf_pool->slots; i++) {
41                 raw_desc = &buf_pool->raw_desc16[i];
42
43                 /* Hardware expects descriptor in little endian format */
44                 raw_desc->m0 = cpu_to_le64(i |
45                                 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
46                                 SET_VAL(STASH, 3));
47         }
48 }
49
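/*
 * Allocate nbuf receive skbs, DMA-map them and post them to the hardware
 * buffer pool starting at buf_pool->tail, then advance the tail and notify
 * the ring engine through wr_cmd().  Returns 0 on success or a negative
 * errno if an skb allocation or DMA mapping fails.
 */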
50 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
51                                      u32 nbuf)
52 {
53         struct sk_buff *skb;
54         struct xgene_enet_raw_desc16 *raw_desc;
55         struct xgene_enet_pdata *pdata;
56         struct net_device *ndev;
57         struct device *dev;
58         dma_addr_t dma_addr;
59         u32 tail = buf_pool->tail;
60         u32 slots = buf_pool->slots - 1;
61         u16 bufdatalen, len;
62         int i;
63
64         ndev = buf_pool->ndev;
65         dev = ndev_to_dev(buf_pool->ndev);
66         pdata = netdev_priv(ndev);
67         bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
68         len = XGENE_ENET_MAX_MTU;
69
70         for (i = 0; i < nbuf; i++) {
71                 raw_desc = &buf_pool->raw_desc16[tail];
72
73                 skb = netdev_alloc_skb_ip_align(ndev, len);
74                 if (unlikely(!skb))
75                         return -ENOMEM;
76
77                 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
78                 if (dma_mapping_error(dev, dma_addr)) {
79                         netdev_err(ndev, "DMA mapping error\n");
80                         dev_kfree_skb_any(skb);
81                         return -EINVAL;
82                 }
83
84                 buf_pool->rx_skb[tail] = skb;
85
86                 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
87                                            SET_VAL(BUFDATALEN, bufdatalen) |
88                                            SET_BIT(COHERENT));
89                 tail = (tail + 1) & slots;
90         }
91
92         pdata->ring_ops->wr_cmd(buf_pool, nbuf);
93         buf_pool->tail = tail;
94
95         return 0;
96 }
97
98 static u8 xgene_enet_hdr_len(const void *data)
99 {
100         const struct ethhdr *eth = data;
101
102         return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
103 }
104
105 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
106 {
107         struct device *dev = ndev_to_dev(buf_pool->ndev);
108         struct xgene_enet_raw_desc16 *raw_desc;
109         dma_addr_t dma_addr;
110         int i;
111
112         /* Free up the buffers held by hardware */
113         for (i = 0; i < buf_pool->slots; i++) {
114                 if (buf_pool->rx_skb[i]) {
115                         dev_kfree_skb_any(buf_pool->rx_skb[i]);
116
117                         raw_desc = &buf_pool->raw_desc16[i];
118                         dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
119                         dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
120                                          DMA_FROM_DEVICE);
121                 }
122         }
123 }
124
125 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
126 {
127         struct xgene_enet_desc_ring *rx_ring = data;
128
129         if (napi_schedule_prep(&rx_ring->napi)) {
130                 disable_irq_nosync(irq);
131                 __napi_schedule(&rx_ring->napi);
132         }
133
134         return IRQ_HANDLED;
135 }
136
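/*
 * Reclaim one transmitted skb, identified by the USERINFO index in the
 * completion descriptor: unmap the head and all fragment buffers, drop the
 * MSS slot reference taken at transmit time when the ET bit is set, report
 * any LERR status and free the skb.
 */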
137 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
138                                     struct xgene_enet_raw_desc *raw_desc)
139 {
140         struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
141         struct sk_buff *skb;
142         struct device *dev;
143         skb_frag_t *frag;
144         dma_addr_t *frag_dma_addr;
145         u16 skb_index;
146         u8 status;
147         int i, ret = 0;
148         u8 mss_index;
149
150         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
151         skb = cp_ring->cp_skb[skb_index];
152         frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
153
154         dev = ndev_to_dev(cp_ring->ndev);
155         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
156                          skb_headlen(skb),
157                          DMA_TO_DEVICE);
158
159         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
160                 frag = &skb_shinfo(skb)->frags[i];
161                 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
162                                DMA_TO_DEVICE);
163         }
164
165         if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
166                 mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
167                 spin_lock(&pdata->mss_lock);
168                 pdata->mss_refcnt[mss_index]--;
169                 spin_unlock(&pdata->mss_lock);
170         }
171
172         /* Checking for error */
173         status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
174         if (unlikely(status > 2)) {
175                 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
176                                        status);
177                 ret = -EIO;
178         }
179
180         if (likely(skb)) {
181                 dev_kfree_skb_any(skb);
182         } else {
183                 netdev_err(cp_ring->ndev, "completion skb is NULL\n");
184                 ret = -EIO;
185         }
186
187         return ret;
188 }
189
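/*
 * Find a hardware MSS register slot for the given TSO segment size: reuse a
 * slot already programmed with this MSS, otherwise claim a slot whose
 * refcount has dropped to zero and program it via set_mss().  Returns the
 * slot index, or -EBUSY when no slot can be reused or reclaimed.
 */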
190 static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
191 {
192         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
193         bool mss_index_found = false;
194         int mss_index;
195         int i;
196
197         spin_lock(&pdata->mss_lock);
198
199         /* Reuse the slot if MSS matches */
200         for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
201                 if (pdata->mss[i] == mss) {
202                         pdata->mss_refcnt[i]++;
203                         mss_index = i;
204                         mss_index_found = true;
205                 }
206         }
207
208         /* Overwrite the slot with ref_count = 0 */
209         for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
210                 if (!pdata->mss_refcnt[i]) {
211                         pdata->mss_refcnt[i]++;
212                         pdata->mac_ops->set_mss(pdata, mss, i);
213                         pdata->mss[i] = mss;
214                         mss_index = i;
215                         mss_index_found = true;
216                 }
217         }
218
219         spin_unlock(&pdata->mss_lock);
220
221         /* No slots with ref_count = 0 available, return busy */
222         if (!mss_index_found)
223                 return -EBUSY;
224
225         return mss_index;
226 }
227
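/*
 * Build the checksum/TSO portion of the TX work message: record the
 * Ethernet, IP and TCP/UDP header lengths, enable hardware checksumming for
 * TCP and UDP over IPv4 and, when TSO applies, claim an MSS register slot
 * and flag it with the ET bit.  The resulting fields are OR-ed into
 * *hopinfo, which ends up in descriptor word m3.
 */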
228 static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
229 {
230         struct net_device *ndev = skb->dev;
231         struct iphdr *iph;
232         u8 l3hlen = 0, l4hlen = 0;
233         u8 ethhdr, proto = 0, csum_enable = 0;
234         u32 hdr_len, mss = 0;
235         u32 i, len, nr_frags;
236         int mss_index;
237
238         ethhdr = xgene_enet_hdr_len(skb->data);
239
240         if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
241             unlikely(skb->protocol != htons(ETH_P_8021Q)))
242                 goto out;
243
244         if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
245                 goto out;
246
247         iph = ip_hdr(skb);
248         if (unlikely(ip_is_fragment(iph)))
249                 goto out;
250
251         if (likely(iph->protocol == IPPROTO_TCP)) {
252                 l4hlen = tcp_hdrlen(skb) >> 2;
253                 csum_enable = 1;
254                 proto = TSO_IPPROTO_TCP;
255                 if (ndev->features & NETIF_F_TSO) {
256                         hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
257                         mss = skb_shinfo(skb)->gso_size;
258
259                         if (skb_is_nonlinear(skb)) {
260                                 len = skb_headlen(skb);
261                                 nr_frags = skb_shinfo(skb)->nr_frags;
262
263                                 for (i = 0; i < 2 && i < nr_frags; i++)
264                                         len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
265
266                                 /* HW requires the header to reside within the first 3 buffers */
267                                 if (unlikely(hdr_len > len)) {
268                                         if (skb_linearize(skb))
269                                                 return 0;
270                                 }
271                         }
272
273                         if (!mss || ((skb->len - hdr_len) <= mss))
274                                 goto out;
275
276                         mss_index = xgene_enet_setup_mss(ndev, mss);
277                         if (unlikely(mss_index < 0))
278                                 return -EBUSY;
279
280                         *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
281                 }
282         } else if (iph->protocol == IPPROTO_UDP) {
283                 l4hlen = UDP_HDR_SIZE;
284                 csum_enable = 1;
285         }
286 out:
287         l3hlen = ip_hdrlen(skb) >> 2;
288         *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
289                     SET_VAL(IPHDR, l3hlen) |
290                     SET_VAL(ETHHDR, ethhdr) |
291                     SET_VAL(EC, csum_enable) |
292                     SET_VAL(IS, proto) |
293                     SET_BIT(IC) |
294                     SET_BIT(TYPE_ETH_WORK_MESSAGE);
295
296         return 0;
297 }
298
299 static u16 xgene_enet_encode_len(u16 len)
300 {
301         return (len == BUFLEN_16K) ? 0 : len;
302 }
303
304 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
305 {
306         desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
307                                     SET_VAL(BUFDATALEN, len));
308 }
309
310 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
311 {
312         __le64 *exp_bufs;
313
314         exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
315         memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
316         ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
317
318         return exp_bufs;
319 }
320
321 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
322 {
323         return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
324 }
325
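/*
 * Fill the TX ring descriptor(s) for one skb.  The linear part occupies the
 * first descriptor; a non-linear skb additionally consumes a second (exp)
 * descriptor and, beyond the first four buffers or when a fragment larger
 * than 16KB must be split, a separately mapped extended (LL) buffer list.
 * Returns the number of ring slots consumed or a negative errno.
 */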
326 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
327                                     struct sk_buff *skb)
328 {
329         struct device *dev = ndev_to_dev(tx_ring->ndev);
330         struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
331         struct xgene_enet_raw_desc *raw_desc;
332         __le64 *exp_desc = NULL, *exp_bufs = NULL;
333         dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
334         skb_frag_t *frag;
335         u16 tail = tx_ring->tail;
336         u64 hopinfo = 0;
337         u32 len, hw_len;
338         u8 ll = 0, nv = 0, idx = 0;
339         bool split = false;
340         u32 size, offset, ell_bytes = 0;
341         u32 i, fidx, nr_frags, count = 1;
342         int ret;
343
344         raw_desc = &tx_ring->raw_desc[tail];
345         tail = (tail + 1) & (tx_ring->slots - 1);
346         memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
347
348         ret = xgene_enet_work_msg(skb, &hopinfo);
349         if (ret)
350                 return ret;
351
352         raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
353                                    hopinfo);
354
355         len = skb_headlen(skb);
356         hw_len = xgene_enet_encode_len(len);
357
358         dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
359         if (dma_mapping_error(dev, dma_addr)) {
360                 netdev_err(tx_ring->ndev, "DMA mapping error\n");
361                 return -EINVAL;
362         }
363
364         /* Hardware expects descriptor in little endian format */
365         raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
366                                    SET_VAL(BUFDATALEN, hw_len) |
367                                    SET_BIT(COHERENT));
368
369         if (!skb_is_nonlinear(skb))
370                 goto out;
371
372         /* scatter gather */
373         nv = 1;
374         exp_desc = (void *)&tx_ring->raw_desc[tail];
375         tail = (tail + 1) & (tx_ring->slots - 1);
376         memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
377
378         nr_frags = skb_shinfo(skb)->nr_frags;
379         for (i = nr_frags; i < 4 ; i++)
380                 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
381
382         frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
383
384         for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
385                 if (!split) {
386                         frag = &skb_shinfo(skb)->frags[fidx];
387                         size = skb_frag_size(frag);
388                         offset = 0;
389
390                         pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
391                                                      DMA_TO_DEVICE);
392                         if (dma_mapping_error(dev, pbuf_addr))
393                                 return -EINVAL;
394
395                         frag_dma_addr[fidx] = pbuf_addr;
396                         fidx++;
397
398                         if (size > BUFLEN_16K)
399                                 split = true;
400                 }
401
402                 if (size > BUFLEN_16K) {
403                         len = BUFLEN_16K;
404                         size -= BUFLEN_16K;
405                 } else {
406                         len = size;
407                         split = false;
408                 }
409
410                 dma_addr = pbuf_addr + offset;
411                 hw_len = xgene_enet_encode_len(len);
412
413                 switch (i) {
414                 case 0:
415                 case 1:
416                 case 2:
417                         xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
418                         break;
419                 case 3:
420                         if (split || (fidx != nr_frags)) {
421                                 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
422                                 xgene_set_addr_len(exp_bufs, idx, dma_addr,
423                                                    hw_len);
424                                 idx++;
425                                 ell_bytes += len;
426                         } else {
427                                 xgene_set_addr_len(exp_desc, i, dma_addr,
428                                                    hw_len);
429                         }
430                         break;
431                 default:
432                         xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
433                         idx++;
434                         ell_bytes += len;
435                         break;
436                 }
437
438                 if (split)
439                         offset += BUFLEN_16K;
440         }
441         count++;
442
443         if (idx) {
444                 ll = 1;
445                 dma_addr = dma_map_single(dev, exp_bufs,
446                                           sizeof(u64) * MAX_EXP_BUFFS,
447                                           DMA_TO_DEVICE);
448                 if (dma_mapping_error(dev, dma_addr)) {
449                         dev_kfree_skb_any(skb);
450                         return -EINVAL;
451                 }
452                 i = ell_bytes >> LL_BYTES_LSB_LEN;
453                 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
454                                           SET_VAL(LL_BYTES_MSB, i) |
455                                           SET_VAL(LL_LEN, idx));
456                 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
457         }
458
459 out:
460         raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
461                                    SET_VAL(USERINFO, tx_ring->tail));
462         tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
463         pdata->tx_level[tx_ring->cp_ring->index] += count;
464         tx_ring->tail = tail;
465
466         return count;
467 }
468
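/*
 * .ndo_start_xmit handler: apply back-pressure when the number of
 * outstanding descriptors exceeds tx_qcnt_hi, pad short frames to the
 * minimum Ethernet frame size, build the descriptors via
 * xgene_enet_setup_tx_desc() and ring the doorbell with wr_cmd().
 */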
469 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
470                                          struct net_device *ndev)
471 {
472         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
473         struct xgene_enet_desc_ring *tx_ring;
474         int index = skb->queue_mapping;
475         u32 tx_level = pdata->tx_level[index];
476         int count;
477
478         tx_ring = pdata->tx_ring[index];
479         if (tx_level < pdata->txc_level[index])
480                 tx_level += ((typeof(pdata->tx_level[index]))~0U);
481
482         if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
483                 netif_stop_subqueue(ndev, index);
484                 return NETDEV_TX_BUSY;
485         }
486
487         if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
488                 return NETDEV_TX_OK;
489
490         count = xgene_enet_setup_tx_desc(tx_ring, skb);
491         if (count == -EBUSY)
492                 return NETDEV_TX_BUSY;
493
494         if (count <= 0) {
495                 dev_kfree_skb_any(skb);
496                 return NETDEV_TX_OK;
497         }
498
499         skb_tx_timestamp(skb);
500
501         tx_ring->tx_packets++;
502         tx_ring->tx_bytes += skb->len;
503
504         pdata->ring_ops->wr_cmd(tx_ring, count);
505         return NETDEV_TX_OK;
506 }
507
508 static void xgene_enet_skip_csum(struct sk_buff *skb)
509 {
510         struct iphdr *iph = ip_hdr(skb);
511
512         if (!ip_is_fragment(iph) ||
513             (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
514                 skb->ip_summed = CHECKSUM_UNNECESSARY;
515         }
516 }
517
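/*
 * Process one received frame: unmap the buffer, detach the skb from the
 * buffer pool, drop it on an ELERR/LERR error status, otherwise trim the
 * 4-byte CRC that the hardware does not strip, set the checksum state and
 * hand the skb to GRO.  The buffer pool is refilled every NUM_BUFPOOL
 * frames.
 */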
518 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
519                                struct xgene_enet_raw_desc *raw_desc)
520 {
521         struct net_device *ndev;
522         struct device *dev;
523         struct xgene_enet_desc_ring *buf_pool;
524         u32 datalen, skb_index;
525         struct sk_buff *skb;
526         u8 status;
527         int ret = 0;
528
529         ndev = rx_ring->ndev;
530         dev = ndev_to_dev(rx_ring->ndev);
531         buf_pool = rx_ring->buf_pool;
532
533         dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
534                          XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
535         skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
536         skb = buf_pool->rx_skb[skb_index];
537         buf_pool->rx_skb[skb_index] = NULL;
538
539         /* checking for error */
540         status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
541                   GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
542         if (unlikely(status > 2)) {
543                 dev_kfree_skb_any(skb);
544                 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
545                                        status);
546                 ret = -EIO;
547                 goto out;
548         }
549
550         /* strip off CRC as HW isn't doing this */
551         datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
552         datalen = (datalen & DATALEN_MASK) - 4;
553         prefetch(skb->data - NET_IP_ALIGN);
554         skb_put(skb, datalen);
555
556         skb_checksum_none_assert(skb);
557         skb->protocol = eth_type_trans(skb, ndev);
558         if (likely((ndev->features & NETIF_F_IP_CSUM) &&
559                    skb->protocol == htons(ETH_P_IP))) {
560                 xgene_enet_skip_csum(skb);
561         }
562
563         rx_ring->rx_packets++;
564         rx_ring->rx_bytes += datalen;
565         napi_gro_receive(&rx_ring->napi, skb);
566 out:
567         if (--rx_ring->nbufpool == 0) {
568                 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
569                 rx_ring->nbufpool = NUM_BUFPOOL;
570         }
571
572         return ret;
573 }
574
575 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
576 {
577         return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
578 }
579
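/*
 * NAPI worker for a receive/completion ring: walk descriptors from
 * ring->head until an empty slot is found or the budget is exhausted,
 * dispatching each to the RX or TX-completion handler (NV descriptors
 * consume an extra slot), then return the consumed slot count to the ring
 * engine and restart the TX subqueue if it was stopped for back-pressure.
 */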
580 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
581                                    int budget)
582 {
583         struct net_device *ndev = ring->ndev;
584         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
585         struct xgene_enet_raw_desc *raw_desc, *exp_desc;
586         u16 head = ring->head;
587         u16 slots = ring->slots - 1;
588         int ret, desc_count, count = 0, processed = 0;
589         bool is_completion;
590
591         do {
592                 raw_desc = &ring->raw_desc[head];
593                 desc_count = 0;
594                 is_completion = false;
595                 exp_desc = NULL;
596                 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
597                         break;
598
599                 /* read fpqnum field after dataaddr field */
600                 dma_rmb();
601                 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
602                         head = (head + 1) & slots;
603                         exp_desc = &ring->raw_desc[head];
604
605                         if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
606                                 head = (head - 1) & slots;
607                                 break;
608                         }
609                         dma_rmb();
610                         count++;
611                         desc_count++;
612                 }
613                 if (is_rx_desc(raw_desc)) {
614                         ret = xgene_enet_rx_frame(ring, raw_desc);
615                 } else {
616                         ret = xgene_enet_tx_completion(ring, raw_desc);
617                         is_completion = true;
618                 }
619                 xgene_enet_mark_desc_slot_empty(raw_desc);
620                 if (exp_desc)
621                         xgene_enet_mark_desc_slot_empty(exp_desc);
622
623                 head = (head + 1) & slots;
624                 count++;
625                 desc_count++;
626                 processed++;
627                 if (is_completion)
628                         pdata->txc_level[ring->index] += desc_count;
629
630                 if (ret)
631                         break;
632         } while (--budget);
633
634         if (likely(count)) {
635                 pdata->ring_ops->wr_cmd(ring, -count);
636                 ring->head = head;
637
638                 if (__netif_subqueue_stopped(ndev, ring->index))
639                         netif_start_subqueue(ndev, ring->index);
640         }
641
642         return processed;
643 }
644
645 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
646 {
647         struct xgene_enet_desc_ring *ring;
648         int processed;
649
650         ring = container_of(napi, struct xgene_enet_desc_ring, napi);
651         processed = xgene_enet_process_ring(ring, budget);
652
653         if (processed != budget) {
654                 napi_complete(napi);
655                 enable_irq(ring->irq);
656         }
657
658         return processed;
659 }
660
661 static void xgene_enet_timeout(struct net_device *ndev)
662 {
663         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
664         struct netdev_queue *txq;
665         int i;
666
667         pdata->mac_ops->reset(pdata);
668
669         for (i = 0; i < pdata->txq_cnt; i++) {
670                 txq = netdev_get_tx_queue(ndev, i);
671                 txq->trans_start = jiffies;
672                 netif_tx_start_queue(txq);
673         }
674 }
675
676 static void xgene_enet_set_irq_name(struct net_device *ndev)
677 {
678         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
679         struct xgene_enet_desc_ring *ring;
680         int i;
681
682         for (i = 0; i < pdata->rxq_cnt; i++) {
683                 ring = pdata->rx_ring[i];
684                 if (!pdata->cq_cnt) {
685                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
686                                  ndev->name);
687                 } else {
688                         snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
689                                  ndev->name, i);
690                 }
691         }
692
693         for (i = 0; i < pdata->cq_cnt; i++) {
694                 ring = pdata->tx_ring[i]->cp_ring;
695                 snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
696                          ndev->name, i);
697         }
698 }
699
700 static int xgene_enet_register_irq(struct net_device *ndev)
701 {
702         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
703         struct device *dev = ndev_to_dev(ndev);
704         struct xgene_enet_desc_ring *ring;
705         int ret = 0, i;
706
707         xgene_enet_set_irq_name(ndev);
708         for (i = 0; i < pdata->rxq_cnt; i++) {
709                 ring = pdata->rx_ring[i];
710                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
711                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
712                                        0, ring->irq_name, ring);
713                 if (ret) {
714                         netdev_err(ndev, "Failed to request irq %s\n",
715                                    ring->irq_name);
716                 }
717         }
718
719         for (i = 0; i < pdata->cq_cnt; i++) {
720                 ring = pdata->tx_ring[i]->cp_ring;
721                 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
722                 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
723                                        0, ring->irq_name, ring);
724                 if (ret) {
725                         netdev_err(ndev, "Failed to request irq %s\n",
726                                    ring->irq_name);
727                 }
728         }
729
730         return ret;
731 }
732
733 static void xgene_enet_free_irq(struct net_device *ndev)
734 {
735         struct xgene_enet_pdata *pdata;
736         struct xgene_enet_desc_ring *ring;
737         struct device *dev;
738         int i;
739
740         pdata = netdev_priv(ndev);
741         dev = ndev_to_dev(ndev);
742
743         for (i = 0; i < pdata->rxq_cnt; i++) {
744                 ring = pdata->rx_ring[i];
745                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
746                 devm_free_irq(dev, ring->irq, ring);
747         }
748
749         for (i = 0; i < pdata->cq_cnt; i++) {
750                 ring = pdata->tx_ring[i]->cp_ring;
751                 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
752                 devm_free_irq(dev, ring->irq, ring);
753         }
754 }
755
756 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
757 {
758         struct napi_struct *napi;
759         int i;
760
761         for (i = 0; i < pdata->rxq_cnt; i++) {
762                 napi = &pdata->rx_ring[i]->napi;
763                 napi_enable(napi);
764         }
765
766         for (i = 0; i < pdata->cq_cnt; i++) {
767                 napi = &pdata->tx_ring[i]->cp_ring->napi;
768                 napi_enable(napi);
769         }
770 }
771
772 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
773 {
774         struct napi_struct *napi;
775         int i;
776
777         for (i = 0; i < pdata->rxq_cnt; i++) {
778                 napi = &pdata->rx_ring[i]->napi;
779                 napi_disable(napi);
780         }
781
782         for (i = 0; i < pdata->cq_cnt; i++) {
783                 napi = &pdata->tx_ring[i]->cp_ring->napi;
784                 napi_disable(napi);
785         }
786 }
787
788 static int xgene_enet_open(struct net_device *ndev)
789 {
790         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
791         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
792         int ret;
793
794         ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
795         if (ret)
796                 return ret;
797
798         ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
799         if (ret)
800                 return ret;
801
802         xgene_enet_napi_enable(pdata);
803         ret = xgene_enet_register_irq(ndev);
804         if (ret)
805                 return ret;
806
807         if (ndev->phydev) {
808                 phy_start(ndev->phydev);
809         } else {
810                 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
811                 netif_carrier_off(ndev);
812         }
813
814         mac_ops->tx_enable(pdata);
815         mac_ops->rx_enable(pdata);
816         netif_tx_start_all_queues(ndev);
817
818         return ret;
819 }
820
821 static int xgene_enet_close(struct net_device *ndev)
822 {
823         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
824         const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
825         int i;
826
827         netif_tx_stop_all_queues(ndev);
828         mac_ops->tx_disable(pdata);
829         mac_ops->rx_disable(pdata);
830
831         if (ndev->phydev)
832                 phy_stop(ndev->phydev);
833         else
834                 cancel_delayed_work_sync(&pdata->link_work);
835
836         xgene_enet_free_irq(ndev);
837         xgene_enet_napi_disable(pdata);
838         for (i = 0; i < pdata->rxq_cnt; i++)
839                 xgene_enet_process_ring(pdata->rx_ring[i], -1);
840
841         return 0;
842 }

843 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
844 {
845         struct xgene_enet_pdata *pdata;
846         struct device *dev;
847
848         pdata = netdev_priv(ring->ndev);
849         dev = ndev_to_dev(ring->ndev);
850
851         pdata->ring_ops->clear(ring);
852         dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
853 }
854
855 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
856 {
857         struct xgene_enet_desc_ring *buf_pool;
858         struct xgene_enet_desc_ring *ring;
859         int i;
860
861         for (i = 0; i < pdata->txq_cnt; i++) {
862                 ring = pdata->tx_ring[i];
863                 if (ring) {
864                         xgene_enet_delete_ring(ring);
865                         pdata->port_ops->clear(pdata, ring);
866                         if (pdata->cq_cnt)
867                                 xgene_enet_delete_ring(ring->cp_ring);
868                         pdata->tx_ring[i] = NULL;
869                 }
870         }
871
872         for (i = 0; i < pdata->rxq_cnt; i++) {
873                 ring = pdata->rx_ring[i];
874                 if (ring) {
875                         buf_pool = ring->buf_pool;
876                         xgene_enet_delete_bufpool(buf_pool);
877                         xgene_enet_delete_ring(buf_pool);
878                         pdata->port_ops->clear(pdata, buf_pool);
879                         xgene_enet_delete_ring(ring);
880                         pdata->rx_ring[i] = NULL;
881                 }
882         }
883 }
884
885 static int xgene_enet_get_ring_size(struct device *dev,
886                                     enum xgene_enet_ring_cfgsize cfgsize)
887 {
888         int size = -EINVAL;
889
890         switch (cfgsize) {
891         case RING_CFGSIZE_512B:
892                 size = 0x200;
893                 break;
894         case RING_CFGSIZE_2KB:
895                 size = 0x800;
896                 break;
897         case RING_CFGSIZE_16KB:
898                 size = 0x4000;
899                 break;
900         case RING_CFGSIZE_64KB:
901                 size = 0x10000;
902                 break;
903         case RING_CFGSIZE_512KB:
904                 size = 0x80000;
905                 break;
906         default:
907                 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
908                 break;
909         }
910
911         return size;
912 }
913
914 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
915 {
916         struct xgene_enet_pdata *pdata;
917         struct device *dev;
918
919         if (!ring)
920                 return;
921
922         dev = ndev_to_dev(ring->ndev);
923         pdata = netdev_priv(ring->ndev);
924
925         if (ring->desc_addr) {
926                 pdata->ring_ops->clear(ring);
927                 dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
928         }
929         devm_kfree(dev, ring);
930 }
931
932 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
933 {
934         struct device *dev = &pdata->pdev->dev;
935         struct xgene_enet_desc_ring *ring;
936         int i;
937
938         for (i = 0; i < pdata->txq_cnt; i++) {
939                 ring = pdata->tx_ring[i];
940                 if (ring) {
941                         if (ring->cp_ring && ring->cp_ring->cp_skb)
942                                 devm_kfree(dev, ring->cp_ring->cp_skb);
943                         if (ring->cp_ring && pdata->cq_cnt)
944                                 xgene_enet_free_desc_ring(ring->cp_ring);
945                         xgene_enet_free_desc_ring(ring);
946                 }
947         }
948
949         for (i = 0; i < pdata->rxq_cnt; i++) {
950                 ring = pdata->rx_ring[i];
951                 if (ring) {
952                         if (ring->buf_pool) {
953                                 if (ring->buf_pool->rx_skb)
954                                         devm_kfree(dev, ring->buf_pool->rx_skb);
955                                 xgene_enet_free_desc_ring(ring->buf_pool);
956                         }
957                         xgene_enet_free_desc_ring(ring);
958                 }
959         }
960 }
961
962 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
963                                  struct xgene_enet_desc_ring *ring)
964 {
965         if ((pdata->enet_id == XGENE_ENET2) &&
966             (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
967                 return true;
968         }
969
970         return false;
971 }
972
973 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
974                                               struct xgene_enet_desc_ring *ring)
975 {
976         u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
977
978         return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
979 }
980
981 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
982                         struct net_device *ndev, u32 ring_num,
983                         enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
984 {
985         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
986         struct device *dev = ndev_to_dev(ndev);
987         struct xgene_enet_desc_ring *ring;
988         void *irq_mbox_addr;
989         int size;
990
991         size = xgene_enet_get_ring_size(dev, cfgsize);
992         if (size < 0)
993                 return NULL;
994
995         ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
996                             GFP_KERNEL);
997         if (!ring)
998                 return NULL;
999
1000         ring->ndev = ndev;
1001         ring->num = ring_num;
1002         ring->cfgsize = cfgsize;
1003         ring->id = ring_id;
1004
1005         ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
1006                                               GFP_KERNEL | __GFP_ZERO);
1007         if (!ring->desc_addr) {
1008                 devm_kfree(dev, ring);
1009                 return NULL;
1010         }
1011         ring->size = size;
1012
1013         if (is_irq_mbox_required(pdata, ring)) {
1014                 irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
1015                                                     &ring->irq_mbox_dma,
1016                                                     GFP_KERNEL | __GFP_ZERO);
1017                 if (!irq_mbox_addr) {
1018                         dmam_free_coherent(dev, size, ring->desc_addr,
1019                                            ring->dma);
1020                         devm_kfree(dev, ring);
1021                         return NULL;
1022                 }
1023                 ring->irq_mbox_addr = irq_mbox_addr;
1024         }
1025
1026         ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
1027         ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
1028         ring = pdata->ring_ops->setup(ring);
1029         netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
1030                    ring->num, ring->size, ring->id, ring->slots);
1031
1032         return ring;
1033 }
1034
1035 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
1036 {
1037         return (owner << 6) | (bufnum & GENMASK(5, 0));
1038 }
1039
1040 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
1041 {
1042         enum xgene_ring_owner owner;
1043
1044         if (p->enet_id == XGENE_ENET1) {
1045                 switch (p->phy_mode) {
1046                 case PHY_INTERFACE_MODE_SGMII:
1047                         owner = RING_OWNER_ETH0;
1048                         break;
1049                 default:
1050                         owner = (!p->port_id) ? RING_OWNER_ETH0 :
1051                                                 RING_OWNER_ETH1;
1052                         break;
1053                 }
1054         } else {
1055                 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
1056         }
1057
1058         return owner;
1059 }
1060
1061 static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
1062 {
1063         struct device *dev = &pdata->pdev->dev;
1064         u32 cpu_bufnum;
1065         int ret;
1066
1067         ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
1068
1069         return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
1070 }
1071
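/*
 * Allocate the per-queue descriptor rings: a 16KB RX ring plus a 2KB buffer
 * pool for every RX queue, and a 16KB TX ring with its expansion buffer
 * area and a completion ring (shared with the RX ring when cq_cnt is zero)
 * for every TX queue.  On any failure, all rings allocated so far are
 * freed.
 */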
1072 static int xgene_enet_create_desc_rings(struct net_device *ndev)
1073 {
1074         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1075         struct device *dev = ndev_to_dev(ndev);
1076         struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
1077         struct xgene_enet_desc_ring *buf_pool = NULL;
1078         enum xgene_ring_owner owner;
1079         dma_addr_t dma_exp_bufs;
1080         u8 cpu_bufnum;
1081         u8 eth_bufnum = pdata->eth_bufnum;
1082         u8 bp_bufnum = pdata->bp_bufnum;
1083         u16 ring_num = pdata->ring_num;
1084         __le64 *exp_bufs;
1085         u16 ring_id;
1086         int i, ret, size;
1087
1088         cpu_bufnum = xgene_start_cpu_bufnum(pdata);
1089
1090         for (i = 0; i < pdata->rxq_cnt; i++) {
1091                 /* allocate rx descriptor ring */
1092                 owner = xgene_derive_ring_owner(pdata);
1093                 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
1094                 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1095                                                       RING_CFGSIZE_16KB,
1096                                                       ring_id);
1097                 if (!rx_ring) {
1098                         ret = -ENOMEM;
1099                         goto err;
1100                 }
1101
1102                 /* allocate buffer pool for receiving packets */
1103                 owner = xgene_derive_ring_owner(pdata);
1104                 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1105                 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1106                                                        RING_CFGSIZE_2KB,
1107                                                        ring_id);
1108                 if (!buf_pool) {
1109                         ret = -ENOMEM;
1110                         goto err;
1111                 }
1112
1113                 rx_ring->nbufpool = NUM_BUFPOOL;
1114                 rx_ring->buf_pool = buf_pool;
1115                 rx_ring->irq = pdata->irqs[i];
1116                 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1117                                                 sizeof(struct sk_buff *),
1118                                                 GFP_KERNEL);
1119                 if (!buf_pool->rx_skb) {
1120                         ret = -ENOMEM;
1121                         goto err;
1122                 }
1123
1124                 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1125                 rx_ring->buf_pool = buf_pool;
1126                 pdata->rx_ring[i] = rx_ring;
1127         }
1128
1129         for (i = 0; i < pdata->txq_cnt; i++) {
1130                 /* allocate tx descriptor ring */
1131                 owner = xgene_derive_ring_owner(pdata);
1132                 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1133                 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1134                                                       RING_CFGSIZE_16KB,
1135                                                       ring_id);
1136                 if (!tx_ring) {
1137                         ret = -ENOMEM;
1138                         goto err;
1139                 }
1140
1141                 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1142                 exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
1143                                                GFP_KERNEL | __GFP_ZERO);
1144                 if (!exp_bufs) {
1145                         ret = -ENOMEM;
1146                         goto err;
1147                 }
1148                 tx_ring->exp_bufs = exp_bufs;
1149
1150                 pdata->tx_ring[i] = tx_ring;
1151
1152                 if (!pdata->cq_cnt) {
1153                         cp_ring = pdata->rx_ring[i];
1154                 } else {
1155                         /* allocate tx completion descriptor ring */
1156                         ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1157                                                          cpu_bufnum++);
1158                         cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1159                                                               RING_CFGSIZE_16KB,
1160                                                               ring_id);
1161                         if (!cp_ring) {
1162                                 ret = -ENOMEM;
1163                                 goto err;
1164                         }
1165
1166                         cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1167                         cp_ring->index = i;
1168                 }
1169
1170                 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1171                                                sizeof(struct sk_buff *),
1172                                                GFP_KERNEL);
1173                 if (!cp_ring->cp_skb) {
1174                         ret = -ENOMEM;
1175                         goto err;
1176                 }
1177
1178                 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1179                 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1180                                                       size, GFP_KERNEL);
1181                 if (!cp_ring->frag_dma_addr) {
1182                         devm_kfree(dev, cp_ring->cp_skb);
1183                         ret = -ENOMEM;
1184                         goto err;
1185                 }
1186
1187                 tx_ring->cp_ring = cp_ring;
1188                 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1189         }
1190
1191         if (pdata->ring_ops->coalesce)
1192                 pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1193         pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1194
1195         return 0;
1196
1197 err:
1198         xgene_enet_free_desc_rings(pdata);
1199         return ret;
1200 }
1201
1202 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1203                         struct net_device *ndev,
1204                         struct rtnl_link_stats64 *storage)
1205 {
1206         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1207         struct rtnl_link_stats64 *stats = &pdata->stats;
1208         struct xgene_enet_desc_ring *ring;
1209         int i;
1210
1211         memset(stats, 0, sizeof(struct rtnl_link_stats64));
1212         for (i = 0; i < pdata->txq_cnt; i++) {
1213                 ring = pdata->tx_ring[i];
1214                 if (ring) {
1215                         stats->tx_packets += ring->tx_packets;
1216                         stats->tx_bytes += ring->tx_bytes;
1217                 }
1218         }
1219
1220         for (i = 0; i < pdata->rxq_cnt; i++) {
1221                 ring = pdata->rx_ring[i];
1222                 if (ring) {
1223                         stats->rx_packets += ring->rx_packets;
1224                         stats->rx_bytes += ring->rx_bytes;
1225                         stats->rx_errors += ring->rx_length_errors +
1226                                 ring->rx_crc_errors +
1227                                 ring->rx_frame_errors +
1228                                 ring->rx_fifo_errors;
1229                         stats->rx_dropped += ring->rx_dropped;
1230                 }
1231         }
1232         memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
1233
1234         return storage;
1235 }
1236
1237 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1238 {
1239         struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1240         int ret;
1241
1242         ret = eth_mac_addr(ndev, addr);
1243         if (ret)
1244                 return ret;
1245         pdata->mac_ops->set_mac_addr(pdata);
1246
1247         return ret;
1248 }
1249
1250 static const struct net_device_ops xgene_ndev_ops = {
1251         .ndo_open = xgene_enet_open,
1252         .ndo_stop = xgene_enet_close,
1253         .ndo_start_xmit = xgene_enet_start_xmit,
1254         .ndo_tx_timeout = xgene_enet_timeout,
1255         .ndo_get_stats64 = xgene_enet_get_stats64,
1256         .ndo_set_mac_address = xgene_enet_set_mac_address,
1257 };
1258
1259 #ifdef CONFIG_ACPI
1260 static void xgene_get_port_id_acpi(struct device *dev,
1261                                   struct xgene_enet_pdata *pdata)
1262 {
1263         acpi_status status;
1264         u64 temp;
1265
1266         status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1267         if (ACPI_FAILURE(status)) {
1268                 pdata->port_id = 0;
1269         } else {
1270                 pdata->port_id = temp;
1271         }
1272
1273         return;
1274 }
1275 #endif
1276
1277 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1278 {
1279         u32 id = 0;
1280
1281         of_property_read_u32(dev->of_node, "port-id", &id);
1282
1283         pdata->port_id = id & BIT(0);
1284
1285         return;
1286 }
1287
1288 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1289 {
1290         struct device *dev = &pdata->pdev->dev;
1291         int delay, ret;
1292
1293         ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1294         if (ret) {
1295                 pdata->tx_delay = 4;
1296                 return 0;
1297         }
1298
1299         if (delay < 0 || delay > 7) {
1300                 dev_err(dev, "Invalid tx-delay specified\n");
1301                 return -EINVAL;
1302         }
1303
1304         pdata->tx_delay = delay;
1305
1306         return 0;
1307 }
1308
1309 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1310 {
1311         struct device *dev = &pdata->pdev->dev;
1312         int delay, ret;
1313
1314         ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1315         if (ret) {
1316                 pdata->rx_delay = 2;
1317                 return 0;
1318         }
1319
1320         if (delay < 0 || delay > 7) {
1321                 dev_err(dev, "Invalid rx-delay specified\n");
1322                 return -EINVAL;
1323         }
1324
1325         pdata->rx_delay = delay;
1326
1327         return 0;
1328 }
1329
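/*
 * Fetch the platform IRQs: one for RGMII, two for SGMII and up to
 * XGENE_MAX_ENET_IRQ for XGMII.  For XGMII the RX/TX/completion queue
 * counts are scaled down to match however many IRQs the firmware actually
 * provides.
 */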
1330 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1331 {
1332         struct platform_device *pdev = pdata->pdev;
1333         struct device *dev = &pdev->dev;
1334         int i, ret, max_irqs;
1335
1336         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1337                 max_irqs = 1;
1338         else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1339                 max_irqs = 2;
1340         else
1341                 max_irqs = XGENE_MAX_ENET_IRQ;
1342
1343         for (i = 0; i < max_irqs; i++) {
1344                 ret = platform_get_irq(pdev, i);
1345                 if (ret <= 0) {
1346                         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1347                                 max_irqs = i;
1348                                 pdata->rxq_cnt = max_irqs / 2;
1349                                 pdata->txq_cnt = max_irqs / 2;
1350                                 pdata->cq_cnt = max_irqs / 2;
1351                                 break;
1352                         }
1353                         dev_err(dev, "Unable to get ENET IRQ\n");
1354                         ret = ret ? : -ENXIO;
1355                         return ret;
1356                 }
1357                 pdata->irqs[i] = ret;
1358         }
1359
1360         return 0;
1361 }
1362
1363 static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
1364 {
1365         int ret;
1366
1367         if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
1368                 return 0;
1369
1370         if (!IS_ENABLED(CONFIG_MDIO_XGENE))
1371                 return 0;
1372
1373         ret = xgene_enet_phy_connect(pdata->ndev);
1374         if (!ret)
1375                 pdata->mdio_driver = true;
1376
1377         return 0;
1378 }
1379
1380 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
1381 {
1382         struct device *dev = &pdata->pdev->dev;
1383
1384         pdata->sfp_gpio_en = false;
1385         if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
1386             (!device_property_present(dev, "sfp-gpios") &&
1387              !device_property_present(dev, "rxlos-gpios")))
1388                 return;
1389
1390         pdata->sfp_gpio_en = true;
1391         pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
1392         if (IS_ERR(pdata->sfp_rdy))
1393                 pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
1394 }
1395
1396 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1397 {
1398         struct platform_device *pdev;
1399         struct net_device *ndev;
1400         struct device *dev;
1401         struct resource *res;
1402         void __iomem *base_addr;
1403         u32 offset;
1404         int ret = 0;
1405
1406         pdev = pdata->pdev;
1407         dev = &pdev->dev;
1408         ndev = pdata->ndev;
1409
1410         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1411         if (!res) {
1412                 dev_err(dev, "Resource enet_csr not defined\n");
1413                 return -ENODEV;
1414         }
1415         pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1416         if (!pdata->base_addr) {
1417                 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1418                 return -ENOMEM;
1419         }
1420
1421         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1422         if (!res) {
1423                 dev_err(dev, "Resource ring_csr not defined\n");
1424                 return -ENODEV;
1425         }
1426         pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1427                                                         resource_size(res));
1428         if (!pdata->ring_csr_addr) {
1429                 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1430                 return -ENOMEM;
1431         }
1432
1433         res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1434         if (!res) {
1435                 dev_err(dev, "Resource ring_cmd not defined\n");
1436                 return -ENODEV;
1437         }
1438         pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1439                                                         resource_size(res));
1440         if (!pdata->ring_cmd_addr) {
1441                 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1442                 return -ENOMEM;
1443         }
1444
1445         if (dev->of_node)
1446                 xgene_get_port_id_dt(dev, pdata);
1447 #ifdef CONFIG_ACPI
1448         else
1449                 xgene_get_port_id_acpi(dev, pdata);
1450 #endif
1451
1452         if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1453                 eth_hw_addr_random(ndev);
1454
1455         memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1456
1457         pdata->phy_mode = device_get_phy_mode(dev);
1458         if (pdata->phy_mode < 0) {
1459                 dev_err(dev, "Unable to get phy-connection-type\n");
1460                 return pdata->phy_mode;
1461         }
1462         if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1463             pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1464             pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1465                 dev_err(dev, "Incorrect phy-connection-type specified\n");
1466                 return -ENODEV;
1467         }
1468
1469         ret = xgene_get_tx_delay(pdata);
1470         if (ret)
1471                 return ret;
1472
1473         ret = xgene_get_rx_delay(pdata);
1474         if (ret)
1475                 return ret;
1476
1477         ret = xgene_enet_get_irqs(pdata);
1478         if (ret)
1479                 return ret;
1480
1481         ret = xgene_enet_check_phy_handle(pdata);
1482         if (ret)
1483                 return ret;
1484
1485         xgene_enet_gpiod_get(pdata);
1486
1487         pdata->clk = devm_clk_get(&pdev->dev, NULL);
1488         if (IS_ERR(pdata->clk)) {
1489                 /* Firmware may have set up the clock already. */
1490                 dev_info(dev, "clocks have been setup already\n");
1491         }
1492
        if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
                base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
        else
                base_addr = pdata->base_addr;
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
            pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
                pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
                offset = (pdata->enet_id == XGENE_ENET1) ?
                          BLOCK_ETH_MAC_CSR_OFFSET :
                          X2_BLOCK_ETH_MAC_CSR_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + offset;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
                pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
        }
        pdata->rx_buff_cnt = NUM_PKT_BUF;

        return 0;
}

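/* Bring up the hardware: reset the port, create the descriptor rings, seed
 * the RX buffer pools, and set up either the PreClassifier tree (XGMII) or
 * CLE bypass before initializing the MAC.
 */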
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
        struct xgene_enet_cle *enet_cle = &pdata->cle;
        struct net_device *ndev = pdata->ndev;
        struct xgene_enet_desc_ring *buf_pool;
        u16 dst_ring_num;
        int i, ret;

        ret = pdata->port_ops->reset(pdata);
        if (ret)
                return ret;

        ret = xgene_enet_create_desc_rings(ndev);
        if (ret) {
                netdev_err(ndev, "Error in ring configuration\n");
                return ret;
        }

        /* set up the RX buffer pools */
        for (i = 0; i < pdata->rxq_cnt; i++) {
                buf_pool = pdata->rx_ring[i]->buf_pool;
                xgene_enet_init_bufpool(buf_pool);
                ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
                if (ret)
                        goto err;
        }

        dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
        buf_pool = pdata->rx_ring[0]->buf_pool;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                /* Initialize and enable the PreClassifier tree */
                enet_cle->max_nodes = 512;
                enet_cle->max_dbptrs = 1024;
                enet_cle->parsers = 3;
                enet_cle->active_parser = PARSER_ALL;
                enet_cle->ptree.start_node = 0;
                enet_cle->ptree.start_dbptr = 0;
                enet_cle->jump_bytes = 8;
                ret = pdata->cle_ops->cle_init(pdata);
                if (ret) {
                        netdev_err(ndev, "Preclass Tree init error\n");
                        goto err;
                }
        } else {
                pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
        }

        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->mac_ops->init(pdata);

        return ret;

err:
        xgene_enet_delete_desc_rings(pdata);
        return ret;
}

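/* Select the MAC, port, classifier and ring operations, the ring manager
 * and the queue counts for this port based on the PHY interface mode and
 * the SoC generation.
 */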
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
        switch (pdata->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                pdata->mac_ops = &xgene_gmac_ops;
                pdata->port_ops = &xgene_gport_ops;
                pdata->rm = RM3;
                pdata->rxq_cnt = 1;
                pdata->txq_cnt = 1;
                pdata->cq_cnt = 0;
                break;
        case PHY_INTERFACE_MODE_SGMII:
                pdata->mac_ops = &xgene_sgmac_ops;
                pdata->port_ops = &xgene_sgport_ops;
                pdata->rm = RM1;
                pdata->rxq_cnt = 1;
                pdata->txq_cnt = 1;
                pdata->cq_cnt = 1;
                break;
        default:
                pdata->mac_ops = &xgene_xgmac_ops;
                pdata->port_ops = &xgene_xgport_ops;
                pdata->cle_ops = &xgene_cle3in_ops;
                pdata->rm = RM0;
                if (!pdata->rxq_cnt) {
                        pdata->rxq_cnt = XGENE_NUM_RX_RING;
                        pdata->txq_cnt = XGENE_NUM_TX_RING;
                        pdata->cq_cnt = XGENE_NUM_TXC_RING;
                }
                break;
        }

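        /* Starting buffer and ring numbers depend on the SoC generation
         * (ENET1 vs ENET2) and on the port within it.
         */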
        if (pdata->enet_id == XGENE_ENET1) {
                switch (pdata->port_id) {
                case 0:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                                pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                                pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                                pdata->ring_num = START_RING_NUM_0;
                        } else {
                                pdata->cpu_bufnum = START_CPU_BUFNUM_0;
                                pdata->eth_bufnum = START_ETH_BUFNUM_0;
                                pdata->bp_bufnum = START_BP_BUFNUM_0;
                                pdata->ring_num = START_RING_NUM_0;
                        }
                        break;
                case 1:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                                pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
                                pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
                                pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
                                pdata->ring_num = XG_START_RING_NUM_1;
                        } else {
                                pdata->cpu_bufnum = START_CPU_BUFNUM_1;
                                pdata->eth_bufnum = START_ETH_BUFNUM_1;
                                pdata->bp_bufnum = START_BP_BUFNUM_1;
                                pdata->ring_num = START_RING_NUM_1;
                        }
                        break;
                default:
                        break;
                }
                pdata->ring_ops = &xgene_ring1_ops;
        } else {
                switch (pdata->port_id) {
                case 0:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
                        pdata->ring_num = X2_START_RING_NUM_0;
                        break;
                case 1:
                        pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
                        pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
                        pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
                        pdata->ring_num = X2_START_RING_NUM_1;
                        break;
                default:
                        break;
                }
                pdata->rm = RM0;
                pdata->ring_ops = &xgene_ring2_ops;
        }
}

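/* Register one NAPI context per RX ring and one per TX completion ring. */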
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
        struct napi_struct *napi;
        int i;

        for (i = 0; i < pdata->rxq_cnt; i++) {
                napi = &pdata->rx_ring[i]->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }

        for (i = 0; i < pdata->cq_cnt; i++) {
                napi = &pdata->tx_ring[i]->cp_ring->napi;
                netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
                               NAPI_POLL_WEIGHT);
        }
}

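/* Platform driver probe: allocate the netdev, determine the SoC generation
 * from the OF/ACPI match data, map resources, bring up the hardware, set up
 * MDIO or link-state polling, and register the interface.
 */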
static int xgene_enet_probe(struct platform_device *pdev)
{
        struct net_device *ndev;
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
        void (*link_state)(struct work_struct *);
        const struct of_device_id *of_id;
        int ret;

        ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
                                  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
        if (!ndev)
                return -ENOMEM;

        pdata = netdev_priv(ndev);

        pdata->pdev = pdev;
        pdata->ndev = ndev;
        SET_NETDEV_DEV(ndev, dev);
        platform_set_drvdata(pdev, pdata);
        ndev->netdev_ops = &xgene_ndev_ops;
        xgene_enet_set_ethtool_ops(ndev);
        ndev->features |= NETIF_F_IP_CSUM |
                          NETIF_F_GSO |
                          NETIF_F_GRO |
                          NETIF_F_SG;

        of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
        if (of_id) {
                pdata->enet_id = (enum xgene_enet_id)of_id->data;
        }
#ifdef CONFIG_ACPI
        else {
                const struct acpi_device_id *acpi_id;

                acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
                if (acpi_id)
                        pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
        }
#endif
        if (!pdata->enet_id) {
                ret = -ENODEV;
                goto err;
        }

        ret = xgene_enet_get_resources(pdata);
        if (ret)
                goto err;

        xgene_enet_setup_ops(pdata);

        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                ndev->features |= NETIF_F_TSO;
                spin_lock_init(&pdata->mss_lock);
        }
        ndev->hw_features = ndev->features;

        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                netdev_err(ndev, "No usable DMA configuration\n");
                goto err;
        }

        ret = xgene_enet_init_hw(pdata);
        if (ret)
                goto err;

        link_state = pdata->mac_ops->link_state;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                INIT_DELAYED_WORK(&pdata->link_work, link_state);
        } else if (!pdata->mdio_driver) {
                if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                        ret = xgene_enet_mdio_config(pdata);
                else
                        INIT_DELAYED_WORK(&pdata->link_work, link_state);

                if (ret)
                        goto err1;
        }

        xgene_enet_napi_add(pdata);
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
                goto err2;
        }

        return 0;

err2:
        /*
         * If necessary, free_netdev() will call netif_napi_del() and undo
         * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
         */

        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                xgene_enet_mdio_remove(pdata);
err1:
        xgene_enet_delete_desc_rings(pdata);
err:
        free_netdev(ndev);
        return ret;
}

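/* Tear down in roughly the reverse order of probe: close the interface if
 * it is running, disconnect the PHY or MDIO bus, unregister the netdev,
 * shut the port down and free the descriptor rings.
 */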
static int xgene_enet_remove(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;

        pdata = platform_get_drvdata(pdev);
        ndev = pdata->ndev;

        rtnl_lock();
        if (netif_running(ndev))
                dev_close(ndev);
        rtnl_unlock();

        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
                xgene_enet_mdio_remove(pdata);

        unregister_netdev(ndev);
        pdata->port_ops->shutdown(pdata);
        xgene_enet_delete_desc_rings(pdata);
        free_netdev(ndev);

        return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
        struct xgene_enet_pdata *pdata;

        pdata = platform_get_drvdata(pdev);
        if (!pdata)
                return;

        if (!pdata->ndev)
                return;

        xgene_enet_remove(pdev);
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
        { "APMC0D05", XGENE_ENET1},
        { "APMC0D30", XGENE_ENET1},
        { "APMC0D31", XGENE_ENET1},
        { "APMC0D3F", XGENE_ENET1},
        { "APMC0D26", XGENE_ENET2},
        { "APMC0D25", XGENE_ENET2},
        { }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
#endif

#ifdef CONFIG_OF
static const struct of_device_id xgene_enet_of_match[] = {
        {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
        {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
        {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
        {},
};

MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
#endif

static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
                   .of_match_table = of_match_ptr(xgene_enet_of_match),
                   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
        },
        .probe = xgene_enet_probe,
        .remove = xgene_enet_remove,
        .shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");