drivers/staging/octeon/ethernet-rx.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-util.h"

static atomic_t oct_rx_ready = ATOMIC_INIT(0);

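/*
 * One receive context per work group: each group enabled in
 * pow_receive_groups gets its own IRQ and NAPI instance.  The driver
 * uses up to 16 of the hardware's POW/SSO work groups.
 */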
static struct oct_rx_group {
        int irq;
        int group;
        struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
        /*
         * Mask the IRQ and hand processing to NAPI; the IRQ is
         * re-enabled from cvm_oct_napi_poll() once the group has
         * been drained.
         */
        disable_irq_nosync(irq);
        napi_schedule(napi_id);

        return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
{
        int port;

        if (octeon_has_feature(OCTEON_FEATURE_PKND))
                port = work->word0.pip.cn68xx.pknd;
        else
                port = work->word1.cn38xx.ipprt;

        if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
                /*
                 * Ignore length errors on min size packets. Some
                 * equipment incorrectly pads packets to 64+4FCS
                 * instead of 60+4FCS.  Note these packets still get
                 * counted as frame errors.
                 */
        } else if (work->word2.snoip.err_code == 5 ||
                   work->word2.snoip.err_code == 7) {
                /*
                 * We received a packet with either an alignment error
                 * or an FCS error. This may be signalling that we are
                 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
                 * off. If this is the case we need to parse the
                 * packet to determine if we can remove a non-spec
                 * preamble and generate a correct packet.
                 */
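                /*
                 * Illustrative note (a reading of the code below, not
                 * SDK documentation): a compliant frame arrives as
                 * 55 55 ... 55 d5 <destination MAC ...>.  The loop
                 * skips the 0x55 preamble bytes; a following 0xd5
                 * means the frame is byte-aligned and only the start
                 * pointer needs to move, while a low nibble of 0xd
                 * means the whole frame is shifted by four bits and
                 * every remaining byte must be rebuilt from adjacent
                 * half-bytes.
                 */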
                int interface = cvmx_helper_get_interface_num(port);
                int index = cvmx_helper_get_interface_index_num(port);
                union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

                gmxx_rxx_frm_ctl.u64 =
                    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
                if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
                        u8 *ptr =
                            cvmx_phys_to_ptr(work->packet_ptr.s.addr);
                        int i = 0;

                        while (i < work->word1.len - 1) {
                                if (*ptr != 0x55)
                                        break;
                                ptr++;
                                i++;
                        }

                        if (*ptr == 0xd5) {
                                /* Port received 0xd5 preamble */
                                work->packet_ptr.s.addr += i + 1;
                                work->word1.len -= i + 5;
                        } else if ((*ptr & 0xf) == 0xd) {
                                /* Port received 0xd preamble */
                                work->packet_ptr.s.addr += i;
                                work->word1.len -= i + 4;
                                for (i = 0; i < work->word1.len; i++) {
                                        *ptr =
                                            ((*ptr & 0xf0) >> 4) |
                                            ((*(ptr + 1) & 0xf) << 4);
                                        ptr++;
                                }
                        } else {
                                printk_ratelimited("Port %d unknown preamble, packet dropped\n",
                                                   port);
                                cvm_oct_free_work(work);
                                return 1;
                        }
                }
        } else {
                printk_ratelimited("Port %d receive error code %d, packet dropped\n",
                                   port, work->word2.snoip.err_code);
                cvm_oct_free_work(work);
                return 1;
        }

        return 0;
}

static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
{
        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;
        int len = work->word1.len;
        int segment_size;

        while (segments--) {
                union cvmx_buf_ptr next_ptr;

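                /*
                 * The hardware chains segments by storing a
                 * cvmx_buf_ptr in the eight bytes immediately before
                 * each segment's data, hence the fetch from addr - 8.
                 */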
                next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

                /*
                 * Octeon Errata PKI-100: The segment size is wrong.
                 *
                 * Until it is fixed, calculate the segment size based on
                 * the packet pool buffer size.
                 * When it is fixed, the following line should be replaced
                 * with this one:
                 * int segment_size = segment_ptr.s.size;
                 */
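                /*
                 * Worked example (illustrative numbers, assuming a
                 * 2048-byte packet pool buffer): the buffer start is
                 * recovered by rounding the address down to a
                 * 128-byte cache line and stepping "back" lines:
                 *   start = ((addr >> 7) - back) << 7;
                 * For addr = 0x10100 and back = 1 this gives start =
                 * 0x10080, so the space left in the buffer is
                 * 2048 - (0x10100 - 0x10080) = 1920 bytes.
                 */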
                segment_size =
                        CVMX_FPA_PACKET_POOL_SIZE -
                        (segment_ptr.s.addr -
                         (((segment_ptr.s.addr >> 7) -
                           segment_ptr.s.back) << 7));

                /* Don't copy more than what is left in the packet */
                if (segment_size > len)
                        segment_size = len;

                /* Copy the data into the packet */
                skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr),
                             segment_size);
                len -= segment_size;
                segment_ptr = next_ptr;
        }
}

static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
        const int       coreid = cvmx_get_core_num();
        u64     old_group_mask;
        u64     old_scratch;
        int             rx_count = 0;
        int             did_work_request = 0;
        int             packet_not_copied;

        /* Prefetch cvm_oct_device since we know we need it soon */
        prefetch(cvm_oct_device);

        if (USE_ASYNC_IOBDMA) {
                /* Save scratch in case userspace is using it */
                CVMX_SYNCIOBDMA;
                old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
        }

        /* Only allow work for our group (and preserve priorities) */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
                old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
                cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
                               BIT(rx_group->group));
                cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
        } else {
                old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
                cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
                               (old_group_mask & ~0xFFFFull) |
                               BIT(rx_group->group));
        }
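        /*
         * A reading of the masking above: the low 16 bits of the
         * pre-CN68XX mask select which groups this core may fetch
         * work from, so BIT(rx_group->group) restricts fetches to
         * our group (e.g. group 4 gives a low-16 mask of 0x0010)
         * while the bits above, which hold other configuration, are
         * preserved.
         */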

        if (USE_ASYNC_IOBDMA) {
                cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
                did_work_request = 1;
        }

        while (rx_count < budget) {
                struct sk_buff *skb = NULL;
                struct sk_buff **pskb = NULL;
                int skb_in_hw;
                struct cvmx_wqe *work;
                int port;

                if (USE_ASYNC_IOBDMA && did_work_request)
                        work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
                else
                        work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

                prefetch(work);
                did_work_request = 0;
                if (!work) {
                        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
                                cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
                                               BIT(rx_group->group));
                                cvmx_write_csr(CVMX_SSO_WQ_INT,
                                               BIT(rx_group->group));
                        } else {
                                union cvmx_pow_wq_int wq_int;

                                wq_int.u64 = 0;
                                wq_int.s.iq_dis = BIT(rx_group->group);
                                wq_int.s.wq_int = BIT(rx_group->group);
                                cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
                        }
                        break;
                }
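                /*
                 * When this buffer came from an skb handed to the
                 * hardware (see cvm_oct_fill_hw_skbuff() in
                 * ethernet-mem.c), a pointer back to the skb was
                 * stored just below the packet data; recover it so
                 * the zero-copy path below can reuse that skb.
                 */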
                pskb = (struct sk_buff **)
                        (cvm_oct_get_buffer_ptr(work->packet_ptr) -
                        sizeof(void *));
                prefetch(pskb);

                if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
                        cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
                                                            CVMX_POW_NO_WAIT);
                        did_work_request = 1;
                }
                rx_count++;

                skb_in_hw = work->word2.s.bufs == 1;
                if (likely(skb_in_hw)) {
                        skb = *pskb;
                        prefetch(&skb->head);
                        prefetch(&skb->len);
                }

                if (octeon_has_feature(OCTEON_FEATURE_PKND))
                        port = work->word0.pip.cn68xx.pknd;
                else
                        port = work->word1.cn38xx.ipprt;

                prefetch(cvm_oct_device[port]);

                /* Immediately throw away all packets with receive errors */
                if (unlikely(work->word2.snoip.rcv_error)) {
                        if (cvm_oct_check_rcv_error(work))
                                continue;
                }

                /*
                 * We can only use the zero copy path if skbuffs are
                 * in the FPA pool and the packet fits in a single
                 * buffer.
                 */
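                /*
                 * Sketch of the pointer math below: the receive
                 * buffer was carved out of skb->head when the skb
                 * was fed to the FPA pool, so adding the physical
                 * offset of the packet within that buffer to
                 * skb->head re-derives the data's kernel virtual
                 * address without any copy.
                 */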
                if (likely(skb_in_hw)) {
                        skb->data = skb->head + work->packet_ptr.s.addr -
                                cvmx_ptr_to_phys(skb->head);
                        prefetch(skb->data);
                        skb->len = work->word1.len;
                        skb_set_tail_pointer(skb, skb->len);
                        packet_not_copied = 1;
                } else {
                        /*
                         * We have to copy the packet. First allocate
                         * an skbuff for it.
                         */
                        skb = dev_alloc_skb(work->word1.len);
                        if (!skb) {
                                cvm_oct_free_work(work);
                                continue;
                        }

                        /*
                         * Check if we've received a packet that was
                         * entirely stored in the work entry.
                         */
                        if (unlikely(work->word2.s.bufs == 0)) {
                                u8 *ptr = work->packet_data;

                                if (likely(!work->word2.s.not_IP)) {
                                        /*
                                         * The beginning of the packet
                                         * moves for IP packets.
                                         */
                                        if (work->word2.s.is_v6)
                                                ptr += 2;
                                        else
                                                ptr += 6;
                                }
                                skb_put_data(skb, ptr, work->word1.len);
                                /* No packet buffers to free */
                        } else {
                                copy_segments_to_skb(work, skb);
                        }
                        packet_not_copied = 0;
                }
                if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
                           cvm_oct_device[port])) {
                        struct net_device *dev = cvm_oct_device[port];

                        /*
                         * Only accept packets for devices that are
                         * currently up.
                         */
                        if (likely(dev->flags & IFF_UP)) {
                                skb->protocol = eth_type_trans(skb, dev);
                                skb->dev = dev;

                                if (unlikely(work->word2.s.not_IP ||
                                             work->word2.s.IP_exc ||
                                             work->word2.s.L4_error ||
                                             !work->word2.s.tcp_or_udp))
                                        skb->ip_summed = CHECKSUM_NONE;
                                else
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                                /* Increment RX stats for virtual ports */
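                                /*
                                 * Physical ports are counted by the
                                 * hardware and reported through the
                                 * driver's get_stats callback; only
                                 * virtual ports (a reading of the
                                 * check below) need software counts.
                                 */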
                                if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += skb->len;
                                }
                                netif_receive_skb(skb);
                        } else {
                                /*
                                 * Drop any packet received for a device that
                                 * isn't up.
                                 */
                                dev->stats.rx_dropped++;
                                dev_kfree_skb_irq(skb);
                        }
                } else {
                        /*
                         * Drop any packet received for a device that
                         * doesn't exist.
                         */
                        printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
                                           port);
                        dev_kfree_skb_irq(skb);
                }
                /*
                 * Check to see if the skbuff and work share the same
                 * packet buffer.
                 */
                if (likely(packet_not_copied)) {
                        /*
                         * This buffer needs to be replaced; increment
                         * the number of buffers we need to free by
                         * one.
                         */
                        cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
                                              1);

                        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
                } else {
                        cvm_oct_free_work(work);
                }
        }
        /* Restore the original POW group mask */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
                cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
                cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
        } else {
                cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
        }

        if (USE_ASYNC_IOBDMA) {
                /* Restore the scratch area */
                cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
        }
        cvm_oct_rx_refill_pool(0);

        return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
        struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
                                                     napi);
        int rx_count;

        rx_count = cvm_oct_poll(rx_group, budget);

        if (rx_count < budget) {
                /* No more work */
                napi_complete_done(napi, rx_count);
                enable_irq(rx_group->irq);
        }
        return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll a device for receive packets
 * without relying on interrupts (netpoll/netconsole).
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
        int i;

        if (!atomic_read(&oct_rx_ready))
                return;

        for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
                if (!(pow_receive_groups & BIT(i)))
                        continue;

                cvm_oct_poll(&oct_rx_group[i], 16);
        }
}
#endif

void cvm_oct_rx_initialize(void)
{
        int i;
        struct net_device *dev_for_napi = NULL;

        for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
                if (cvm_oct_device[i]) {
                        dev_for_napi = cvm_oct_device[i];
                        break;
                }
        }

        if (!dev_for_napi)
                panic("No net_devices were allocated.");

        for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
                int ret;

                if (!(pow_receive_groups & BIT(i)))
                        continue;

                netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
                               cvm_oct_napi_poll, rx_napi_weight);
                napi_enable(&oct_rx_group[i].napi);

                oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
                oct_rx_group[i].group = i;

                /* Register an IRQ handler to receive POW interrupts */
                ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
                                  "Ethernet", &oct_rx_group[i].napi);
                if (ret)
                        panic("Could not acquire Ethernet IRQ %d\n",
                              oct_rx_group[i].irq);

                disable_irq_nosync(oct_rx_group[i].irq);

                /* Enable the POW interrupt when our group has at least
                 * one packet.
                 */
                if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
                        union cvmx_sso_wq_int_thrx int_thr;
                        union cvmx_pow_wq_int_pc int_pc;

                        int_thr.u64 = 0;
                        int_thr.s.tc_en = 1;
                        int_thr.s.tc_thr = 1;
                        cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

                        int_pc.u64 = 0;
                        int_pc.s.pc_thr = 5;
                        cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
                } else {
                        union cvmx_pow_wq_int_thrx int_thr;
                        union cvmx_pow_wq_int_pc int_pc;

                        int_thr.u64 = 0;
                        int_thr.s.tc_en = 1;
                        int_thr.s.tc_thr = 1;
                        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

                        int_pc.u64 = 0;
                        int_pc.s.pc_thr = 5;
                        cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
                }
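                /*
                 * A reading of the writes above: tc_en/tc_thr = 1
                 * raise the interrupt as soon as one work entry is
                 * queued for the group, while pc_thr rate-limits how
                 * often the periodic counter may re-assert it; the
                 * value 5 is carried over from the original driver.
                 */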

                /* Schedule NAPI now. This will indirectly enable the
                 * interrupt.
                 */
                napi_schedule(&oct_rx_group[i].napi);
        }
        atomic_inc(&oct_rx_ready);
}

void cvm_oct_rx_shutdown(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
                if (!(pow_receive_groups & BIT(i)))
                        continue;

                /* Disable the POW interrupt */
                if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                        cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
                else
                        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

                /* Free the interrupt handler; the cookie must match
                 * the one passed to request_irq().
                 */
                free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

                netif_napi_del(&oct_rx_group[i].napi);
        }
}