drivers/net/ethernet/sfc/rx.c
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2005-2011 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10
11 #include <linux/socket.h>
12 #include <linux/in.h>
13 #include <linux/slab.h>
14 #include <linux/ip.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <linux/prefetch.h>
18 #include <linux/moduleparam.h>
19 #include <linux/iommu.h>
20 #include <net/ip.h>
21 #include <net/checksum.h>
22 #include "net_driver.h"
23 #include "efx.h"
24 #include "nic.h"
25 #include "selftest.h"
26 #include "workarounds.h"
27
28 /* Preferred number of descriptors to fill at once */
29 #define EFX_RX_PREFERRED_BATCH 8U
30
31 /* Number of RX buffers to recycle pages for.  When creating the RX page recycle
32  * ring, this number is divided by the number of buffers per page to calculate
33  * the number of pages to store in the RX page recycle ring.
34  */
35 #define EFX_RECYCLE_RING_SIZE_IOMMU 4096
36 #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
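/* For example, with two RX buffers per page the IOMMU case gives a page ring
 * of roundup_pow_of_two(4096 / 2) = 2048 entries; see efx_init_rx_recycle_ring().
 */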
37
38 /* Size of buffer allocated for skb header area. */
39 #define EFX_SKB_HEADERS  64u
40
41 /* This is the percentage fill level below which new RX descriptors
42  * will be added to the RX descriptor ring.
43  */
44 static unsigned int rx_refill_threshold;
45
46 /* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
47 #define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
48                                       EFX_RX_USR_BUF_SIZE)
49
50 /*
51  * RX maximum head room required.
52  *
53  * This must be at least 1 to prevent overflow, plus one packet-worth
54  * to allow pipelined receives.
55  */
56 #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
57
58 static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
59 {
60         return page_address(buf->page) + buf->page_offset;
61 }
62
63 static inline u32 efx_rx_buf_hash(const u8 *eh)
64 {
65         /* The ethernet header is always directly after any hash. */
66 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
67         return __le32_to_cpup((const __le32 *)(eh - 4));
68 #else
69         const u8 *data = eh - 4;
70         return (u32)data[0]       |
71                (u32)data[1] << 8  |
72                (u32)data[2] << 16 |
73                (u32)data[3] << 24;
74 #endif
75 }
76
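/* Return the RX buffer that follows @rx_buf in the descriptor ring, wrapping
 * back to entry 0 after the last entry.
 */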
77 static inline struct efx_rx_buffer *
78 efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
79 {
80         if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
81                 return efx_rx_buffer(rx_queue, 0);
82         else
83                 return rx_buf + 1;
84 }
85
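/* Hand ownership of a buffer's DMA data back to the CPU so that the first
 * @len bytes of the received packet can safely be read.
 */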
86 static inline void efx_sync_rx_buffer(struct efx_nic *efx,
87                                       struct efx_rx_buffer *rx_buf,
88                                       unsigned int len)
89 {
90         dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
91                                 DMA_FROM_DEVICE);
92 }
93
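/* Work out how RX buffers are laid out in each page: a struct
 * efx_rx_page_state at the start, followed by rx_bufs_per_page buffers of
 * rx_page_buf_step bytes each (NET_IP_ALIGN padding plus rx_dma_len of data,
 * aligned to EFX_RX_BUF_ALIGNMENT).  Also work out how many pages are needed
 * for one batch of EFX_RX_PREFERRED_BATCH buffers.
 */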
94 void efx_rx_config_page_split(struct efx_nic *efx)
95 {
96         efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
97                                       EFX_RX_BUF_ALIGNMENT);
98         efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
99                 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
100                  efx->rx_page_buf_step);
101         efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
102                 efx->rx_bufs_per_page;
103         efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
104                                                efx->rx_bufs_per_page);
105 }
106
107 /* Check the RX page recycle ring for a page that can be reused. */
108 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
109 {
110         struct efx_nic *efx = rx_queue->efx;
111         struct page *page;
112         struct efx_rx_page_state *state;
113         unsigned index;
114
115         index = rx_queue->page_remove & rx_queue->page_ptr_mask;
116         page = rx_queue->page_ring[index];
117         if (page == NULL)
118                 return NULL;
119
120         rx_queue->page_ring[index] = NULL;
121         /* page_remove cannot exceed page_add. */
122         if (rx_queue->page_remove != rx_queue->page_add)
123                 ++rx_queue->page_remove;
124
125         /* If page_count is 1 then we hold the only reference to this page. */
126         if (page_count(page) == 1) {
127                 ++rx_queue->page_recycle_count;
128                 return page;
129         } else {
130                 state = page_address(page);
131                 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
132                                PAGE_SIZE << efx->rx_buffer_order,
133                                DMA_FROM_DEVICE);
134                 put_page(page);
135                 ++rx_queue->page_recycle_failed;
136         }
137
138         return NULL;
139 }
140
141 /**
142  * efx_init_rx_buffers - create a batch of page-based RX buffers
143  *
144  * @rx_queue:           Efx RX queue
145  *
146  * This allocates a batch of pages, maps them for DMA, and populates
147  * struct efx_rx_buffer entries for each one.  Returns a negative error code or
148  * 0 on success. If a single page can be used for multiple buffers,
149  * then the page will either be inserted fully, or not at all.
150  */
151 static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
152 {
153         struct efx_nic *efx = rx_queue->efx;
154         struct efx_rx_buffer *rx_buf;
155         struct page *page;
156         unsigned int page_offset;
157         struct efx_rx_page_state *state;
158         dma_addr_t dma_addr;
159         unsigned index, count;
160
161         count = 0;
162         do {
163                 page = efx_reuse_page(rx_queue);
164                 if (page == NULL) {
165                         page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
166                                            efx->rx_buffer_order);
167                         if (unlikely(page == NULL))
168                                 return -ENOMEM;
169                         dma_addr =
170                                 dma_map_page(&efx->pci_dev->dev, page, 0,
171                                              PAGE_SIZE << efx->rx_buffer_order,
172                                              DMA_FROM_DEVICE);
173                         if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
174                                                        dma_addr))) {
175                                 __free_pages(page, efx->rx_buffer_order);
176                                 return -EIO;
177                         }
178                         state = page_address(page);
179                         state->dma_addr = dma_addr;
180                 } else {
181                         state = page_address(page);
182                         dma_addr = state->dma_addr;
183                 }
184
185                 dma_addr += sizeof(struct efx_rx_page_state);
186                 page_offset = sizeof(struct efx_rx_page_state);
187
188                 do {
189                         index = rx_queue->added_count & rx_queue->ptr_mask;
190                         rx_buf = efx_rx_buffer(rx_queue, index);
191                         rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
192                         rx_buf->page = page;
193                         rx_buf->page_offset = page_offset + NET_IP_ALIGN;
194                         rx_buf->len = efx->rx_dma_len;
195                         rx_buf->flags = 0;
196                         ++rx_queue->added_count;
197                         get_page(page);
198                         dma_addr += efx->rx_page_buf_step;
199                         page_offset += efx->rx_page_buf_step;
200                 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
201
202                 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
203         } while (++count < efx->rx_pages_per_batch);
204
205         return 0;
206 }
207
208 /* Unmap a DMA-mapped page.  This function is only called for the final RX
209  * buffer in a page.
210  */
211 static void efx_unmap_rx_buffer(struct efx_nic *efx,
212                                 struct efx_rx_buffer *rx_buf)
213 {
214         struct page *page = rx_buf->page;
215
216         if (page) {
217                 struct efx_rx_page_state *state = page_address(page);
218                 dma_unmap_page(&efx->pci_dev->dev,
219                                state->dma_addr,
220                                PAGE_SIZE << efx->rx_buffer_order,
221                                DMA_FROM_DEVICE);
222         }
223 }
224
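/* Drop the page reference held by an RX buffer, if any, and clear it. */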
225 static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
226 {
227         if (rx_buf->page) {
228                 put_page(rx_buf->page);
229                 rx_buf->page = NULL;
230         }
231 }
232
233 /* Attempt to recycle the page if there is an RX recycle ring; the page can
234  * only be added if this is the final RX buffer, to prevent pages being used in
235  * the descriptor ring and appearing in the recycle ring simultaneously.
236  */
237 static void efx_recycle_rx_page(struct efx_channel *channel,
238                                 struct efx_rx_buffer *rx_buf)
239 {
240         struct page *page = rx_buf->page;
241         struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
242         struct efx_nic *efx = rx_queue->efx;
243         unsigned index;
244
245         /* Only recycle the page after processing the final buffer. */
246         if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
247                 return;
248
249         index = rx_queue->page_add & rx_queue->page_ptr_mask;
250         if (rx_queue->page_ring[index] == NULL) {
251                 unsigned read_index = rx_queue->page_remove &
252                         rx_queue->page_ptr_mask;
253
254                 /* The next slot in the recycle ring is available, but
255                  * increment page_remove if the read pointer currently
256                  * points here.
257                  */
258                 if (read_index == index)
259                         ++rx_queue->page_remove;
260                 rx_queue->page_ring[index] = page;
261                 ++rx_queue->page_add;
262                 return;
263         }
264         ++rx_queue->page_recycle_full;
265         efx_unmap_rx_buffer(efx, rx_buf);
266         put_page(rx_buf->page);
267 }
268
269 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
270                                struct efx_rx_buffer *rx_buf)
271 {
272         /* Release the page reference we hold for the buffer. */
273         if (rx_buf->page)
274                 put_page(rx_buf->page);
275
276         /* If this is the last buffer in a page, unmap and free it. */
277         if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
278                 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
279                 efx_free_rx_buffer(rx_buf);
280         }
281         rx_buf->page = NULL;
282 }
283
284 /* Recycle the pages that are used by buffers that have just been received. */
285 static void efx_recycle_rx_buffers(struct efx_channel *channel,
286                                    struct efx_rx_buffer *rx_buf,
287                                    unsigned int n_frags)
288 {
289         struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
290
291         do {
292                 efx_recycle_rx_page(channel, rx_buf);
293                 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
294         } while (--n_frags);
295 }
296
297 /**
298  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
299  * @rx_queue:           RX descriptor queue
300  *
301  * This will aim to fill the RX descriptor queue up to
302  * @rx_queue->max_fill. If there is insufficient atomic
303  * memory to do so, a slow fill will be scheduled.
304  *
305  * The caller must provide serialisation (none is used here). In practice,
306  * this means this function must run from the NAPI handler, or be called
307  * when NAPI is disabled.
308  */
309 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
310 {
311         struct efx_nic *efx = rx_queue->efx;
312         unsigned int fill_level, batch_size;
313         int space, rc = 0;
314
315         /* Calculate current fill level, and exit if we don't need to fill */
316         fill_level = (rx_queue->added_count - rx_queue->removed_count);
317         EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
318         if (fill_level >= rx_queue->fast_fill_trigger)
319                 goto out;
320
321         /* Record minimum fill level */
322         if (unlikely(fill_level < rx_queue->min_fill)) {
323                 if (fill_level)
324                         rx_queue->min_fill = fill_level;
325         }
326
327         batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
328         space = rx_queue->max_fill - fill_level;
329         EFX_BUG_ON_PARANOID(space < batch_size);
330
331         netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
332                    "RX queue %d fast-filling descriptor ring from"
333                    " level %d to level %d\n",
334                    efx_rx_queue_index(rx_queue), fill_level,
335                    rx_queue->max_fill);
336
337
338         do {
339                 rc = efx_init_rx_buffers(rx_queue);
340                 if (unlikely(rc)) {
341                         /* Ensure that we don't leave the rx queue empty */
342                         if (rx_queue->added_count == rx_queue->removed_count)
343                                 efx_schedule_slow_fill(rx_queue);
344                         goto out;
345                 }
346         } while ((space -= batch_size) >= batch_size);
347
348         netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
349                    "RX queue %d fast-filled descriptor ring "
350                    "to level %d\n", efx_rx_queue_index(rx_queue),
351                    rx_queue->added_count - rx_queue->removed_count);
352
353  out:
354         if (rx_queue->notified_count != rx_queue->added_count)
355                 efx_nic_notify_rx_desc(rx_queue);
356 }
357
358 void efx_rx_slow_fill(unsigned long context)
359 {
360         struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
361
362         /* Post an event to cause NAPI to run and refill the queue */
363         efx_nic_generate_fill_event(rx_queue);
364         ++rx_queue->slow_fill_count;
365 }
366
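/* Validate the completed length of a single-buffer receive.  Overlength
 * packets are marked for discard; if the length also exceeds the buffer
 * itself and workaround 8071 applies, the event is treated as serious and
 * an RX recovery reset is scheduled.
 */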
367 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
368                                      struct efx_rx_buffer *rx_buf,
369                                      int len)
370 {
371         struct efx_nic *efx = rx_queue->efx;
372         unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
373
374         if (likely(len <= max_len))
375                 return;
376
377         /* The packet must be discarded, but this is only a fatal error
378          * if the caller indicated it was
379          */
380         rx_buf->flags |= EFX_RX_PKT_DISCARD;
381
382         if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
383                 if (net_ratelimit())
384                         netif_err(efx, rx_err, efx->net_dev,
385                                   "RX queue %d seriously overlength "
386                                   "RX event (0x%x > 0x%x+0x%x). Leaking\n",
387                                   efx_rx_queue_index(rx_queue), len, max_len,
388                                   efx->type->rx_buffer_padding);
389                 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
390         } else {
391                 if (net_ratelimit())
392                         netif_err(efx, rx_err, efx->net_dev,
393                                   "RX queue %d overlength RX event "
394                                   "(0x%x > 0x%x)\n",
395                                   efx_rx_queue_index(rx_queue), len, max_len);
396         }
397
398         efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
399 }
400
401 /* Pass a received packet up through GRO.  GRO can handle pages
402  * regardless of checksum state and skbs with a good checksum.
403  */
404 static void
405 efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
406                   unsigned int n_frags, u8 *eh)
407 {
408         struct napi_struct *napi = &channel->napi_str;
409         gro_result_t gro_result;
410         struct efx_nic *efx = channel->efx;
411         struct sk_buff *skb;
412
413         skb = napi_get_frags(napi);
414         if (unlikely(!skb)) {
415                 while (n_frags--) {
416                         put_page(rx_buf->page);
417                         rx_buf->page = NULL;
418                         rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
419                 }
420                 return;
421         }
422
423         if (efx->net_dev->features & NETIF_F_RXHASH)
424                 skb->rxhash = efx_rx_buf_hash(eh);
425         skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
426                           CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
427
428         for (;;) {
429                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
430                                    rx_buf->page, rx_buf->page_offset,
431                                    rx_buf->len);
432                 rx_buf->page = NULL;
433                 skb->len += rx_buf->len;
434                 if (skb_shinfo(skb)->nr_frags == n_frags)
435                         break;
436
437                 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
438         }
439
440         skb->data_len = skb->len;
441         skb->truesize += n_frags * efx->rx_buffer_truesize;
442
443         skb_record_rx_queue(skb, channel->rx_queue.core_index);
444
445         gro_result = napi_gro_frags(napi);
446         if (gro_result != GRO_DROP)
447                 channel->irq_mod_score += 2;
448 }
449
450 /* Allocate and construct an SKB around page fragments */
451 static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
452                                      struct efx_rx_buffer *rx_buf,
453                                      unsigned int n_frags,
454                                      u8 *eh, int hdr_len)
455 {
456         struct efx_nic *efx = channel->efx;
457         struct sk_buff *skb;
458
459         /* Allocate an SKB to store the headers */
460         skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
461         if (unlikely(skb == NULL))
462                 return NULL;
463
464         EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
465
466         skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
467         memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
468
469         /* Append the remaining page(s) onto the frag list */
470         if (rx_buf->len > hdr_len) {
471                 rx_buf->page_offset += hdr_len;
472                 rx_buf->len -= hdr_len;
473
474                 for (;;) {
475                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
476                                            rx_buf->page, rx_buf->page_offset,
477                                            rx_buf->len);
478                         rx_buf->page = NULL;
479                         skb->len += rx_buf->len;
480                         skb->data_len += rx_buf->len;
481                         if (skb_shinfo(skb)->nr_frags == n_frags)
482                                 break;
483
484                         rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
485                 }
486         } else {
487                 __free_pages(rx_buf->page, efx->rx_buffer_order);
488                 rx_buf->page = NULL;
489                 n_frags = 0;
490         }
491
492         skb->truesize += n_frags * efx->rx_buffer_truesize;
493
494         /* Move past the ethernet header */
495         skb->protocol = eth_type_trans(skb, efx->net_dev);
496
497         return skb;
498 }
499
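/* Handle a received packet.  First half: validates the fragment count and
 * completed length, syncs the DMA mapping(s) for the CPU, strips the hash
 * prefix, recycles the underlying pages, and records the packet on the
 * channel for __efx_rx_packet() to complete.
 */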
500 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
501                    unsigned int n_frags, unsigned int len, u16 flags)
502 {
503         struct efx_nic *efx = rx_queue->efx;
504         struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
505         struct efx_rx_buffer *rx_buf;
506
507         rx_buf = efx_rx_buffer(rx_queue, index);
508         rx_buf->flags |= flags;
509
510         /* Validate the number of fragments and completed length */
511         if (n_frags == 1) {
512                 efx_rx_packet__check_len(rx_queue, rx_buf, len);
513         } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
514                    unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
515                    unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
516                    unlikely(!efx->rx_scatter)) {
517                 /* If this isn't an explicit discard request, either
518                  * the hardware or the driver is broken.
519                  */
520                 WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
521                 rx_buf->flags |= EFX_RX_PKT_DISCARD;
522         }
523
524         netif_vdbg(efx, rx_status, efx->net_dev,
525                    "RX queue %d received ids %x-%x len %d %s%s\n",
526                    efx_rx_queue_index(rx_queue), index,
527                    (index + n_frags - 1) & rx_queue->ptr_mask, len,
528                    (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
529                    (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
530
531         /* Discard packet, if instructed to do so.  Process the
532          * previous receive first.
533          */
534         if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
535                 efx_rx_flush_packet(channel);
536                 put_page(rx_buf->page);
537                 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
538                 return;
539         }
540
541         if (n_frags == 1)
542                 rx_buf->len = len;
543
544         /* Release and/or sync the DMA mapping - assumes all RX buffers
545          * consumed in-order per RX queue.
546          */
547         efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
548
549         /* Prefetch nice and early so data will (hopefully) be in cache by
550          * the time we look at it.
551          */
552         prefetch(efx_rx_buf_va(rx_buf));
553
554         rx_buf->page_offset += efx->type->rx_buffer_hash_size;
555         rx_buf->len -= efx->type->rx_buffer_hash_size;
556
557         if (n_frags > 1) {
558                 /* Release/sync DMA mapping for additional fragments.
559                  * Fix length for last fragment.
560                  */
561                 unsigned int tail_frags = n_frags - 1;
562
563                 for (;;) {
564                         rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
565                         if (--tail_frags == 0)
566                                 break;
567                         efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
568                 }
569                 rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
570                 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
571         }
572
573         /* All fragments have been DMA-synced, so recycle buffers and pages. */
574         rx_buf = efx_rx_buffer(rx_queue, index);
575         efx_recycle_rx_buffers(channel, rx_buf, n_frags);
576
577         /* Pipeline receives so that we give time for packet headers to be
578          * prefetched into cache.
579          */
580         efx_rx_flush_packet(channel);
581         channel->rx_pkt_n_frags = n_frags;
582         channel->rx_pkt_index = index;
583 }
584
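/* Build an skb around the received fragments and pass it up the stack,
 * either through the channel's receive_skb() handler or netif_receive_skb().
 */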
585 static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
586                            struct efx_rx_buffer *rx_buf,
587                            unsigned int n_frags)
588 {
589         struct sk_buff *skb;
590         u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
591
592         skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
593         if (unlikely(skb == NULL)) {
594                 efx_free_rx_buffer(rx_buf);
595                 return;
596         }
597         skb_record_rx_queue(skb, channel->rx_queue.core_index);
598
599         /* Set the SKB flags */
600         skb_checksum_none_assert(skb);
601
602         if (channel->type->receive_skb)
603                 if (channel->type->receive_skb(channel, skb))
604                         return;
605
606         /* Pass the packet up */
607         netif_receive_skb(skb);
608 }
609
610 /* Handle a received packet.  Second half: Touches packet payload. */
611 void __efx_rx_packet(struct efx_channel *channel)
612 {
613         struct efx_nic *efx = channel->efx;
614         struct efx_rx_buffer *rx_buf =
615                 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
616         u8 *eh = efx_rx_buf_va(rx_buf);
617
618         /* If we're in loopback test, then pass the packet directly to the
619          * loopback layer, and free the rx_buf here
620          */
621         if (unlikely(efx->loopback_selftest)) {
622                 efx_loopback_rx_packet(efx, eh, rx_buf->len);
623                 efx_free_rx_buffer(rx_buf);
624                 goto out;
625         }
626
627         if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
628                 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
629
630         if (!channel->type->receive_skb)
631                 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
632         else
633                 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
634 out:
635         channel->rx_pkt_n_frags = 0;
636 }
637
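/* Allocate the software buffer table and hardware resources for an RX queue. */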
638 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
639 {
640         struct efx_nic *efx = rx_queue->efx;
641         unsigned int entries;
642         int rc;
643
644         /* Round the ring size up to a power of two (at least EFX_MIN_DMAQ_SIZE) */
645         entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
646         EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
647         rx_queue->ptr_mask = entries - 1;
648
649         netif_dbg(efx, probe, efx->net_dev,
650                   "creating RX queue %d size %#x mask %#x\n",
651                   efx_rx_queue_index(rx_queue), efx->rxq_entries,
652                   rx_queue->ptr_mask);
653
654         /* Allocate RX buffers */
655         rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
656                                    GFP_KERNEL);
657         if (!rx_queue->buffer)
658                 return -ENOMEM;
659
660         rc = efx_nic_probe_rx(rx_queue);
661         if (rc) {
662                 kfree(rx_queue->buffer);
663                 rx_queue->buffer = NULL;
664         }
665
666         return rc;
667 }
668
669 static void efx_init_rx_recycle_ring(struct efx_nic *efx,
670                                      struct efx_rx_queue *rx_queue)
671 {
672         unsigned int bufs_in_recycle_ring, page_ring_size;
673
674         /* Set the RX recycle ring size */
675 #ifdef CONFIG_PPC64
676         bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
677 #else
678         if (efx->pci_dev->dev.iommu_group)
679                 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
680         else
681                 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
682 #endif /* CONFIG_PPC64 */
683
684         page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
685                                             efx->rx_bufs_per_page);
686         rx_queue->page_ring = kcalloc(page_ring_size,
687                                       sizeof(*rx_queue->page_ring), GFP_KERNEL);
688         rx_queue->page_ptr_mask = page_ring_size - 1;
689 }
690
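/* Initialise an RX queue for use: reset the buffer and page-recycle ring
 * state, work out the fill limits, and set up the hardware descriptor ring.
 */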
691 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
692 {
693         struct efx_nic *efx = rx_queue->efx;
694         unsigned int max_fill, trigger, max_trigger;
695
696         netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
697                   "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
698
699         /* Initialise ptr fields */
700         rx_queue->added_count = 0;
701         rx_queue->notified_count = 0;
702         rx_queue->removed_count = 0;
703         rx_queue->min_fill = -1U;
704         efx_init_rx_recycle_ring(efx, rx_queue);
705
706         rx_queue->page_remove = 0;
707         rx_queue->page_add = rx_queue->page_ptr_mask + 1;
708         rx_queue->page_recycle_count = 0;
709         rx_queue->page_recycle_failed = 0;
710         rx_queue->page_recycle_full = 0;
711
712         /* Initialise limit fields */
713         max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
714         max_trigger =
715                 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
716         if (rx_refill_threshold != 0) {
717                 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
718                 if (trigger > max_trigger)
719                         trigger = max_trigger;
720         } else {
721                 trigger = max_trigger;
722         }
723
724         rx_queue->max_fill = max_fill;
725         rx_queue->fast_fill_trigger = trigger;
726
727         /* Set up RX descriptor ring */
728         rx_queue->enabled = true;
729         efx_nic_init_rx(rx_queue);
730 }
731
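/* Shut down an RX queue: stop refills, tear down the hardware queue, and
 * release all outstanding RX buffers and recycle-ring pages.
 */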
732 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
733 {
734         int i;
735         struct efx_nic *efx = rx_queue->efx;
736         struct efx_rx_buffer *rx_buf;
737
738         netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
739                   "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
740
741         /* A flush failure might have left rx_queue->enabled */
742         rx_queue->enabled = false;
743
744         del_timer_sync(&rx_queue->slow_fill);
745         efx_nic_fini_rx(rx_queue);
746
747         /* Release RX buffers from the current read ptr to the write ptr */
748         if (rx_queue->buffer) {
749                 for (i = rx_queue->removed_count; i < rx_queue->added_count;
750                      i++) {
751                         unsigned index = i & rx_queue->ptr_mask;
752                         rx_buf = efx_rx_buffer(rx_queue, index);
753                         efx_fini_rx_buffer(rx_queue, rx_buf);
754                 }
755         }
756
757         /* Unmap and release the pages in the recycle ring. Remove the ring. */
758         for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
759                 struct page *page = rx_queue->page_ring[i];
760                 struct efx_rx_page_state *state;
761
762                 if (page == NULL)
763                         continue;
764
765                 state = page_address(page);
766                 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
767                                PAGE_SIZE << efx->rx_buffer_order,
768                                DMA_FROM_DEVICE);
769                 put_page(page);
770         }
771         kfree(rx_queue->page_ring);
772         rx_queue->page_ring = NULL;
773 }
774
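/* Free the resources allocated by efx_probe_rx_queue(). */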
775 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
776 {
777         netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
778                   "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
779
780         efx_nic_remove_rx(rx_queue);
781
782         kfree(rx_queue->buffer);
783         rx_queue->buffer = NULL;
784 }
785
786
787 module_param(rx_refill_threshold, uint, 0444);
788 MODULE_PARM_DESC(rx_refill_threshold,
789                  "RX descriptor ring refill threshold (%)");
790