1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2005-2006 Fen Systems Ltd.
5 * Copyright 2006-2013 Solarflare Communications Inc.
8 #include <linux/bitops.h>
9 #include <linux/delay.h>
10 #include <linux/interrupt.h>
11 #include <linux/pci.h>
12 #include <linux/module.h>
13 #include <linux/seq_file.h>
14 #include <linux/crc32.h>
15 #include "net_driver.h"
18 #include "rx_common.h"
20 #include "farch_regs.h"
22 #include "siena_sriov.h"
24 #include "workarounds.h"
26 /* Falcon-architecture (SFC9000-family) support */
28 /**************************************************************************
32 **************************************************************************
35 /* This is set to 16 for a good reason. In summary, if larger than
36 * 16, the descriptor cache holds more than a default socket
37 * buffer's worth of packets (for UDP we can only have at most one
38 * socket buffer's worth outstanding). This combined with the fact
39 * that we only get 1 TX event per descriptor cache means the NIC
42 #define TX_DC_ENTRIES 16
43 #define TX_DC_ENTRIES_ORDER 1
45 #define RX_DC_ENTRIES 64
46 #define RX_DC_ENTRIES_ORDER 3
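/* Note: each descriptor cache size is 8 << its _ORDER value
 * (16 == 8 << 1, 64 == 8 << 3); efx_farch_init_common() enforces this
 * relationship with BUILD_BUG_ON().
 */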
48 /* If EFX_MAX_INT_ERRORS internal errors occur within
49 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
52 #define EFX_INT_ERROR_EXPIRE 3600
53 #define EFX_MAX_INT_ERRORS 5
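/* Example with these defaults: a fifth internal error inside the 3600s
 * window makes efx_farch_fatal_interrupt() schedule RESET_TYPE_DISABLE
 * instead of another recovery reset.
 */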
55 /* Depth of RX flush request fifo */
56 #define EFX_RX_FLUSH_COUNT 4
58 /* Driver generated events */
59 #define _EFX_CHANNEL_MAGIC_TEST 0x000101
60 #define _EFX_CHANNEL_MAGIC_FILL 0x000102
61 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
62 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
64 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
65 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
67 #define EFX_CHANNEL_MAGIC_TEST(_channel) \
68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
69 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
71 efx_rx_queue_index(_rx_queue))
72 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
74 efx_rx_queue_index(_rx_queue))
75 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
76 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
79 static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
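/* Worked example of the encoding above: for channel 3,
 * EFX_CHANNEL_MAGIC_TEST(channel) == (0x000101 << 8) | 3 == 0x00010103,
 * and _EFX_CHANNEL_MAGIC_CODE(0x00010103) == 0x000101, so
 * efx_farch_handle_generated_event() can recover both the event code and
 * the channel/queue number from a single 32-bit magic value.
 */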
81 /**************************************************************************
85 **************************************************************************/
87 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
90 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
94 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
95 const efx_oword_t *mask)
97 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
98 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
101 int efx_farch_test_registers(struct efx_nic *efx,
102 const struct efx_farch_register_test *regs,
105 unsigned address = 0;
107 efx_oword_t mask, imask, original, reg, buf;
109 for (i = 0; i < n_regs; ++i) {
110 address = regs[i].address;
111 mask = imask = regs[i].mask;
112 EFX_INVERT_OWORD(imask);
114 efx_reado(efx, &original, address);
116 /* bit sweep on and off */
117 for (j = 0; j < 128; j++) {
118 if (!EFX_EXTRACT_OWORD32(mask, j, j))
121 /* Test this testable bit can be set in isolation */
122 EFX_AND_OWORD(reg, original, mask);
123 EFX_SET_OWORD32(reg, j, j, 1);
125 efx_writeo(efx, &reg, address);
126 efx_reado(efx, &buf, address);
128 if (efx_masked_compare_oword(&reg, &buf, &mask))
131 /* Test this testable bit can be cleared in isolation */
132 EFX_OR_OWORD(reg, original, mask);
133 EFX_SET_OWORD32(reg, j, j, 0);
135 efx_writeo(efx, &reg, address);
136 efx_reado(efx, &buf, address);
138 if (efx_masked_compare_oword(&reg, &buf, &mask))
142 efx_writeo(efx, &original, address);
148 netif_err(efx, hw, efx->net_dev,
149 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
150 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
151 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
155 /**************************************************************************
157 * Special buffer handling
158 * Special buffers are used for event queues and the TX and RX
161 *************************************************************************/
164 * Initialise a special buffer
166 * This will define a buffer (previously allocated via
167 * efx_alloc_special_buffer()) in the buffer table, allowing
168 * it to be used for event queues, descriptor rings etc.
171 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
173 efx_qword_t buf_desc;
178 EFX_WARN_ON_PARANOID(!buffer->buf.addr);
180 /* Write buffer descriptors to NIC */
181 for (i = 0; i < buffer->entries; i++) {
182 index = buffer->index + i;
183 dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
184 netif_dbg(efx, probe, efx->net_dev,
185 "mapping special buffer %d at %llx\n",
186 index, (unsigned long long)dma_addr);
187 EFX_POPULATE_QWORD_3(buf_desc,
188 FRF_AZ_BUF_ADR_REGION, 0,
189 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
190 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
191 efx_write_buf_tbl(efx, &buf_desc, index);
195 /* Unmaps a buffer and clears the buffer table entries */
197 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
199 efx_oword_t buf_tbl_upd;
200 unsigned int start = buffer->index;
201 unsigned int end = (buffer->index + buffer->entries - 1);
203 if (!buffer->entries)
206 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
207 buffer->index, buffer->index + buffer->entries - 1);
209 EFX_POPULATE_OWORD_4(buf_tbl_upd,
210 FRF_AZ_BUF_UPD_CMD, 0,
211 FRF_AZ_BUF_CLR_CMD, 1,
212 FRF_AZ_BUF_CLR_END_ID, end,
213 FRF_AZ_BUF_CLR_START_ID, start);
214 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
218 * Allocate a new special buffer
220 * This allocates memory for a new buffer, clears it and allocates a
221 * new buffer ID range. It does not write into the buffer table.
223 * This call will allocate 4KB buffers, since 8KB buffers can't be
224 * used for event queues and descriptor rings.
226 static int efx_alloc_special_buffer(struct efx_nic *efx,
227 struct efx_special_buffer *buffer,
230 #ifdef CONFIG_SFC_SRIOV
231 struct siena_nic_data *nic_data = efx->nic_data;
233 len = ALIGN(len, EFX_BUF_SIZE);
235 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
237 buffer->entries = len / EFX_BUF_SIZE;
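/* Example (assuming EFX_BUF_SIZE is 4096): a 4096-entry descriptor ring
 * of 8-byte qwords occupies 32KB and therefore needs eight buffer table
 * entries, while a 512-entry ring fits in a single 4KB entry.
 */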
238 BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
240 /* Select new buffer ID */
241 buffer->index = efx->next_buffer_table;
242 efx->next_buffer_table += buffer->entries;
243 #ifdef CONFIG_SFC_SRIOV
244 BUG_ON(efx_siena_sriov_enabled(efx) &&
245 nic_data->vf_buftbl_base < efx->next_buffer_table);
248 netif_dbg(efx, probe, efx->net_dev,
249 "allocating special buffers %d-%d at %llx+%x "
250 "(virt %p phys %llx)\n", buffer->index,
251 buffer->index + buffer->entries - 1,
252 (u64)buffer->buf.dma_addr, len,
253 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
259 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
261 if (!buffer->buf.addr)
264 netif_dbg(efx, hw, efx->net_dev,
265 "deallocating special buffers %d-%d at %llx+%x "
266 "(virt %p phys %llx)\n", buffer->index,
267 buffer->index + buffer->entries - 1,
268 (u64)buffer->buf.dma_addr, buffer->buf.len,
269 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
271 efx_nic_free_buffer(efx, &buffer->buf);
275 /**************************************************************************
279 **************************************************************************/
281 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
282 static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
287 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
288 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
289 efx_writed_page(tx_queue->efx, &reg,
290 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
293 /* Write pointer and first descriptor for TX descriptor ring */
294 static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
295 const efx_qword_t *txd)
300 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
301 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
303 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
304 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
305 FRF_AZ_TX_DESC_WPTR, write_ptr);
307 efx_writeo_page(tx_queue->efx, &reg,
308 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
312 /* For each entry inserted into the software descriptor ring, create a
313 * descriptor in the hardware TX descriptor ring (in host memory), and
316 void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
318 struct efx_tx_buffer *buffer;
321 unsigned old_write_count = tx_queue->write_count;
323 tx_queue->xmit_pending = false;
324 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
328 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
329 buffer = &tx_queue->buffer[write_ptr];
330 txd = efx_tx_desc(tx_queue, write_ptr);
331 ++tx_queue->write_count;
333 EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
335 /* Create TX descriptor ring entry */
336 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
337 EFX_POPULATE_QWORD_4(*txd,
339 buffer->flags & EFX_TX_BUF_CONT,
340 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
341 FSF_AZ_TX_KER_BUF_REGION, 0,
342 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
343 } while (tx_queue->write_count != tx_queue->insert_count);
345 wmb(); /* Ensure descriptors are written before they are fetched */
347 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
348 txd = efx_tx_desc(tx_queue,
349 old_write_count & tx_queue->ptr_mask);
350 efx_farch_push_tx_desc(tx_queue, txd);
353 efx_farch_notify_tx_desc(tx_queue);
357 unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
358 dma_addr_t dma_addr, unsigned int len)
360 /* Don't cross 4K boundaries with descriptors. */
361 unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
363 len = min(limit, len);
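/* Example (assuming EFX_PAGE_SIZE is 4096): for a fragment starting at
 * a DMA address ending in 0xf00, limit = (~0xf00 & 0xfff) + 1 = 0x100,
 * so this descriptor covers at most 256 bytes up to the 4K boundary and
 * the caller maps the remainder with further descriptors.
 */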
369 /* Allocate hardware resources for a TX queue */
370 int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
372 struct efx_nic *efx = tx_queue->efx;
375 tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OFFLOAD : 0) |
376 ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
377 entries = tx_queue->ptr_mask + 1;
378 return efx_alloc_special_buffer(efx, &tx_queue->txd,
379 entries * sizeof(efx_qword_t));
382 void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
384 int csum = tx_queue->type & EFX_TXQ_TYPE_OFFLOAD;
385 struct efx_nic *efx = tx_queue->efx;
388 /* Pin TX descriptor ring */
389 efx_init_special_buffer(efx, &tx_queue->txd);
391 /* Push TX descriptor ring to card */
392 EFX_POPULATE_OWORD_10(reg,
393 FRF_AZ_TX_DESCQ_EN, 1,
394 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
395 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
396 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
397 FRF_AZ_TX_DESCQ_EVQ_ID,
398 tx_queue->channel->channel,
399 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
400 FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
401 FRF_AZ_TX_DESCQ_SIZE,
402 __ffs(tx_queue->txd.entries),
403 FRF_AZ_TX_DESCQ_TYPE, 0,
404 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
406 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
407 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);
409 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
412 EFX_POPULATE_OWORD_1(reg,
414 (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
416 FFE_BZ_TX_PACE_RESERVED);
417 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
420 static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
422 struct efx_nic *efx = tx_queue->efx;
423 efx_oword_t tx_flush_descq;
425 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
426 atomic_set(&tx_queue->flush_outstanding, 1);
428 EFX_POPULATE_OWORD_2(tx_flush_descq,
429 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
430 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
431 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
434 void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
436 struct efx_nic *efx = tx_queue->efx;
437 efx_oword_t tx_desc_ptr;
439 /* Remove TX descriptor ring from card */
440 EFX_ZERO_OWORD(tx_desc_ptr);
441 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
444 /* Unpin TX descriptor ring */
445 efx_fini_special_buffer(efx, &tx_queue->txd);
448 /* Free buffers backing TX queue */
449 void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
451 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
454 /**************************************************************************
458 **************************************************************************/
460 /* This creates an entry in the RX descriptor queue */
462 efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
464 struct efx_rx_buffer *rx_buf;
467 rxd = efx_rx_desc(rx_queue, index);
468 rx_buf = efx_rx_buffer(rx_queue, index);
469 EFX_POPULATE_QWORD_3(*rxd,
470 FSF_AZ_RX_KER_BUF_SIZE,
472 rx_queue->efx->type->rx_buffer_padding,
473 FSF_AZ_RX_KER_BUF_REGION, 0,
474 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
477 /* This writes to the RX_DESC_WPTR register for the specified receive
480 void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
482 struct efx_nic *efx = rx_queue->efx;
486 while (rx_queue->notified_count != rx_queue->added_count) {
487 efx_farch_build_rx_desc(
489 rx_queue->notified_count & rx_queue->ptr_mask);
490 ++rx_queue->notified_count;
494 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
495 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
496 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
497 efx_rx_queue_index(rx_queue));
500 int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
502 struct efx_nic *efx = rx_queue->efx;
505 entries = rx_queue->ptr_mask + 1;
506 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
507 entries * sizeof(efx_qword_t));
510 void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
512 efx_oword_t rx_desc_ptr;
513 struct efx_nic *efx = rx_queue->efx;
516 /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
517 jumbo_en = efx->rx_scatter;
519 netif_dbg(efx, hw, efx->net_dev,
520 "RX queue %d ring in special buffers %d-%d\n",
521 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
522 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
524 rx_queue->scatter_n = 0;
526 /* Pin RX descriptor ring */
527 efx_init_special_buffer(efx, &rx_queue->rxd);
529 /* Push RX descriptor ring to card */
530 EFX_POPULATE_OWORD_10(rx_desc_ptr,
531 FRF_AZ_RX_ISCSI_DDIG_EN, true,
532 FRF_AZ_RX_ISCSI_HDIG_EN, true,
533 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
534 FRF_AZ_RX_DESCQ_EVQ_ID,
535 efx_rx_queue_channel(rx_queue)->channel,
536 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
537 FRF_AZ_RX_DESCQ_LABEL,
538 efx_rx_queue_index(rx_queue),
539 FRF_AZ_RX_DESCQ_SIZE,
540 __ffs(rx_queue->rxd.entries),
541 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
542 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
543 FRF_AZ_RX_DESCQ_EN, 1);
544 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
545 efx_rx_queue_index(rx_queue));
548 static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
550 struct efx_nic *efx = rx_queue->efx;
551 efx_oword_t rx_flush_descq;
553 EFX_POPULATE_OWORD_2(rx_flush_descq,
554 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
555 FRF_AZ_RX_FLUSH_DESCQ,
556 efx_rx_queue_index(rx_queue));
557 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
560 void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
562 efx_oword_t rx_desc_ptr;
563 struct efx_nic *efx = rx_queue->efx;
565 /* Remove RX descriptor ring from card */
566 EFX_ZERO_OWORD(rx_desc_ptr);
567 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
568 efx_rx_queue_index(rx_queue));
570 /* Unpin RX descriptor ring */
571 efx_fini_special_buffer(efx, &rx_queue->rxd);
574 /* Free buffers backing RX queue */
575 void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
577 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
580 /**************************************************************************
584 **************************************************************************/
586 /* efx_farch_flush_queues() must be woken up when all flushes are completed,
587 * or more RX flushes can be kicked off.
589 static bool efx_farch_flush_wake(struct efx_nic *efx)
591 /* Ensure that all updates are visible to efx_farch_flush_queues() */
594 return (atomic_read(&efx->active_queues) == 0 ||
595 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
596 && atomic_read(&efx->rxq_flush_pending) > 0));
599 static bool efx_check_tx_flush_complete(struct efx_nic *efx)
602 efx_oword_t txd_ptr_tbl;
603 struct efx_channel *channel;
604 struct efx_tx_queue *tx_queue;
606 efx_for_each_channel(channel, efx) {
607 efx_for_each_channel_tx_queue(tx_queue, channel) {
608 efx_reado_table(efx, &txd_ptr_tbl,
609 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
610 if (EFX_OWORD_FIELD(txd_ptr_tbl,
611 FRF_AZ_TX_DESCQ_FLUSH) ||
612 EFX_OWORD_FIELD(txd_ptr_tbl,
613 FRF_AZ_TX_DESCQ_EN)) {
614 netif_dbg(efx, hw, efx->net_dev,
615 "flush did not complete on TXQ %d\n",
618 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
620 /* The flush is complete, but we didn't
621 * receive a flush completion event
623 netif_dbg(efx, hw, efx->net_dev,
624 "flush complete on TXQ %d, so drain "
625 "the queue\n", tx_queue->queue);
626 /* Don't need to increment active_queues as it
627 * has already been incremented for the queues
628 * which did not drain
630 efx_farch_magic_event(channel,
631 EFX_CHANNEL_MAGIC_TX_DRAIN(
640 /* Flush all the transmit queues, and continue flushing receive queues until
641 * they're all flushed. Wait for the DRAIN events to be received so that there
642 * are no more RX and TX events left on any channel. */
643 static int efx_farch_do_flush(struct efx_nic *efx)
645 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
646 struct efx_channel *channel;
647 struct efx_rx_queue *rx_queue;
648 struct efx_tx_queue *tx_queue;
651 efx_for_each_channel(channel, efx) {
652 efx_for_each_channel_tx_queue(tx_queue, channel) {
653 efx_farch_flush_tx_queue(tx_queue);
655 efx_for_each_channel_rx_queue(rx_queue, channel) {
656 rx_queue->flush_pending = true;
657 atomic_inc(&efx->rxq_flush_pending);
661 while (timeout && atomic_read(&efx->active_queues) > 0) {
662 /* If SRIOV is enabled, then offload receive queue flushing to
663 * the firmware (though we will still have to poll for
664 * completion). If that fails, fall back to the old scheme.
666 if (efx_siena_sriov_enabled(efx)) {
667 rc = efx_mcdi_flush_rxqs(efx);
672 /* The hardware supports four concurrent rx flushes, each of
673 * which may need to be retried if there is an outstanding
676 efx_for_each_channel(channel, efx) {
677 efx_for_each_channel_rx_queue(rx_queue, channel) {
678 if (atomic_read(&efx->rxq_flush_outstanding) >=
682 if (rx_queue->flush_pending) {
683 rx_queue->flush_pending = false;
684 atomic_dec(&efx->rxq_flush_pending);
685 atomic_inc(&efx->rxq_flush_outstanding);
686 efx_farch_flush_rx_queue(rx_queue);
692 timeout = wait_event_timeout(efx->flush_wq,
693 efx_farch_flush_wake(efx),
697 if (atomic_read(&efx->active_queues) &&
698 !efx_check_tx_flush_complete(efx)) {
699 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
700 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
701 atomic_read(&efx->rxq_flush_outstanding),
702 atomic_read(&efx->rxq_flush_pending));
705 atomic_set(&efx->active_queues, 0);
706 atomic_set(&efx->rxq_flush_pending, 0);
707 atomic_set(&efx->rxq_flush_outstanding, 0);
713 int efx_farch_fini_dmaq(struct efx_nic *efx)
715 struct efx_channel *channel;
716 struct efx_tx_queue *tx_queue;
717 struct efx_rx_queue *rx_queue;
720 /* Do not attempt to write to the NIC during EEH recovery */
721 if (efx->state != STATE_RECOVERY) {
722 /* Only perform flush if DMA is enabled */
723 if (efx->pci_dev->is_busmaster) {
724 efx->type->prepare_flush(efx);
725 rc = efx_farch_do_flush(efx);
726 efx->type->finish_flush(efx);
729 efx_for_each_channel(channel, efx) {
730 efx_for_each_channel_rx_queue(rx_queue, channel)
731 efx_farch_rx_fini(rx_queue);
732 efx_for_each_channel_tx_queue(tx_queue, channel)
733 efx_farch_tx_fini(tx_queue);
740 /* Reset queue and flush accounting after FLR
742 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
743 * mastering was disabled), in which case we don't receive (RXQ) flush
744 * completion events. This means that efx->rxq_flush_outstanding remained at 4
745 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
746 * events were received, and we didn't go through efx_check_tx_flush_complete()).
747 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
748 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
749 * for batched flush requests; and the efx->active_queues gets messed up because
750 * we keep incrementing for the newly initialised queues, but it never went to
751 * zero previously. Then we get a timeout every time we try to restart the
752 * queues, as it doesn't go back to zero when we should be flushing the queues.
754 void efx_farch_finish_flr(struct efx_nic *efx)
756 atomic_set(&efx->rxq_flush_pending, 0);
757 atomic_set(&efx->rxq_flush_outstanding, 0);
758 atomic_set(&efx->active_queues, 0);
762 /**************************************************************************
764 * Event queue processing
765 * Event queues are processed by per-channel tasklets.
767 **************************************************************************/
769 /* Update a channel's event queue's read pointer (RPTR) register
771 * This writes the EVQ_RPTR_REG register for the specified channel's
774 void efx_farch_ev_read_ack(struct efx_channel *channel)
777 struct efx_nic *efx = channel->efx;
779 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
780 channel->eventq_read_ptr & channel->eventq_mask);
782 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
783 * of 4 bytes, but it is really 16 bytes just like later revisions.
785 efx_writed(efx, &reg,
786 efx->type->evq_rptr_tbl_base +
787 FR_BZ_EVQ_RPTR_STEP * channel->channel);
790 /* Use HW to insert a SW defined event */
791 void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
794 efx_oword_t drv_ev_reg;
796 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
797 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
798 drv_ev_reg.u32[0] = event->u32[0];
799 drv_ev_reg.u32[1] = event->u32[1];
800 drv_ev_reg.u32[2] = 0;
801 drv_ev_reg.u32[3] = 0;
802 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
803 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
806 static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
810 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
811 FSE_AZ_EV_CODE_DRV_GEN_EV,
812 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
813 efx_farch_generate_event(channel->efx, channel->channel, &event);
816 /* Handle a transmit completion event
818 * The NIC batches TX completion events; the message we receive is of
819 * the form "complete all TX events up to this index".
822 efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
824 unsigned int tx_ev_desc_ptr;
825 unsigned int tx_ev_q_label;
826 struct efx_tx_queue *tx_queue;
827 struct efx_nic *efx = channel->efx;
829 if (unlikely(READ_ONCE(efx->reset_pending)))
832 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
833 /* Transmit completion */
834 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
835 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
836 tx_queue = efx_channel_get_tx_queue(
837 channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
838 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
839 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
840 /* Rewrite the FIFO write pointer */
841 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
842 tx_queue = efx_channel_get_tx_queue(
843 channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
845 netif_tx_lock(efx->net_dev);
846 efx_farch_notify_tx_desc(tx_queue);
847 netif_tx_unlock(efx->net_dev);
848 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
849 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
851 netif_err(efx, tx_err, efx->net_dev,
852 "channel %d unexpected TX event "
853 EFX_QWORD_FMT"\n", channel->channel,
854 EFX_QWORD_VAL(*event));
858 /* Detect errors included in the rx_evt_pkt_ok bit. */
859 static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
860 const efx_qword_t *event)
862 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
863 struct efx_nic *efx = rx_queue->efx;
864 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
865 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
866 bool rx_ev_frm_trunc, rx_ev_tobe_disc;
867 bool rx_ev_other_err, rx_ev_pause_frm;
869 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
870 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
871 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
872 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
873 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
874 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
875 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
876 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
877 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
878 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
880 /* Every error apart from tobe_disc and pause_frm */
881 rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
882 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
883 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
885 /* Count errors that are not in MAC stats. Ignore expected
886 * checksum errors during self-test. */
888 ++channel->n_rx_frm_trunc;
889 else if (rx_ev_tobe_disc)
890 ++channel->n_rx_tobe_disc;
891 else if (!efx->loopback_selftest) {
892 if (rx_ev_ip_hdr_chksum_err)
893 ++channel->n_rx_ip_hdr_chksum_err;
894 else if (rx_ev_tcp_udp_chksum_err)
895 ++channel->n_rx_tcp_udp_chksum_err;
898 /* TOBE_DISC is expected on unicast mismatches; don't print out an
899 * error message. FRM_TRUNC indicates RXDP dropped the packet due
900 * to a FIFO overflow.
903 if (rx_ev_other_err && net_ratelimit()) {
904 netif_dbg(efx, rx_err, efx->net_dev,
905 " RX queue %d unexpected RX event "
906 EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
907 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
908 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
909 rx_ev_ip_hdr_chksum_err ?
910 " [IP_HDR_CHKSUM_ERR]" : "",
911 rx_ev_tcp_udp_chksum_err ?
912 " [TCP_UDP_CHKSUM_ERR]" : "",
913 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
914 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
915 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
916 rx_ev_pause_frm ? " [PAUSE]" : "");
919 (void) rx_ev_other_err;
922 if (efx->net_dev->features & NETIF_F_RXALL)
923 /* don't discard frame for CRC error */
924 rx_ev_eth_crc_err = false;
926 /* The frame must be discarded if any of these are true. */
927 return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
928 rx_ev_tobe_disc | rx_ev_pause_frm) ?
929 EFX_RX_PKT_DISCARD : 0;
932 /* Handle receive events that are not in-order. Return true if this
933 * can be handled as a partial packet discard, false if it's more
937 efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
939 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
940 struct efx_nic *efx = rx_queue->efx;
941 unsigned expected, dropped;
943 if (rx_queue->scatter_n &&
944 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
945 rx_queue->ptr_mask)) {
946 ++channel->n_rx_nodesc_trunc;
950 expected = rx_queue->removed_count & rx_queue->ptr_mask;
951 dropped = (index - expected) & rx_queue->ptr_mask;
952 netif_info(efx, rx_err, efx->net_dev,
953 "dropped %d events (index=%d expected=%d)\n",
954 dropped, index, expected);
956 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
960 /* Handle a packet received event
962 * The NIC gives a "discard" flag if it's a unicast packet with the
963 * wrong destination address.
964 * Also "is multicast" and "matches multicast filter" flags can be used to
965 * discard non-matching multicast packets.
968 efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
970 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
971 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
972 unsigned expected_ptr;
973 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
975 struct efx_rx_queue *rx_queue;
976 struct efx_nic *efx = channel->efx;
978 if (unlikely(READ_ONCE(efx->reset_pending)))
981 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
982 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
983 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
986 rx_queue = efx_channel_get_rx_queue(channel);
988 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
989 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
992 /* Check for partial drops and other errors */
993 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
994 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
995 if (rx_ev_desc_ptr != expected_ptr &&
996 !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
999 /* Discard all pending fragments */
1000 if (rx_queue->scatter_n) {
1003 rx_queue->removed_count & rx_queue->ptr_mask,
1004 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
1005 rx_queue->removed_count += rx_queue->scatter_n;
1006 rx_queue->scatter_n = 0;
1009 /* Return if there is no new fragment */
1010 if (rx_ev_desc_ptr != expected_ptr)
1013 /* Discard new fragment if not SOP */
1017 rx_queue->removed_count & rx_queue->ptr_mask,
1018 1, 0, EFX_RX_PKT_DISCARD);
1019 ++rx_queue->removed_count;
1024 ++rx_queue->scatter_n;
1028 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1029 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1030 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1032 if (likely(rx_ev_pkt_ok)) {
1033 /* If packet is marked as OK then we can rely on the
1034 * hardware checksum and classification.
1037 switch (rx_ev_hdr_type) {
1038 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1039 flags |= EFX_RX_PKT_TCP;
1041 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1042 flags |= EFX_RX_PKT_CSUMMED;
1044 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1045 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1049 flags = efx_farch_handle_rx_not_ok(rx_queue, event);
1052 /* Detect multicast packets that didn't match the filter */
1053 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1054 if (rx_ev_mcast_pkt) {
1055 unsigned int rx_ev_mcast_hash_match =
1056 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1058 if (unlikely(!rx_ev_mcast_hash_match)) {
1059 ++channel->n_rx_mcast_mismatch;
1060 flags |= EFX_RX_PKT_DISCARD;
1064 channel->irq_mod_score += 2;
1066 /* Handle received packet */
1067 efx_rx_packet(rx_queue,
1068 rx_queue->removed_count & rx_queue->ptr_mask,
1069 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1070 rx_queue->removed_count += rx_queue->scatter_n;
1071 rx_queue->scatter_n = 0;
1074 /* If this flush done event corresponds to a &struct efx_tx_queue, then
1075 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1076 * of all transmit completions.
1079 efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1081 struct efx_tx_queue *tx_queue;
1084 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1085 if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
1086 tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
1087 qid % EFX_MAX_TXQ_PER_CHANNEL);
1088 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1089 efx_farch_magic_event(tx_queue->channel,
1090 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1095 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1096 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1097 * the RX queue back to the mask of RX queues in need of flushing.
1100 efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1102 struct efx_channel *channel;
1103 struct efx_rx_queue *rx_queue;
1107 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1108 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1109 if (qid >= efx->n_channels)
1111 channel = efx_get_channel(efx, qid);
1112 if (!efx_channel_has_rx_queue(channel))
1114 rx_queue = efx_channel_get_rx_queue(channel);
1117 netif_info(efx, hw, efx->net_dev,
1118 "RXQ %d flush retry\n", qid);
1119 rx_queue->flush_pending = true;
1120 atomic_inc(&efx->rxq_flush_pending);
1122 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1123 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1125 atomic_dec(&efx->rxq_flush_outstanding);
1126 if (efx_farch_flush_wake(efx))
1127 wake_up(&efx->flush_wq);
1131 efx_farch_handle_drain_event(struct efx_channel *channel)
1133 struct efx_nic *efx = channel->efx;
1135 WARN_ON(atomic_read(&efx->active_queues) == 0);
1136 atomic_dec(&efx->active_queues);
1137 if (efx_farch_flush_wake(efx))
1138 wake_up(&efx->flush_wq);
1141 static void efx_farch_handle_generated_event(struct efx_channel *channel,
1144 struct efx_nic *efx = channel->efx;
1145 struct efx_rx_queue *rx_queue =
1146 efx_channel_has_rx_queue(channel) ?
1147 efx_channel_get_rx_queue(channel) : NULL;
1148 unsigned magic, code;
1150 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1151 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1153 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1154 channel->event_test_cpu = raw_smp_processor_id();
1155 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1156 /* The queue must be empty, so we won't receive any rx
1157 * events, so efx_process_channel() won't refill the
1158 * queue. Refill it here */
1159 efx_fast_push_rx_descriptors(rx_queue, true);
1160 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1161 efx_farch_handle_drain_event(channel);
1162 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1163 efx_farch_handle_drain_event(channel);
1165 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1166 "generated event "EFX_QWORD_FMT"\n",
1167 channel->channel, EFX_QWORD_VAL(*event));
1172 efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1174 struct efx_nic *efx = channel->efx;
1175 unsigned int ev_sub_code;
1176 unsigned int ev_sub_data;
1178 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1179 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1181 switch (ev_sub_code) {
1182 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1183 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1184 channel->channel, ev_sub_data);
1185 efx_farch_handle_tx_flush_done(efx, event);
1186 #ifdef CONFIG_SFC_SRIOV
1187 efx_siena_sriov_tx_flush_done(efx, event);
1190 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1191 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1192 channel->channel, ev_sub_data);
1193 efx_farch_handle_rx_flush_done(efx, event);
1194 #ifdef CONFIG_SFC_SRIOV
1195 efx_siena_sriov_rx_flush_done(efx, event);
1198 case FSE_AZ_EVQ_INIT_DONE_EV:
1199 netif_dbg(efx, hw, efx->net_dev,
1200 "channel %d EVQ %d initialised\n",
1201 channel->channel, ev_sub_data);
1203 case FSE_AZ_SRM_UPD_DONE_EV:
1204 netif_vdbg(efx, hw, efx->net_dev,
1205 "channel %d SRAM update done\n", channel->channel);
1207 case FSE_AZ_WAKE_UP_EV:
1208 netif_vdbg(efx, hw, efx->net_dev,
1209 "channel %d RXQ %d wakeup event\n",
1210 channel->channel, ev_sub_data);
1212 case FSE_AZ_TIMER_EV:
1213 netif_vdbg(efx, hw, efx->net_dev,
1214 "channel %d RX queue %d timer expired\n",
1215 channel->channel, ev_sub_data);
1217 case FSE_AA_RX_RECOVER_EV:
1218 netif_err(efx, rx_err, efx->net_dev,
1219 "channel %d seen DRIVER RX_RESET event. "
1220 "Resetting.\n", channel->channel);
1221 atomic_inc(&efx->rx_reset);
1222 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1224 case FSE_BZ_RX_DSC_ERROR_EV:
1225 if (ev_sub_data < EFX_VI_BASE) {
1226 netif_err(efx, rx_err, efx->net_dev,
1227 "RX DMA Q %d reports descriptor fetch error."
1228 " RX Q %d is disabled.\n", ev_sub_data,
1230 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1232 #ifdef CONFIG_SFC_SRIOV
1234 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1237 case FSE_BZ_TX_DSC_ERROR_EV:
1238 if (ev_sub_data < EFX_VI_BASE) {
1239 netif_err(efx, tx_err, efx->net_dev,
1240 "TX DMA Q %d reports descriptor fetch error."
1241 " TX Q %d is disabled.\n", ev_sub_data,
1243 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1245 #ifdef CONFIG_SFC_SRIOV
1247 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1251 netif_vdbg(efx, hw, efx->net_dev,
1252 "channel %d unknown driver event code %d "
1253 "data %04x\n", channel->channel, ev_sub_code,
1259 int efx_farch_ev_process(struct efx_channel *channel, int budget)
1261 struct efx_nic *efx = channel->efx;
1262 unsigned int read_ptr;
1263 efx_qword_t event, *p_event;
1270 read_ptr = channel->eventq_read_ptr;
1273 p_event = efx_event(channel, read_ptr);
1276 if (!efx_event_present(&event))
1280 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1281 "channel %d event is "EFX_QWORD_FMT"\n",
1282 channel->channel, EFX_QWORD_VAL(event));
1284 /* Clear this event by marking it all ones */
1285 EFX_SET_QWORD(*p_event);
1289 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1292 case FSE_AZ_EV_CODE_RX_EV:
1293 efx_farch_handle_rx_event(channel, &event);
1294 if (++spent == budget)
1297 case FSE_AZ_EV_CODE_TX_EV:
1298 efx_farch_handle_tx_event(channel, &event);
1300 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1301 efx_farch_handle_generated_event(channel, &event);
1303 case FSE_AZ_EV_CODE_DRIVER_EV:
1304 efx_farch_handle_driver_event(channel, &event);
1306 #ifdef CONFIG_SFC_SRIOV
1307 case FSE_CZ_EV_CODE_USER_EV:
1308 efx_siena_sriov_event(channel, &event);
1311 case FSE_CZ_EV_CODE_MCDI_EV:
1312 efx_mcdi_process_event(channel, &event);
1314 case FSE_AZ_EV_CODE_GLOBAL_EV:
1315 if (efx->type->handle_global_event &&
1316 efx->type->handle_global_event(channel, &event))
1320 netif_err(channel->efx, hw, channel->efx->net_dev,
1321 "channel %d unknown event type %d (data "
1322 EFX_QWORD_FMT ")\n", channel->channel,
1323 ev_code, EFX_QWORD_VAL(event));
1328 channel->eventq_read_ptr = read_ptr;
1332 /* Allocate buffer table entries for event queue */
1333 int efx_farch_ev_probe(struct efx_channel *channel)
1335 struct efx_nic *efx = channel->efx;
1338 entries = channel->eventq_mask + 1;
1339 return efx_alloc_special_buffer(efx, &channel->eventq,
1340 entries * sizeof(efx_qword_t));
1343 int efx_farch_ev_init(struct efx_channel *channel)
1346 struct efx_nic *efx = channel->efx;
1348 netif_dbg(efx, hw, efx->net_dev,
1349 "channel %d event queue in special buffers %d-%d\n",
1350 channel->channel, channel->eventq.index,
1351 channel->eventq.index + channel->eventq.entries - 1);
1353 EFX_POPULATE_OWORD_3(reg,
1354 FRF_CZ_TIMER_Q_EN, 1,
1355 FRF_CZ_HOST_NOTIFY_MODE, 0,
1356 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1357 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1359 /* Pin event queue buffer */
1360 efx_init_special_buffer(efx, &channel->eventq);
1362 /* Fill event queue with all ones (i.e. empty events) */
1363 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1365 /* Push event queue to card */
1366 EFX_POPULATE_OWORD_3(reg,
1368 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1369 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1370 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1376 void efx_farch_ev_fini(struct efx_channel *channel)
1379 struct efx_nic *efx = channel->efx;
1381 /* Remove event queue from card */
1382 EFX_ZERO_OWORD(reg);
1383 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1385 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1387 /* Unpin event queue */
1388 efx_fini_special_buffer(efx, &channel->eventq);
1391 /* Free buffers backing event queue */
1392 void efx_farch_ev_remove(struct efx_channel *channel)
1394 efx_free_special_buffer(channel->efx, &channel->eventq);
1398 void efx_farch_ev_test_generate(struct efx_channel *channel)
1400 efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1403 void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
1405 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1406 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1409 /**************************************************************************
1411 * Hardware interrupts
1412 * The hardware interrupt handler does very little work; all the event
1413 * queue processing is carried out by per-channel tasklets.
1415 **************************************************************************/
1417 /* Enable/disable/generate interrupts */
1418 static inline void efx_farch_interrupts(struct efx_nic *efx,
1419 bool enabled, bool force)
1421 efx_oword_t int_en_reg_ker;
1423 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1424 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1425 FRF_AZ_KER_INT_KER, force,
1426 FRF_AZ_DRV_INT_EN_KER, enabled);
1427 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1430 void efx_farch_irq_enable_master(struct efx_nic *efx)
1432 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1433 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1435 efx_farch_interrupts(efx, true, false);
1438 void efx_farch_irq_disable_master(struct efx_nic *efx)
1440 /* Disable interrupts */
1441 efx_farch_interrupts(efx, false, false);
1444 /* Generate a test interrupt
1445 * Interrupt must already have been enabled, otherwise nasty things
1448 int efx_farch_irq_test_generate(struct efx_nic *efx)
1450 efx_farch_interrupts(efx, true, true);
1454 /* Process a fatal interrupt
1455 * Disable bus mastering ASAP and schedule a reset
1457 irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
1459 efx_oword_t *int_ker = efx->irq_status.addr;
1460 efx_oword_t fatal_intr;
1461 int error, mem_perr;
1463 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1464 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1466 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1467 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1468 EFX_OWORD_VAL(fatal_intr),
1469 error ? "disabling bus mastering" : "no recognised error");
1471 /* If this is a memory parity error, dump which blocks are offending */
1472 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1473 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1476 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1477 netif_err(efx, hw, efx->net_dev,
1478 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1479 EFX_OWORD_VAL(reg));
1482 /* Disable both devices */
1483 pci_clear_master(efx->pci_dev);
1484 efx_farch_irq_disable_master(efx);
1486 /* Count errors and reset or disable the NIC accordingly */
1487 if (efx->int_error_count == 0 ||
1488 time_after(jiffies, efx->int_error_expire)) {
1489 efx->int_error_count = 0;
1490 efx->int_error_expire =
1491 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1493 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1494 netif_err(efx, hw, efx->net_dev,
1495 "SYSTEM ERROR - reset scheduled\n");
1496 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1498 netif_err(efx, hw, efx->net_dev,
1499 "SYSTEM ERROR - max number of errors seen."
1500 "NIC will be disabled\n");
1501 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1507 /* Handle a legacy interrupt
1508 * Acknowledges the interrupt and schedules event queue processing.
1510 irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
1512 struct efx_nic *efx = dev_id;
1513 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1514 efx_oword_t *int_ker = efx->irq_status.addr;
1515 irqreturn_t result = IRQ_NONE;
1516 struct efx_channel *channel;
1521 /* Read the ISR which also ACKs the interrupts */
1522 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1523 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1525 /* Legacy interrupts are disabled too late by the EEH kernel
1526 * code. Disable them earlier.
1527 * If an EEH error occurred, the read will have returned all ones.
1529 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1530 !efx->eeh_disabled_legacy_irq) {
1531 disable_irq_nosync(efx->legacy_irq);
1532 efx->eeh_disabled_legacy_irq = true;
1535 /* Handle non-event-queue sources */
1536 if (queues & (1U << efx->irq_level) && soft_enabled) {
1537 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1538 if (unlikely(syserr))
1539 return efx_farch_fatal_interrupt(efx);
1540 efx->last_irq_cpu = raw_smp_processor_id();
1544 efx->irq_zero_count = 0;
1546 /* Schedule processing of any interrupting queues */
1547 if (likely(soft_enabled)) {
1548 efx_for_each_channel(channel, efx) {
1550 efx_schedule_channel_irq(channel);
1554 result = IRQ_HANDLED;
1559 /* Legacy ISR read can return zero once (SF bug 15783) */
1561 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1562 * because this might be a shared interrupt. */
1563 if (efx->irq_zero_count++ == 0)
1564 result = IRQ_HANDLED;
1566 /* Ensure we schedule or rearm all event queues */
1567 if (likely(soft_enabled)) {
1568 efx_for_each_channel(channel, efx) {
1569 event = efx_event(channel,
1570 channel->eventq_read_ptr);
1571 if (efx_event_present(event))
1572 efx_schedule_channel_irq(channel);
1574 efx_farch_ev_read_ack(channel);
1579 if (result == IRQ_HANDLED)
1580 netif_vdbg(efx, intr, efx->net_dev,
1581 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1582 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1587 /* Handle an MSI interrupt
1589 * Handle an MSI hardware interrupt. This routine schedules event
1590 * queue processing. No interrupt acknowledgement cycle is necessary.
1591 * Also, we never need to check that the interrupt is for us, since
1592 * MSI interrupts cannot be shared.
1594 irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1596 struct efx_msi_context *context = dev_id;
1597 struct efx_nic *efx = context->efx;
1598 efx_oword_t *int_ker = efx->irq_status.addr;
1601 netif_vdbg(efx, intr, efx->net_dev,
1602 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1603 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1605 if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1608 /* Handle non-event-queue sources */
1609 if (context->index == efx->irq_level) {
1610 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1611 if (unlikely(syserr))
1612 return efx_farch_fatal_interrupt(efx);
1613 efx->last_irq_cpu = raw_smp_processor_id();
1616 /* Schedule processing of the channel */
1617 efx_schedule_channel_irq(efx->channel[context->index]);
1622 /* Set up the RSS indirection table.
1623 * This maps from the hash value of the packet to an RX queue.
1625 void efx_farch_rx_push_indir_table(struct efx_nic *efx)
1630 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
1631 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1633 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1634 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1635 efx->rss_context.rx_indir_table[i]);
1636 efx_writed(efx, &dword,
1637 FR_BZ_RX_INDIRECTION_TBL +
1638 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
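/* Example of the mapping described above: each row holds one queue
 * number (FRF_BZ_IT_QUEUE); the hardware uses bits of the packet's RSS
 * hash to select a row, so a packet hashing to row 57 with
 * rx_indir_table[57] == 1 is delivered to RX queue 1.
 */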
1642 void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
1647 BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
1648 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1650 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1651 efx_readd(efx, &dword,
1652 FR_BZ_RX_INDIRECTION_TBL +
1653 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1654 efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
1658 /* Looks at available SRAM resources and works out how many queues we
1659 * can support, and where things like descriptor caches should live.
1661 * SRAM is split up as follows:
1662 * 0 buftbl entries for channels
1663 * efx->vf_buftbl_base buftbl entries for SR-IOV
1664 * efx->rx_dc_base RX descriptor caches
1665 * efx->tx_dc_base TX descriptor caches
1667 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1669 unsigned vi_count, buftbl_min, total_tx_channels;
1671 #ifdef CONFIG_SFC_SRIOV
1672 struct siena_nic_data *nic_data = efx->nic_data;
1675 total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
1676 /* Account for the buffer table entries backing the datapath channels
1677 * and the descriptor caches for those channels.
1679 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1680 total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
1681 efx->n_channels * EFX_MAX_EVQ_SIZE)
1682 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1683 vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
1685 #ifdef CONFIG_SFC_SRIOV
1686 if (efx->type->sriov_wanted) {
1687 if (efx->type->sriov_wanted(efx)) {
1688 unsigned vi_dc_entries, buftbl_free;
1689 unsigned entries_per_vf, vf_limit;
1691 nic_data->vf_buftbl_base = buftbl_min;
1693 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1694 vi_count = max(vi_count, EFX_VI_BASE);
1695 buftbl_free = (sram_lim_qw - buftbl_min -
1696 vi_count * vi_dc_entries);
1698 entries_per_vf = ((vi_dc_entries +
1699 EFX_VF_BUFTBL_PER_VI) *
1701 vf_limit = min(buftbl_free / entries_per_vf,
1702 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1704 if (efx->vf_count > vf_limit) {
1705 netif_err(efx, probe, efx->net_dev,
1706 "Reducing VF count from from %d to %d\n",
1707 efx->vf_count, vf_limit);
1708 efx->vf_count = vf_limit;
1710 vi_count += efx->vf_count * efx_vf_size(efx);
1715 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1716 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
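/* Worked example with hypothetical numbers: for vi_count = 32 and
 * sram_lim_qw = 0x4000, tx_dc_base = 0x4000 - 32 * 16 = 0x3e00 and
 * rx_dc_base = 0x3e00 - 32 * 64 = 0x3600, leaving everything below
 * rx_dc_base for buffer table entries.
 */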
1719 u32 efx_farch_fpga_ver(struct efx_nic *efx)
1721 efx_oword_t altera_build;
1722 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1723 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1726 void efx_farch_init_common(struct efx_nic *efx)
1730 /* Set positions of descriptor caches in SRAM. */
1731 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1732 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1733 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1734 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1736 /* Set TX descriptor cache size. */
1737 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1738 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1739 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1741 /* Set RX descriptor cache size. Set low watermark to size-8, as
1742 * this allows most efficient prefetching.
1744 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1745 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1746 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1747 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1748 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1750 /* Program INT_KER address */
1751 EFX_POPULATE_OWORD_2(temp,
1752 FRF_AZ_NORM_INT_VEC_DIS_KER,
1753 EFX_INT_MODE_USE_MSI(efx),
1754 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1755 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1757 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1758 /* Use an interrupt level unused by event queues */
1759 efx->irq_level = 0x1f;
1761 /* Use a valid MSI-X vector */
1764 /* Enable all the genuinely fatal interrupts. (They are still
1765 * masked by the overall interrupt mask, controlled by
1766 * falcon_interrupts()).
1768 * Note: All other fatal interrupts are enabled
1770 EFX_POPULATE_OWORD_3(temp,
1771 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1772 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1773 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1774 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1775 EFX_INVERT_OWORD(temp);
1776 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1778 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1779 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1781 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1782 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1783 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1784 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1785 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1786 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1787 /* Enable SW_EV to inherit in char driver - assume harmless here */
1788 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1789 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1790 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1791 /* Disable hardware watchdog which can misfire */
1792 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1793 /* Squash TX of packets of 16 bytes or less */
1794 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1795 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1797 EFX_POPULATE_OWORD_4(temp,
1798 /* Default values */
1799 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1800 FRF_BZ_TX_PACE_SB_AF, 0xb,
1801 FRF_BZ_TX_PACE_FB_BASE, 0,
1802 /* Allow large pace values in the fast bin. */
1803 FRF_BZ_TX_PACE_BIN_TH,
1804 FFE_BZ_TX_PACE_RESERVED);
1805 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1808 /**************************************************************************
1812 **************************************************************************
1815 /* "Fudge factors" - difference between programmed value and actual depth.
1816 * Due to pipelined implementation we need to program H/W with a value that
1817 * is larger than the hop limit we want.
1819 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1820 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
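/* Example: if search_limit[EFX_FARCH_FILTER_TCP_WILD] is 10,
 * efx_farch_filter_push_rx_config() below programs
 * FRF_BZ_TCP_WILD_SRCH_LIMIT with 10 + 3 = 13.
 */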
1822 /* Hard maximum search limit. Hardware will time-out beyond 200-something.
1823 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1826 #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1828 /* Don't try very hard to find space for performance hints, as this is
1829 * counter-productive. */
1830 #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1832 enum efx_farch_filter_type {
1833 EFX_FARCH_FILTER_TCP_FULL = 0,
1834 EFX_FARCH_FILTER_TCP_WILD,
1835 EFX_FARCH_FILTER_UDP_FULL,
1836 EFX_FARCH_FILTER_UDP_WILD,
1837 EFX_FARCH_FILTER_MAC_FULL = 4,
1838 EFX_FARCH_FILTER_MAC_WILD,
1839 EFX_FARCH_FILTER_UC_DEF = 8,
1840 EFX_FARCH_FILTER_MC_DEF,
1841 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
1844 enum efx_farch_filter_table_id {
1845 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1846 EFX_FARCH_FILTER_TABLE_RX_MAC,
1847 EFX_FARCH_FILTER_TABLE_RX_DEF,
1848 EFX_FARCH_FILTER_TABLE_TX_MAC,
1849 EFX_FARCH_FILTER_TABLE_COUNT,
1852 enum efx_farch_filter_index {
1853 EFX_FARCH_FILTER_INDEX_UC_DEF,
1854 EFX_FARCH_FILTER_INDEX_MC_DEF,
1855 EFX_FARCH_FILTER_SIZE_RX_DEF,
1858 struct efx_farch_filter_spec {
1866 struct efx_farch_filter_table {
1867 enum efx_farch_filter_table_id id;
1868 u32 offset; /* address of table relative to BAR */
1869 unsigned size; /* number of entries */
1870 unsigned step; /* step between entries */
1871 unsigned used; /* number currently used */
1872 unsigned long *used_bitmap;
1873 struct efx_farch_filter_spec *spec;
1874 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1877 struct efx_farch_filter_state {
1878 struct rw_semaphore lock; /* Protects table contents */
1879 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1883 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1884 struct efx_farch_filter_table *table,
1885 unsigned int filter_idx);
1887 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1888 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1889 static u16 efx_farch_filter_hash(u32 key)
1893 /* First 16 rounds */
1894 tmp = 0x1fff ^ key >> 16;
1895 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1896 tmp = tmp ^ tmp >> 9;
1897 /* Last 16 rounds */
1898 tmp = tmp ^ tmp << 13 ^ key;
1899 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1900 return tmp ^ tmp >> 9;
1903 /* To allow for hash collisions, filter search continues at these
1904 * increments from the first possible entry selected by the hash. */
1905 static u16 efx_farch_filter_increment(u32 key)
1910 static enum efx_farch_filter_table_id
1911 efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1913 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1914 (EFX_FARCH_FILTER_TCP_FULL >> 2));
1915 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1916 (EFX_FARCH_FILTER_TCP_WILD >> 2));
1917 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1918 (EFX_FARCH_FILTER_UDP_FULL >> 2));
1919 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1920 (EFX_FARCH_FILTER_UDP_WILD >> 2));
1921 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1922 (EFX_FARCH_FILTER_MAC_FULL >> 2));
1923 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1924 (EFX_FARCH_FILTER_MAC_WILD >> 2));
1925 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1926 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1927 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
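/* Worked example: EFX_FARCH_FILTER_UDP_WILD (3) >> 2 == 0, the RX IP
 * table; EFX_FARCH_FILTER_MAC_WILD (5) >> 2 == 1, the RX MAC table; the
 * same MAC spec with EFX_FILTER_FLAG_TX set maps to 1 + 2 == 3, the TX
 * MAC table.
 */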
1930 static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1932 struct efx_farch_filter_state *state = efx->filter_state;
1933 struct efx_farch_filter_table *table;
1934 efx_oword_t filter_ctl;
1936 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1938 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1939 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1940 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1941 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1942 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1943 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1944 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1945 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1946 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1947 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1948 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1949 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1950 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1952 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1954 EFX_SET_OWORD_FIELD(
1955 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1956 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1957 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1958 EFX_SET_OWORD_FIELD(
1959 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1960 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1961 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1964 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1966 EFX_SET_OWORD_FIELD(
1967 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1968 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1969 EFX_SET_OWORD_FIELD(
1970 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1971 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1972 EFX_FILTER_FLAG_RX_RSS));
1973 EFX_SET_OWORD_FIELD(
1974 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1975 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1976 EFX_SET_OWORD_FIELD(
1977 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1978 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1979 EFX_FILTER_FLAG_RX_RSS));
1981 /* There is a single bit to enable RX scatter for all
1982 * unmatched packets. Only set it if scatter is
1983 * enabled in both filter specs.
1985 EFX_SET_OWORD_FIELD(
1986 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1987 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1988 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1989 EFX_FILTER_FLAG_RX_SCATTER));
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * enable bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
2001 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
2004 static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
2006 struct efx_farch_filter_state *state = efx->filter_state;
2007 struct efx_farch_filter_table *table;
2010 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
2012 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2014 EFX_SET_OWORD_FIELD(
2015 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
2016 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
2017 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
2018 EFX_SET_OWORD_FIELD(
2019 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
2020 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
2021 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
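/* Translate a generic filter specification into the Falcon-architecture
 * encoding: choose a farch filter type and pack the match values into the
 * three 32-bit data words used by the hardware filter tables.
 */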
static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2029 const struct efx_filter_spec *gen_spec)
2031 bool is_full = false;
	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context)
		return -EINVAL;
2036 spec->priority = gen_spec->priority;
2037 spec->flags = gen_spec->flags;
2038 spec->dmaq_id = gen_spec->dmaq_id;
2040 switch (gen_spec->match_flags) {
2041 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2042 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		fallthrough;
2046 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2047 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2048 __be32 rhost, host1, host2;
2049 __be16 rport, port1, port2;
2051 EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2053 if (gen_spec->ether_type != htons(ETH_P_IP))
2054 return -EPROTONOSUPPORT;
2055 if (gen_spec->loc_port == 0 ||
2056 (is_full && gen_spec->rem_port == 0))
2057 return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}
2071 /* Filter is constructed in terms of source and destination,
2072 * with the odd wrinkle that the ports are swapped in a UDP
2073 * wildcard filter. We need to convert from local and remote
2074 * (= zero for wildcard) addresses.
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
2087 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2088 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);
		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		fallthrough;
2097 case EFX_FILTER_MATCH_LOC_MAC:
2098 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2099 EFX_FARCH_FILTER_MAC_WILD);
2100 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2101 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2102 gen_spec->loc_mac[3] << 16 |
2103 gen_spec->loc_mac[4] << 8 |
2104 gen_spec->loc_mac[5]);
2105 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;
2109 case EFX_FILTER_MATCH_LOC_MAC_IG:
2110 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2111 EFX_FARCH_FILTER_MC_DEF :
2112 EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}
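/* Inverse of efx_farch_filter_from_gen_spec(): rebuild a generic filter
 * specification from the hardware-oriented encoding, e.g. so that filters
 * can be reported back to userland.
 */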
static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2125 const struct efx_farch_filter_spec *spec)
2127 bool is_full = false;
2129 /* *gen_spec should be completely initialised, to be consistent
2130 * with efx_filter_init_{rx,tx}() and in case we want to copy
2131 * it back to userland.
2133 memset(gen_spec, 0, sizeof(*gen_spec));
2135 gen_spec->priority = spec->priority;
2136 gen_spec->flags = spec->flags;
2137 gen_spec->dmaq_id = spec->dmaq_id;
2139 switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		fallthrough;
2144 case EFX_FARCH_FILTER_TCP_WILD:
2145 case EFX_FARCH_FILTER_UDP_WILD: {
2146 __be32 host1, host2;
2147 __be16 port1, port2;
2149 gen_spec->match_flags =
2150 EFX_FILTER_MATCH_ETHER_TYPE |
2151 EFX_FILTER_MATCH_IP_PROTO |
2152 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
2156 gen_spec->ether_type = htons(ETH_P_IP);
2157 gen_spec->ip_proto =
2158 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2159 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2160 IPPROTO_TCP : IPPROTO_UDP;
2162 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2163 port1 = htons(spec->data[0]);
2164 host2 = htonl(spec->data[2]);
2165 port2 = htons(spec->data[1] >> 16);
2166 if (spec->flags & EFX_FILTER_FLAG_TX) {
2167 gen_spec->loc_host[0] = host1;
2168 gen_spec->rem_host[0] = host2;
2170 gen_spec->loc_host[0] = host2;
2171 gen_spec->rem_host[0] = host1;
2173 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2174 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2175 gen_spec->loc_port = port1;
2176 gen_spec->rem_port = port2;
2178 gen_spec->loc_port = port2;
2179 gen_spec->rem_port = port1;
	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		fallthrough;
2188 case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2192 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2193 gen_spec->loc_mac[1] = spec->data[2];
2194 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2195 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2196 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2197 gen_spec->loc_mac[5] = spec->data[1];
2198 gen_spec->outer_vid = htons(spec->data[0]);
2201 case EFX_FARCH_FILTER_UC_DEF:
2202 case EFX_FARCH_FILTER_MC_DEF:
2203 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2204 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2214 efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2215 struct efx_farch_filter_spec *spec)
2217 /* If there's only one channel then disable RSS for non VF
2218 * traffic, thereby allowing VFs to use RSS when the PF can't.
2220 spec->priority = EFX_FILTER_PRI_AUTO;
2221 spec->flags = (EFX_FILTER_FLAG_RX |
2222 (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
2223 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2227 /* Build a filter entry and return its n-tuple key. */
2228 static u32 efx_farch_filter_build(efx_oword_t *filter,
2229 struct efx_farch_filter_spec *spec)
2233 switch (efx_farch_filter_spec_table_id(spec)) {
2234 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2235 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2236 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2237 EFX_POPULATE_OWORD_7(
2240 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2242 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2243 FRF_BZ_TCP_UDP, is_udp,
2244 FRF_BZ_RXQ_ID, spec->dmaq_id,
2245 EFX_DWORD_2, spec->data[2],
2246 EFX_DWORD_1, spec->data[1],
2247 EFX_DWORD_0, spec->data[0]);
2252 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2253 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2254 EFX_POPULATE_OWORD_7(
2257 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2258 FRF_CZ_RMFT_SCATTER_EN,
2259 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2260 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2261 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2262 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2263 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2264 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2269 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2270 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2271 EFX_POPULATE_OWORD_5(*filter,
2272 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2273 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2274 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2275 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2276 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2277 data3 = is_wild | spec->dmaq_id << 1;
2285 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2288 static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2289 const struct efx_farch_filter_spec *right)
2291 if (left->type != right->type ||
2292 memcmp(left->data, right->data, sizeof(left->data)))
2295 if (left->flags & EFX_FILTER_FLAG_TX &&
2296 left->dmaq_id != right->dmaq_id)
2303 * Construct/deconstruct external filter IDs. At least the RX filter
2304 * IDs must be ordered by matching priority, for RX NFC semantics.
2306 * Deconstruction needs to be robust against invalid IDs so that
2307 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2308 * accept user-provided IDs.
2311 #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2313 static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2314 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2315 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2316 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2317 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2318 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2319 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2320 [EFX_FARCH_FILTER_UC_DEF] = 4,
2321 [EFX_FARCH_FILTER_MC_DEF] = 4,
2324 static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2325 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2326 EFX_FARCH_FILTER_TABLE_RX_IP,
2327 EFX_FARCH_FILTER_TABLE_RX_MAC,
2328 EFX_FARCH_FILTER_TABLE_RX_MAC,
2329 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2330 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2331 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2334 #define EFX_FARCH_FILTER_INDEX_WIDTH 13
2335 #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
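/* A filter ID is the match-priority range in the high bits and the table
 * index in the low EFX_FARCH_FILTER_INDEX_WIDTH bits.  For example, a TCP
 * wildcard RX filter (match priority 1) at table index 5 gets the ID
 * (1 << 13) | 5 = 0x2005.
 */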
static inline u32
efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = efx_farch_filter_type_match_pri[spec->type];
2344 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2345 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2347 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2350 static inline enum efx_farch_filter_table_id
2351 efx_farch_filter_id_table_id(u32 id)
2353 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2355 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2356 return efx_farch_filter_range_table[range];
2358 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2361 static inline unsigned int efx_farch_filter_id_index(u32 id)
2363 return id & EFX_FARCH_FILTER_INDEX_MASK;
2366 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2368 struct efx_farch_filter_state *state = efx->filter_state;
2369 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2370 enum efx_farch_filter_table_id table_id;
2373 table_id = efx_farch_filter_range_table[range];
2374 if (state->table[table_id].size != 0)
2375 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2376 state->table[table_id].size;
2382 s32 efx_farch_filter_insert(struct efx_nic *efx,
2383 struct efx_filter_spec *gen_spec,
2386 struct efx_farch_filter_state *state = efx->filter_state;
2387 struct efx_farch_filter_table *table;
2388 struct efx_farch_filter_spec spec;
2390 int rep_index, ins_index;
2391 unsigned int depth = 0;
2394 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2398 down_write(&state->lock);
2400 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2401 if (table->size == 0) {
2406 netif_vdbg(efx, hw, efx->net_dev,
2407 "%s: type %d search_limit=%d", __func__, spec.type,
2408 table->search_limit[spec.type]);
2410 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2411 /* One filter spec per type */
2412 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2413 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2414 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2415 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2416 ins_index = rep_index;
2418 /* Search concurrently for
2419 * (1) a filter to be replaced (rep_index): any filter
2420 * with the same match values, up to the current
2421 * search depth for this type, and
2422 * (2) the insertion point (ins_index): (1) or any
2423 * free slot before it or up to the maximum search
2424 * depth for this priority
2425 * We fail if we cannot find (2).
2427 * We can stop once either
2428 * (a) we find (1), in which case we have definitely
2429 * found (2) as well; or
2430 * (b) we have searched exhaustively for (1), and have
2431 * either found (2) or searched exhaustively for it
2433 u32 key = efx_farch_filter_build(&filter, &spec);
2434 unsigned int hash = efx_farch_filter_hash(key);
2435 unsigned int incr = efx_farch_filter_increment(key);
2436 unsigned int max_rep_depth = table->search_limit[spec.type];
2437 unsigned int max_ins_depth =
2438 spec.priority <= EFX_FILTER_PRI_HINT ?
2439 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2440 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2441 unsigned int i = hash & (table->size - 1);
2447 if (!test_bit(i, table->used_bitmap)) {
2450 } else if (efx_farch_filter_equal(&spec,
2459 if (depth >= max_rep_depth &&
2460 (ins_index >= 0 || depth >= max_ins_depth)) {
2462 if (ins_index < 0) {
2470 i = (i + incr) & (table->size - 1);
2475 /* If we found a filter to be replaced, check whether we
2478 if (rep_index >= 0) {
2479 struct efx_farch_filter_spec *saved_spec =
2480 &table->spec[rep_index];
2482 if (spec.priority == saved_spec->priority && !replace_equal) {
2486 if (spec.priority < saved_spec->priority) {
2490 if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
2491 saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
2492 spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
2495 /* Insert the filter */
2496 if (ins_index != rep_index) {
2497 __set_bit(ins_index, table->used_bitmap);
2500 table->spec[ins_index] = spec;
2502 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2503 efx_farch_filter_push_rx_config(efx);
2505 if (table->search_limit[spec.type] < depth) {
2506 table->search_limit[spec.type] = depth;
2507 if (spec.flags & EFX_FILTER_FLAG_TX)
2508 efx_farch_filter_push_tx_limits(efx);
2510 efx_farch_filter_push_rx_config(efx);
2513 efx_writeo(efx, &filter,
2514 table->offset + table->step * ins_index);
2516 /* If we were able to replace a filter by inserting
2517 * at a lower depth, clear the replaced filter
2519 if (ins_index != rep_index && rep_index >= 0)
2520 efx_farch_filter_table_clear_entry(efx, table,
2524 netif_vdbg(efx, hw, efx->net_dev,
2525 "%s: filter type %d index %d rxq %u set",
2526 __func__, spec.type, ins_index, spec.dmaq_id);
2527 rc = efx_farch_filter_make_id(&spec, ins_index);
	up_write(&state->lock);
	return rc;
}
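/* Illustrative sketch only, not part of the driver: a caller could steer
 * local TCP port 80 traffic to RX queue 0 roughly as below, assuming the
 * generic spec helpers from filter.h and a __be32 local address ip_addr:
 *
 *	struct efx_filter_spec spec;
 *	s32 rc;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
 *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip_addr, htons(80));
 *	rc = efx_farch_filter_insert(efx, &spec, false);
 *
 * On success rc is the filter ID; on failure it is a negative errno.
 */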
static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2536 struct efx_farch_filter_table *table,
2537 unsigned int filter_idx)
2539 static efx_oword_t filter;
2541 EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2542 BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2544 __clear_bit(filter_idx, table->used_bitmap);
2546 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2548 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2550 /* If this filter required a greater search depth than
2551 * any other, the search limit for its type can now be
2552 * decreased. However, it is hard to determine that
2553 * unless the table has become completely empty - in
2554 * which case, all its search limits can be set to 0.
2556 if (unlikely(table->used == 0)) {
2557 memset(table->search_limit, 0, sizeof(table->search_limit));
2558 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2559 efx_farch_filter_push_tx_limits(efx);
2561 efx_farch_filter_push_rx_config(efx);
2565 static int efx_farch_filter_remove(struct efx_nic *efx,
2566 struct efx_farch_filter_table *table,
2567 unsigned int filter_idx,
2568 enum efx_filter_priority priority)
2570 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2572 if (!test_bit(filter_idx, table->used_bitmap) ||
2573 spec->priority != priority)
2576 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
2577 efx_farch_filter_init_rx_auto(efx, spec);
2578 efx_farch_filter_push_rx_config(efx);
2580 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2586 int efx_farch_filter_remove_safe(struct efx_nic *efx,
2587 enum efx_filter_priority priority,
2590 struct efx_farch_filter_state *state = efx->filter_state;
2591 enum efx_farch_filter_table_id table_id;
2592 struct efx_farch_filter_table *table;
2593 unsigned int filter_idx;
2596 table_id = efx_farch_filter_id_table_id(filter_id);
2597 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2599 table = &state->table[table_id];
2601 filter_idx = efx_farch_filter_id_index(filter_id);
2602 if (filter_idx >= table->size)
2604 down_write(&state->lock);
2606 rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2607 up_write(&state->lock);
2612 int efx_farch_filter_get_safe(struct efx_nic *efx,
2613 enum efx_filter_priority priority,
2614 u32 filter_id, struct efx_filter_spec *spec_buf)
2616 struct efx_farch_filter_state *state = efx->filter_state;
2617 enum efx_farch_filter_table_id table_id;
2618 struct efx_farch_filter_table *table;
2619 struct efx_farch_filter_spec *spec;
2620 unsigned int filter_idx;
2623 down_read(&state->lock);
2625 table_id = efx_farch_filter_id_table_id(filter_id);
2626 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2628 table = &state->table[table_id];
2630 filter_idx = efx_farch_filter_id_index(filter_id);
2631 if (filter_idx >= table->size)
2633 spec = &table->spec[filter_idx];
2635 if (test_bit(filter_idx, table->used_bitmap) &&
2636 spec->priority == priority) {
2637 efx_farch_filter_to_gen_spec(spec_buf, spec);
2642 up_read(&state->lock);
2647 efx_farch_filter_table_clear(struct efx_nic *efx,
2648 enum efx_farch_filter_table_id table_id,
2649 enum efx_filter_priority priority)
2651 struct efx_farch_filter_state *state = efx->filter_state;
2652 struct efx_farch_filter_table *table = &state->table[table_id];
2653 unsigned int filter_idx;
2655 down_write(&state->lock);
2656 for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2657 if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
2658 efx_farch_filter_remove(efx, table,
2659 filter_idx, priority);
2661 up_write(&state->lock);
2664 int efx_farch_filter_clear_rx(struct efx_nic *efx,
2665 enum efx_filter_priority priority)
2667 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2669 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2671 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2676 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2677 enum efx_filter_priority priority)
2679 struct efx_farch_filter_state *state = efx->filter_state;
2680 enum efx_farch_filter_table_id table_id;
2681 struct efx_farch_filter_table *table;
2682 unsigned int filter_idx;
2685 down_read(&state->lock);
2687 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2688 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2690 table = &state->table[table_id];
2691 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2692 if (test_bit(filter_idx, table->used_bitmap) &&
2693 table->spec[filter_idx].priority == priority)
2698 up_read(&state->lock);
2703 s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2704 enum efx_filter_priority priority,
2707 struct efx_farch_filter_state *state = efx->filter_state;
2708 enum efx_farch_filter_table_id table_id;
2709 struct efx_farch_filter_table *table;
2710 unsigned int filter_idx;
2713 down_read(&state->lock);
2715 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2716 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2718 table = &state->table[table_id];
2719 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2720 if (test_bit(filter_idx, table->used_bitmap) &&
2721 table->spec[filter_idx].priority == priority) {
2722 if (count == size) {
2726 buf[count++] = efx_farch_filter_make_id(
2727 &table->spec[filter_idx], filter_idx);
2732 up_read(&state->lock);
/* Restore filter state after reset */
2738 void efx_farch_filter_table_restore(struct efx_nic *efx)
2740 struct efx_farch_filter_state *state = efx->filter_state;
2741 enum efx_farch_filter_table_id table_id;
2742 struct efx_farch_filter_table *table;
2744 unsigned int filter_idx;
2746 down_write(&state->lock);
2748 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2749 table = &state->table[table_id];
2751 /* Check whether this is a regular register table */
2752 if (table->step == 0)
2755 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2756 if (!test_bit(filter_idx, table->used_bitmap))
2758 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2759 efx_writeo(efx, &filter,
2760 table->offset + table->step * filter_idx);
2764 efx_farch_filter_push_rx_config(efx);
2765 efx_farch_filter_push_tx_limits(efx);
2767 up_write(&state->lock);
2770 void efx_farch_filter_table_remove(struct efx_nic *efx)
2772 struct efx_farch_filter_state *state = efx->filter_state;
2773 enum efx_farch_filter_table_id table_id;
2775 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2776 kfree(state->table[table_id].used_bitmap);
2777 vfree(state->table[table_id].spec);
2782 int efx_farch_filter_table_probe(struct efx_nic *efx)
2784 struct efx_farch_filter_state *state;
2785 struct efx_farch_filter_table *table;
2788 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2791 efx->filter_state = state;
2792 init_rwsem(&state->lock);
2794 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2795 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2796 table->offset = FR_BZ_RX_FILTER_TBL0;
2797 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2798 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2800 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2801 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2802 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2803 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2804 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2806 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2807 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2808 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2810 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2811 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2812 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2813 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2814 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
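/* Allocate the used-entry bitmap and the software spec array for each table
 * that exists on this NIC; tables left with size 0 are absent here.
 */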
2816 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2817 table = &state->table[table_id];
2818 if (table->size == 0)
2820 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2821 sizeof(unsigned long),
2823 if (!table->used_bitmap)
2825 table->spec = vzalloc(array_size(sizeof(*table->spec),
2831 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2833 /* RX default filters must always exist */
2834 struct efx_farch_filter_spec *spec;
2837 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2838 spec = &table->spec[i];
2839 spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2840 efx_farch_filter_init_rx_auto(efx, spec);
2841 __set_bit(i, table->used_bitmap);
2845 efx_farch_filter_push_rx_config(efx);
2850 efx_farch_filter_table_remove(efx);
2854 /* Update scatter enable flags for filters pointing to our own RX queues */
2855 void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2857 struct efx_farch_filter_state *state = efx->filter_state;
2858 enum efx_farch_filter_table_id table_id;
2859 struct efx_farch_filter_table *table;
2861 unsigned int filter_idx;
2863 down_write(&state->lock);
2865 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2866 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2868 table = &state->table[table_id];
2870 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2871 if (!test_bit(filter_idx, table->used_bitmap) ||
2872 table->spec[filter_idx].dmaq_id >=
2876 if (efx->rx_scatter)
2877 table->spec[filter_idx].flags |=
2878 EFX_FILTER_FLAG_RX_SCATTER;
2880 table->spec[filter_idx].flags &=
2881 ~EFX_FILTER_FLAG_RX_SCATTER;
2883 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2884 /* Pushed by efx_farch_filter_push_rx_config() */
2887 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2888 efx_writeo(efx, &filter,
2889 table->offset + table->step * filter_idx);
2893 efx_farch_filter_push_rx_config(efx);
2895 up_write(&state->lock);
2898 #ifdef CONFIG_RFS_ACCEL
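/* Try to expire one accelerated-RFS (hint-priority) filter: if the ARFS
 * state no longer tracks the flow, or rps_may_expire_flow() reports it idle,
 * the hardware entry is cleared.  Returns true if the filter was removed.
 */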
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
2903 struct efx_farch_filter_state *state = efx->filter_state;
2904 struct efx_farch_filter_table *table;
2905 bool ret = false, force = false;
2908 down_write(&state->lock);
2909 spin_lock_bh(&efx->rps_hash_lock);
2910 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2911 if (test_bit(index, table->used_bitmap) &&
2912 table->spec[index].priority == EFX_FILTER_PRI_HINT) {
2913 struct efx_arfs_rule *rule = NULL;
2914 struct efx_filter_spec spec;
2916 efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
2917 if (!efx->rps_hash_table) {
2918 /* In the absence of the table, we always returned 0 to
2919 * ARFS, so use the same to query it.
2923 rule = efx_rps_hash_find(efx, &spec);
2925 /* ARFS table doesn't know of this filter, remove it */
2928 arfs_id = rule->arfs_id;
2929 if (!efx_rps_check_rule(rule, index, &force))
2933 if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
2934 flow_id, arfs_id)) {
2936 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
2937 efx_rps_hash_del(efx, &spec);
2938 efx_farch_filter_table_clear_entry(efx, table, index);
2943 spin_unlock_bh(&efx->rps_hash_lock);
2944 up_write(&state->lock);
2948 #endif /* CONFIG_RFS_ACCEL */
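/* Rebuild the unicast-filter flag and the multicast hash table from the
 * net_device flags and address list; the hash is stored in
 * efx->multicast_hash for the MAC configuration code to apply.
 */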
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
2952 struct net_device *net_dev = efx->net_dev;
2953 struct netdev_hw_addr *ha;
2954 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2958 if (!efx_dev_registered(efx))
2961 netif_addr_lock_bh(net_dev);
2963 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2965 /* Build multicast hash table */
2966 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2967 memset(mc_hash, 0xff, sizeof(*mc_hash));
2969 memset(mc_hash, 0x00, sizeof(*mc_hash));
2970 netdev_for_each_mc_addr(ha, net_dev) {
2971 crc = ether_crc_le(ETH_ALEN, ha->addr);
2972 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2973 __set_bit_le(bit, mc_hash);
2976 /* Broadcast packets go through the multicast hash filter.
2977 * ether_crc_le() of the broadcast address is 0xbe2612ff
2978 * so we always add bit 0xff to the mask.
2980 __set_bit_le(0xff, mc_hash);
	netif_addr_unlock_bh(net_dev);
}