// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int rss_cpus;

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

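/* Default (no-op) channel callbacks, used by traffic channels that need no
 * special setup or teardown.
 */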
int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
};

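/* Count online CPUs, skipping hyperthread siblings so that each count
 * represents a physical core; if @local_node is set, only CPUs on the NIC's
 * NUMA node are considered.  Falls back to 1 (no RSS spreading) if the
 * temporary cpumask cannot be allocated.
 */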
static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node) {
		int numa_node = pcibus_to_node(efx->pci_dev->bus);

		cpumask_and(filter_mask, filter_mask, cpumask_of_node(numa_node));
	}

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}

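/* Work out how many RSS channels to aim for: rss_cpus if it was set,
 * otherwise one per physical core (preferring the NIC's local NUMA node),
 * capped by EFX_MAX_RX_QUEUES and, with SR-IOV, by the VF RSS table size.
 */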
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fallback to any online CPUs */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}

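/* Decide how to split the available MSI-X vectors between normal traffic
 * channels and XDP TX.  One XDP TX queue is wanted per possible CPU; if
 * there are not enough vectors or VIs for dedicated XDP channels, XDP TX
 * falls back to sharing event queues or borrowing the net-stack queues.
 */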
static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}

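/* Spread channel IRQs across the CPUs of the NIC's local NUMA node, or
 * across all online CPUs if the local node has none online.
 */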
#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	int numa_node = pcibus_to_node(efx->pci_dev->bus);
	const struct cpumask *numa_mask = cpumask_of_node(numa_node);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fallback to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */

void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
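/* Periodically expire aged ARFS filters.  Rescheduled every 30 seconds, and
 * kicked sooner from the NAPI poll loop when enough filters have accumulated.
 */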
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

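/* Allocate all channel structures up front; the number actually used is
 * decided later, when interrupts are probed.
 */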
int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

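/* Allocate the event queue and all RX/TX queue resources for a channel.
 * On any failure the channel is torn down again before returning.
 */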
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

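/* Re-create all copyable channels with new RX/TX descriptor ring sizes.
 * The device is detached and interrupts are soft-disabled while the new
 * channels are probed; if anything fails, the old ring sizes and channel
 * structures are swapped back.
 */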
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}

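/* Record the TX queue that backs a given XDP TX queue number, provided it
 * falls within the lookup table allocated in efx_set_channels().
 */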
static int
efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
		     struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}

int efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number;
	int rc;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	xdp_queue_number = 0;
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		if (channel->channel >= efx->tx_channel_offset) {
			if (efx_channel_is_xdp_tx(channel)) {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;
					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
					if (rc == 0)
						xdp_queue_number++;
				}
			} else {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;
					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
						  channel->channel, tx_queue->label,
						  tx_queue->queue);
				}

				/* If XDP is borrowing queues from net stack, it must use the queue
				 * with no csum offload, which is the first one of the channel
				 * (note: channel->tx_queue_by_type is not initialized yet)
				 */
				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
					tx_queue = &channel->tx_queue[0];
					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
					if (rc == 0)
						xdp_queue_number++;
				}
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, assign the already
	 * existing queues to the exceeding CPUs
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

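/* Enable event processing for every channel: initialise event queues where
 * needed, start NAPI, and switch MCDI completions back to event mode.  On
 * failure, any channels already started are stopped again.
 */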
int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

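/* Initialise and start every TX and RX queue; RX rings are filled with
 * descriptors before their event queues are (re)started.
 */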
void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

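/* Adjust adaptive IRQ moderation for a channel: step the moderation
 * interval down when the accumulated score is below irq_adapt_low_thresh,
 * and up (towards the configured RX maximum) when it is above
 * irq_adapt_high_thresh.
 */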
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

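/* Register a channel's NAPI context against the net device so that
 * efx_poll() can be scheduled for it.
 */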
void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}