// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int rss_cpus;
static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}
static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};
static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}
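
/* Note on the helper above: it effectively counts one CPU per physical core,
 * since each CPU visited removes its SMT siblings from the mask; with
 * local_node set, the count is further restricted to CPUs on the same NUMA
 * node as the NIC's PCI bus.
 */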
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fallback to any online CPUs */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
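
/* Illustrative example (hypothetical numbers): with rss_cpus == 0 and the NIC
 * local to a 16-core NUMA node, the code above requests 16 RSS channels
 * (subject to the EFX_MAX_RX_QUEUES and SR-IOV caps); with rss_cpus == 4 the
 * topology count is skipped and 4 is requested instead.
 */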
static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}
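
/* Worked example for the XDP sizing above (illustrative numbers only): with
 * tx_per_ev == 8 and 12 possible CPUs, n_xdp_tx == 12 and
 * n_xdp_ev == DIV_ROUND_UP(12, 8) == 2.  If those two extra event queues fit
 * under both max_channels and max_vis, the mode is
 * EFX_XDP_TX_QUEUES_DEDICATED and every CPU gets its own XDP TX queue;
 * otherwise the driver falls back to SHARED (several CPUs per XDP queue) or
 * BORROWED (reuse the normal TX queues), with the warnings printed above.
 */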
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}
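
/* Interrupt probing above falls back in the order MSI-X -> MSI -> legacy, as
 * far as efx->type->min_interrupt_mode allows; each fallback collapses the
 * layout to a single RX and TX channel (plus an optional separate TX channel
 * for legacy interrupts) and disables dedicated XDP channels.
 */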
#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fallback to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;

	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */
void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
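
/* Example of the sizing above (illustrative): with 1024 RX and 1024 TX
 * descriptors, entries = roundup_pow_of_two(1024 + 1024 + 128) = 4096, so
 * eventq_mask = 4095.  The "+ 128" headroom covers non-data events such as
 * link state changes and MCDI completions, as noted in the comment above.
 */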
/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}

	return rc;
}
/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}
/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif
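
/* The expiry quota above scales with both the installed filter count and the
 * time since the last expiry pass: quota = rfs_filter_count * time / (30*HZ),
 * and a pass only runs once quota reaches 20.  efx_poll() uses the equivalent
 * "would quota be >= 20?" test (rfs_filter_count * time >= 600 * HZ) to decide
 * when to kick this work item early.
 */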
/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}
int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}
void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}
/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);

	return rc;
}
void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
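
/* The resulting names are "<nic>-<N>" for combined channels and
 * "<nic>-rx-<N>", "<nic>-tx-<N>" or "<nic>-xdp-<N>" when RX-only, TX-only or
 * XDP channels exist, with <N> numbered from zero within each group.
 */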
void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}
int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}
void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}
static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}
static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: tx_queue_by_type is not initialized yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, assign the already
	 * existing queues to the exceeding CPUs
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}
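
/* Summary of the three xdp_txq_queues_mode cases handled above:
 * DEDICATED - every possible CPU has its own XDP TX queue on an XDP channel;
 * SHARED    - XDP channels exist but provide fewer queues than CPUs, so the
 *             trailing loop maps the remaining CPUs onto already-assigned
 *             queues;
 * BORROWED  - no XDP channels at all; XDP TX reuses queue 0 (the one without
 *             checksum offload) of each ordinary TX channel.
 */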
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}
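
/* Resize strategy used above: clone every channel that supports ->copy(),
 * swap the clones in together with the new ring sizes, and re-probe them.
 * On failure the old entry counts and channel pointers are swapped back, so
 * the device is left running with its original rings.
 */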
int efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}
bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}
int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}
void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}
int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}
void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}
void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}
void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}
/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}
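
/* Received SKBs are accumulated on channel->rx_list during event processing
 * and handed to the stack in a single netif_receive_skb_list() call above,
 * which amortises per-packet costs across the whole NAPI batch.
 */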
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}
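
/* Note on the completion path above: efx_poll() only re-arms the event queue
 * (efx_nic_eventq_read_ack()) when the budget was not exhausted and
 * napi_complete_done() accepts the completion; otherwise NAPI will poll
 * again, so no interrupt is needed.
 */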
void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}