// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int rss_cpus;
static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}
static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
};
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;

	if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, thread_mask)) {
			++count;
			cpumask_or(thread_mask, thread_mask,
				   topology_sibling_cpumask(cpu));
		}
	}

	free_cpumask_var(thread_mask);

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}
	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * of the same size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
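	/* n_xdp_ev is the number of extra channels needed so that every
	 * possible CPU gets its own XDP TX queue, with up to tx_per_ev of
	 * those queues sharing one event queue/channel.
	 */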
	vec_count = pci_msix_vec_count(efx->pci_dev);

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels + n_xdp_ev > max_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			  n_xdp_ev, n_channels, max_channels);
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			  n_xdp_tx, n_channels, efx->max_vis);
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);
	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
					   n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		for (i = 0; i < efx->n_channels; i++)
			efx_get_channel(efx, i)->irq =
				xentries[i].vector;
	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}
	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}
	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}
#if defined(CONFIG_SMP)
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;
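	/* Spread the channel IRQs across CPUs, one CPU per channel,
	 * preferring CPUs local to the NIC's NUMA node.
	 */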
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */
void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
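	/* The ring size is a power of two, so (size - 1) can be used directly
	 * as the read-pointer mask.
	 */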
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}

	return rc;
}
/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
/**************************************************************************
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
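	/* Aim to scan the whole filter table roughly every 30 seconds: the
	 * expiry quota for this pass is the filter count pro-rated by the
	 * time elapsed since the last successful expiry run.
	 */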
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif
/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->queue = -1;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}
int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}
/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;
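	/* The copy starts out sharing the old channel's state, so clear out
	 * everything that refers to resources owned by the old channel (NAPI
	 * context, event queue, TX/RX buffers); they will be reallocated when
	 * the new channel is probed.
	 */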
	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);

	rc = efx_probe_eventq(channel);

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}
void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}
int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}
void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}
	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}
int efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number;
	int rc;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;
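	/* With efx_separate_tx_channels the TX channels are the last
	 * n_tx_channels channels; otherwise TX queues share the RX channels
	 * starting from channel 0.
	 */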
	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	xdp_queue_number = 0;
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		if (channel->channel >= efx->tx_channel_offset) {
			if (efx_channel_is_xdp_tx(channel)) {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;
					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
						  channel->channel, tx_queue->label,
						  xdp_queue_number, tx_queue->queue);
					/* We may have a few left-over XDP TX
					 * queues owing to xdp_tx_queue_count
					 * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
					 * We still allocate and probe those
					 * TXQs, but never use them.
					 */
					if (xdp_queue_number < efx->xdp_tx_queue_count)
						efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
					xdp_queue_number++;
				}
			} else {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;
					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
						  channel->channel, tx_queue->label,
						  tx_queue->queue);
				}
			}
		}
	}

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}
int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);
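	/* MCDI completions are switched to polling here because event
	 * delivery is about to be turned off, so completion events would no
	 * longer be seen.
	 */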
	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}
int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}
void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}
void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}
/**************************************************************************
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;
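	/* Adaptive IRQ moderation: if the event score for the last interval
	 * is low, shorten the moderation delay (better latency); if it is
	 * high, lengthen it up to the configured RX moderation maximum.
	 */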
	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}
void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}