*
*************************************************************************/
-module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
+module_param_named(interrupt_mode, efx_siena_interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
"Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
-module_param(rss_cpus, uint, 0444);
+module_param_named(rss_cpus, efx_siena_rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
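The switch from module_param() to module_param_named() is what lets the C symbol take the vendor prefix while the user-visible parameter name stays "rss_cpus". A minimal sketch of the pattern (the sysfs path is illustrative; the module name is not shown in this hunk):

	static unsigned int efx_siena_rss_cpus;

	/* Visible as /sys/module/<module>/parameters/rss_cpus, mode 0444,
	 * so existing "rss_cpus=<n>" module options keep working unchanged.
	 */
	module_param_named(rss_cpus, efx_siena_rss_cpus, uint, 0444);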
/*
efx->port_initialized = false;
efx->link_state.up = false;
- efx_link_status_changed(efx);
+ efx_siena_link_status_changed(efx);
}
static void efx_remove_port(struct efx_nic *efx)
/* Determine the number of channels and queues by trying
* to hook in MSI-X interrupts.
*/
- rc = efx_probe_interrupts(efx);
+ rc = efx_siena_probe_interrupts(efx);
if (rc)
goto fail1;
- rc = efx_set_channels(efx);
+ rc = efx_siena_set_channels(efx);
if (rc)
goto fail1;
if (rc == -EAGAIN)
/* try again with new max_channels */
- efx_remove_interrupts(efx);
+ efx_siena_remove_interrupts(efx);
} while (rc == -EAGAIN);
/* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
- efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
- true);
+ efx_siena_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec,
+ true, true);
return 0;
fail2:
- efx_remove_interrupts(efx);
+ efx_siena_remove_interrupts(efx);
fail1:
efx->type->remove(efx);
return rc;
{
netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
- efx_remove_interrupts(efx);
+ efx_siena_remove_interrupts(efx);
efx->type->remove(efx);
}
goto fail4;
}
- rc = efx_probe_channels(efx);
+ rc = efx_siena_probe_channels(efx);
if (rc)
goto fail5;
efx_xdp_setup_prog(efx, NULL);
rtnl_unlock();
- efx_remove_channels(efx);
+ efx_siena_remove_channels(efx);
efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx);
* Interrupt moderation
*
**************************************************************************/
-unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
+unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
if (usecs == 0)
return 0;
return usecs * 1000 / efx->timer_quantum_ns;
}
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
-{
- /* We must round up when converting ticks to microseconds
- * because we round down when converting the other way.
- */
- return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
-}
-
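The asymmetry is deliberate: rounding up on the ticks-to-usecs path makes a get/set round trip (as ethtool performs) stable, because the rounded-up microsecond value converts back down to the same tick count. A standalone harness demonstrating this, using an illustrative timer_quantum_ns of 6144 (a value chosen for the example, not taken from this patch):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* mirrors efx_siena_usecs_to_ticks(): rounds down */
	static unsigned int usecs_to_ticks(unsigned int quantum_ns, unsigned int usecs)
	{
		return usecs ? usecs * 1000 / quantum_ns : 0;
	}

	/* mirrors the removed efx_ticks_to_usecs(): rounds up */
	static unsigned int ticks_to_usecs(unsigned int quantum_ns, unsigned int ticks)
	{
		return DIV_ROUND_UP(ticks * quantum_ns, 1000);
	}

	int main(void)
	{
		unsigned int q = 6144;
		unsigned int ticks = usecs_to_ticks(q, 100);	/* 100000/6144 -> 16 */
		unsigned int usecs = ticks_to_usecs(q, ticks);	/* ceil(98304/1000) -> 99 */

		/* 99 usecs converts back down to the same 16 ticks */
		printf("ticks=%u usecs=%u ticks'=%u\n",
		       ticks, usecs, usecs_to_ticks(q, usecs));
		return 0;
	}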
/* Set interrupt moderation parameters */
-int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
- unsigned int rx_usecs, bool rx_adaptive,
- bool rx_may_override_tx)
+int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx)
{
struct efx_channel *channel;
unsigned int timer_max_us;
return 0;
}
-void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
- unsigned int *rx_usecs, bool *rx_adaptive)
+void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive)
{
*rx_adaptive = efx->irq_rx_adaptive;
*rx_usecs = efx->irq_rx_moderation_us;
*************************************************************************/
/* Context: process, rtnl_lock() held. */
-int efx_net_open(struct net_device *net_dev)
+static int efx_net_open(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
return rc;
if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY;
- if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
+ if (efx_mcdi_poll_reboot(efx) && efx_siena_reset(efx, RESET_TYPE_ALL))
return -EIO;
/* Notify the kernel of the link state polled during driver load,
* before the monitor starts running */
- efx_link_status_changed(efx);
+ efx_siena_link_status_changed(efx);
- efx_start_all(efx);
+ efx_siena_start_all(efx);
if (efx->state == STATE_DISABLED || efx->reset_pending)
netif_device_detach(efx->net_dev);
efx_selftest_async_start(efx);
* Note that the kernel will ignore our return code; this method
* should really be a void.
*/
-int efx_net_stop(struct net_device *net_dev)
+static int efx_net_stop(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
raw_smp_processor_id());
/* Stop the device and flush all the channels */
- efx_stop_all(efx);
+ efx_siena_stop_all(efx);
return 0;
}
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
- .ndo_get_stats64 = efx_net_stats,
- .ndo_tx_timeout = efx_watchdog,
- .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_get_stats64 = efx_siena_net_stats,
+ .ndo_tx_timeout = efx_siena_watchdog,
+ .ndo_start_xmit = efx_siena_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = efx_ioctl,
- .ndo_change_mtu = efx_change_mtu,
- .ndo_set_mac_address = efx_set_mac_address,
- .ndo_set_rx_mode = efx_set_rx_mode,
- .ndo_set_features = efx_set_features,
- .ndo_features_check = efx_features_check,
+ .ndo_change_mtu = efx_siena_change_mtu,
+ .ndo_set_mac_address = efx_siena_set_mac_address,
+ .ndo_set_rx_mode = efx_siena_set_rx_mode,
+ .ndo_set_features = efx_siena_set_features,
+ .ndo_features_check = efx_siena_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.ndo_get_vf_config = efx_sriov_get_vf_config,
.ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
#endif
- .ndo_get_phys_port_id = efx_get_phys_port_id,
- .ndo_get_phys_port_name = efx_get_phys_port_name,
- .ndo_setup_tc = efx_setup_tc,
+ .ndo_get_phys_port_id = efx_siena_get_phys_port_id,
+ .ndo_get_phys_port_name = efx_siena_get_phys_port_name,
+ .ndo_setup_tc = efx_siena_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
return -EINVAL;
}
- if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
+ if (prog && efx->net_dev->mtu > efx_siena_xdp_max_mtu(efx)) {
netif_err(efx, drv, efx->net_dev,
"Unable to configure XDP with MTU of %d (max: %d)\n",
- efx->net_dev->mtu, efx_xdp_max_mtu(efx));
+ efx->net_dev->mtu, efx_siena_xdp_max_mtu(efx));
return -EINVAL;
}
if (!netif_running(dev))
return -EINVAL;
- return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+ return efx_siena_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
}
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
- efx_mtd_rename(efx);
- efx_set_channel_names(efx);
+ efx_siena_mtd_rename(efx);
+ efx_siena_set_channel_names(efx);
}
static int efx_netdev_event(struct notifier_block *this,
net_dev->netdev_ops = &efx_netdev_ops;
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
net_dev->priv_flags |= IFF_UNICAST_FLT;
- net_dev->ethtool_ops = &efx_ethtool_ops;
+ net_dev->ethtool_ops = &efx_siena_ethtool_ops;
netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
net_dev->min_mtu = EFX_MIN_MTU;
net_dev->max_mtu = EFX_MAX_MTU;
efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue;
efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_init_tx_queue_core_txq(tx_queue);
+ efx_siena_init_tx_queue_core_txq(tx_queue);
}
efx_associate(efx);
goto fail_registered;
}
- efx_init_mcdi_logging(efx);
+ efx_siena_init_mcdi_logging(efx);
return 0;
if (efx_dev_registered(efx)) {
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- efx_fini_mcdi_logging(efx);
+ efx_siena_fini_mcdi_logging(efx);
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
}
*
**************************************************************************/
-void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
+void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
u64 n_rx_nodesc_trunc = 0;
struct efx_channel *channel;
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
- efx_flush_reset_workqueue(efx);
+ efx_siena_flush_reset_workqueue(efx);
- efx_disable_interrupts(efx);
- efx_clear_interrupt_affinity(efx);
+ efx_siena_disable_interrupts(efx);
+ efx_siena_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
- efx_fini_napi(efx);
+ efx_siena_fini_napi(efx);
efx_remove_all(efx);
}
rtnl_lock();
efx_dissociate(efx);
dev_close(efx->net_dev);
- efx_disable_interrupts(efx);
+ efx_siena_disable_interrupts(efx);
efx->state = STATE_UNINIT;
rtnl_unlock();
efx_unregister_netdev(efx);
- efx_mtd_remove(efx);
+ efx_siena_mtd_remove(efx);
efx_pci_remove_main(efx);
- efx_fini_io(efx);
+ efx_siena_fini_io(efx);
netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
- efx_fini_struct(efx);
+ efx_siena_fini_struct(efx);
free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
if (rc)
goto fail1;
- efx_init_napi(efx);
+ efx_siena_init_napi(efx);
down_write(&efx->filter_sem);
rc = efx->type->init(efx);
if (rc)
goto fail5;
- efx_set_interrupt_affinity(efx);
- rc = efx_enable_interrupts(efx);
+ efx_siena_set_interrupt_affinity(efx);
+ rc = efx_siena_enable_interrupts(efx);
if (rc)
goto fail6;
return 0;
fail6:
- efx_clear_interrupt_affinity(efx);
+ efx_siena_clear_interrupt_affinity(efx);
efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
fail4:
efx->type->fini(efx);
fail3:
- efx_fini_napi(efx);
+ efx_siena_fini_napi(efx);
efx_remove_all(efx);
fail1:
return rc;
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
- rc = efx_init_struct(efx, pci_dev, net_dev);
+ rc = efx_siena_init_struct(efx, pci_dev, net_dev);
if (rc)
goto fail1;
efx_probe_vpd_strings(efx);
/* Set up basic I/O (BAR mappings etc) */
- rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
- efx->type->mem_map_size(efx));
+ rc = efx_siena_init_io(efx, efx->type->mem_bar(efx),
+ efx->type->max_dma_mask,
+ efx->type->mem_map_size(efx));
if (rc)
goto fail2;
return 0;
fail3:
- efx_fini_io(efx);
+ efx_siena_fini_io(efx);
fail2:
- efx_fini_struct(efx);
+ efx_siena_fini_struct(efx);
fail1:
WARN_ON(rc > 0);
netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ efx_siena_stop_all(efx);
+ efx_siena_disable_interrupts(efx);
}
rtnl_unlock();
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- rc = efx_enable_interrupts(efx);
+ rc = efx_siena_enable_interrupts(efx);
if (rc)
goto fail;
efx_mcdi_port_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
- efx_start_all(efx);
+ efx_siena_start_all(efx);
efx_device_attach_if_not_resetting(efx);
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
- efx_queue_reset_work(efx);
+ efx_siena_queue_reset_work(efx);
return 0;
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
- .err_handler = &efx_err_handlers,
+ .err_handler = &efx_siena_err_handlers,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_pci_sriov_configure,
#endif
if (rc)
goto err_notifier;
- rc = efx_create_reset_workqueue();
+ rc = efx_siena_create_reset_workqueue();
if (rc)
goto err_reset;
return 0;
err_pci:
- efx_destroy_reset_workqueue();
+ efx_siena_destroy_reset_workqueue();
err_reset:
unregister_netdevice_notifier(&efx_netdev_notifier);
err_notifier:
printk(KERN_INFO "Solarflare NET driver unloading\n");
pci_unregister_driver(&efx_pci_driver);
- efx_destroy_reset_workqueue();
+ efx_siena_destroy_reset_workqueue();
unregister_netdevice_notifier(&efx_netdev_notifier);
}
#include "net_driver.h"
#include "filter.h"
-int efx_net_open(struct net_device *net_dev);
-int efx_net_stop(struct net_device *net_dev);
-
/* TX */
-void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
- struct net_device *net_dev);
-netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev);
+netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb);
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
- __efx_enqueue_skb, tx_queue, skb);
+ __efx_siena_enqueue_skb, tx_queue, skb);
}
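For context, INDIRECT_CALL_1() (from include/linux/indirect_call_wrapper.h) avoids a retpolined indirect call on the hot TX path when the function pointer matches the expected common implementation. Its definition is approximately:

	/* With CONFIG_RETPOLINE: compare-and-branch to a direct call */
	#define INDIRECT_CALL_1(f, f1, ...)				\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})

	/* Without retpolines it degenerates to a plain indirect call:
	 * #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
	 */

This is why the rename must update the wrapper's second argument here: the comparison is against the literal symbol __efx_siena_enqueue_skb.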
-void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
-int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- void *type_data);
-extern unsigned int efx_piobuf_size;
+int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data);
/* RX */
-void __efx_rx_packet(struct efx_channel *channel);
-void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int n_frags, unsigned int len, u16 flags);
+void __efx_siena_rx_packet(struct efx_channel *channel);
+void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
- __efx_rx_packet(channel);
-}
-static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
-{
- return true;
+ __efx_siena_rx_packet(channel);
}
/* Maximum number of TCP segments we support for soft-TSO */
}
/* Ethtool support */
-extern const struct ethtool_ops efx_ethtool_ops;
+extern const struct ethtool_ops efx_siena_ethtool_ops;
/* Global */
-unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
-unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
-int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
- unsigned int rx_usecs, bool rx_adaptive,
- bool rx_may_override_tx);
-void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
- unsigned int *rx_usecs, bool *rx_adaptive);
+unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
+int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+ unsigned int rx_usecs, bool rx_adaptive,
+ bool rx_may_override_tx);
+void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+ unsigned int *rx_usecs, bool *rx_adaptive);
/* Update the generic software stats in the passed stats array */
-void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
+void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats);
/* MTD */
#ifdef CONFIG_SFC_MTD
-int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
- size_t n_parts, size_t sizeof_part);
+int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
static inline int efx_mtd_probe(struct efx_nic *efx)
{
return efx->type->mtd_probe(efx);
}
-void efx_mtd_rename(struct efx_nic *efx);
-void efx_mtd_remove(struct efx_nic *efx);
+void efx_siena_mtd_rename(struct efx_nic *efx);
+void efx_siena_mtd_remove(struct efx_nic *efx);
#else
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
-static inline void efx_mtd_rename(struct efx_nic *efx) {}
-static inline void efx_mtd_remove(struct efx_nic *efx) {}
+static inline void efx_siena_mtd_rename(struct efx_nic *efx) {}
+static inline void efx_siena_mtd_remove(struct efx_nic *efx) {}
#endif
#ifdef CONFIG_SFC_SRIOV
return true;
}
-int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
- bool flush);
+int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n,
+ struct xdp_frame **xdpfs, bool flush);
#endif /* EFX_EFX_H */
* 1 => MSI
* 2 => legacy
*/
-unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
+unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;
/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
* i.e. the number of CPUs among which we may distribute simultaneous
* Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
* The default (0) means to assign an interrupt to each core.
*/
-unsigned int rss_cpus;
+unsigned int efx_siena_rss_cpus;
static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
{
unsigned int count;
- if (rss_cpus) {
- count = rss_cpus;
+ if (efx_siena_rss_cpus) {
+ count = efx_siena_rss_cpus;
} else {
count = count_online_cores(efx, true);
}
if (count > EFX_MAX_RX_QUEUES) {
- netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+ netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
+ warn,
"Reducing number of rx queues from %u to %u.\n",
count, EFX_MAX_RX_QUEUES);
count = EFX_MAX_RX_QUEUES;
/* Probe the number and type of interrupts we are able to obtain, and
* the resulting numbers of channels and RX queues.
*/
-int efx_probe_interrupts(struct efx_nic *efx)
+int efx_siena_probe_interrupts(struct efx_nic *efx)
{
unsigned int extra_channels = 0;
unsigned int rss_spread;
}
#if defined(CONFIG_SMP)
-void efx_set_interrupt_affinity(struct efx_nic *efx)
+void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{
const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
struct efx_channel *channel;
}
}
-void efx_clear_interrupt_affinity(struct efx_nic *efx)
+void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{
struct efx_channel *channel;
}
#else
void
-efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
void
-efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
#endif /* CONFIG_SMP */
-void efx_remove_interrupts(struct efx_nic *efx)
+void efx_siena_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
* is reset, the memory buffer will be reused; this guards against
* errors during channel reset and also simplifies interrupt handling.
*/
-int efx_probe_eventq(struct efx_channel *channel)
+static int efx_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned long entries;
}
/* Prepare channel's event queue */
-int efx_init_eventq(struct efx_channel *channel)
+static int efx_init_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
int rc;
}
/* Enable event queue processing and NAPI */
-void efx_start_eventq(struct efx_channel *channel)
+void efx_siena_start_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
}
/* Disable event queue processing and NAPI */
-void efx_stop_eventq(struct efx_channel *channel)
+void efx_siena_stop_eventq(struct efx_channel *channel)
{
if (!channel->enabled)
return;
channel->enabled = false;
}
-void efx_fini_eventq(struct efx_channel *channel)
+static void efx_fini_eventq(struct efx_channel *channel)
{
if (!channel->eventq_init)
return;
channel->eventq_init = false;
}
-void efx_remove_eventq(struct efx_channel *channel)
+static void efx_remove_eventq(struct efx_channel *channel)
{
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d remove event queue\n", channel->channel);
return channel;
}
-int efx_init_channels(struct efx_nic *efx)
+int efx_siena_init_channels(struct efx_nic *efx)
{
unsigned int i;
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = min(efx->type->min_interrupt_mode,
- efx_interrupt_mode);
+ efx_siena_interrupt_mode);
efx->max_channels = EFX_MAX_CHANNELS;
efx->max_tx_channels = EFX_MAX_CHANNELS;
return 0;
}
-void efx_fini_channels(struct efx_nic *efx)
+void efx_siena_fini_channels(struct efx_nic *efx)
{
unsigned int i;
return 0;
fail:
- efx_remove_channel(channel);
+ efx_siena_remove_channel(channel);
return rc;
}
snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
-void efx_set_channel_names(struct efx_nic *efx)
+void efx_siena_set_channel_names(struct efx_nic *efx)
{
struct efx_channel *channel;
sizeof(efx->msi_context[0].name));
}
-int efx_probe_channels(struct efx_nic *efx)
+int efx_siena_probe_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
goto fail;
}
}
- efx_set_channel_names(efx);
+ efx_siena_set_channel_names(efx);
return 0;
fail:
- efx_remove_channels(efx);
+ efx_siena_remove_channels(efx);
return rc;
}
-void efx_remove_channel(struct efx_channel *channel)
+void efx_siena_remove_channel(struct efx_channel *channel)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
channel->type->post_remove(channel);
}
-void efx_remove_channels(struct efx_nic *efx)
+void efx_siena_remove_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_for_each_channel(channel, efx)
- efx_remove_channel(channel);
+ efx_siena_remove_channel(channel);
kfree(efx->xdp_tx_queues);
}
}
}
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
+static void efx_init_napi_channel(struct efx_channel *channel);
+static void efx_fini_napi_channel(struct efx_channel *channel);
+
+int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
+ u32 txq_entries)
{
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
unsigned int i, next_buffer_table = 0;
}
efx_device_detach_sync(efx);
- efx_stop_all(efx);
+ efx_siena_stop_all(efx);
efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
channel = other_channel[i];
if (channel && channel->type->copy) {
efx_fini_napi_channel(channel);
- efx_remove_channel(channel);
+ efx_siena_remove_channel(channel);
kfree(channel);
}
}
rc = rc ? rc : rc2;
netif_err(efx, drv, efx->net_dev,
"unable to restart interrupts on channel reallocation\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
} else {
- efx_start_all(efx);
+ efx_siena_start_all(efx);
efx_device_attach_if_not_resetting(efx);
}
return rc;
goto out;
}
-int efx_set_channels(struct efx_nic *efx)
+int efx_siena_set_channels(struct efx_nic *efx)
{
struct efx_channel *channel;
int rc;
* START/STOP
*************/
-int efx_soft_enable_interrupts(struct efx_nic *efx)
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel, *end_channel;
int rc;
if (rc)
goto fail;
}
- efx_start_eventq(channel);
+ efx_siena_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
efx_for_each_channel(channel, efx) {
if (channel == end_channel)
break;
- efx_stop_eventq(channel);
+ efx_siena_stop_eventq(channel);
if (!channel->type->keep_eventq)
efx_fini_eventq(channel);
}
return rc;
}
-void efx_soft_disable_interrupts(struct efx_nic *efx)
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
if (channel->irq)
synchronize_irq(channel->irq);
- efx_stop_eventq(channel);
+ efx_siena_stop_eventq(channel);
if (!channel->type->keep_eventq)
efx_fini_eventq(channel);
}
efx_mcdi_flush_async(efx);
}
-int efx_enable_interrupts(struct efx_nic *efx)
+int efx_siena_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel, *end_channel;
int rc;
return rc;
}
-void efx_disable_interrupts(struct efx_nic *efx)
+void efx_siena_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
efx->type->irq_disable_non_ev(efx);
}
-void efx_start_channels(struct efx_nic *efx)
+void efx_siena_start_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
- efx_stop_eventq(channel);
+ efx_siena_stop_eventq(channel);
efx_fast_push_rx_descriptors(rx_queue, false);
- efx_start_eventq(channel);
+ efx_siena_start_eventq(channel);
}
WARN_ON(channel->rx_pkt_n_frags);
}
}
-void efx_stop_channels(struct efx_nic *efx)
+void efx_siena_stop_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
* temporarily.
*/
if (efx_channel_has_rx_queue(channel)) {
- efx_stop_eventq(channel);
- efx_start_eventq(channel);
+ efx_siena_stop_eventq(channel);
+ efx_siena_start_eventq(channel);
}
}
return spent;
}
-void efx_init_napi_channel(struct efx_channel *channel)
+static void efx_init_napi_channel(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
napi_weight);
}
-void efx_init_napi(struct efx_nic *efx)
+void efx_siena_init_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_init_napi_channel(channel);
}
-void efx_fini_napi_channel(struct efx_channel *channel)
+static void efx_fini_napi_channel(struct efx_channel *channel)
{
if (channel->napi_dev)
netif_napi_del(&channel->napi_str);
channel->napi_dev = NULL;
}
-void efx_fini_napi(struct efx_nic *efx)
+void efx_siena_fini_napi(struct efx_nic *efx)
{
struct efx_channel *channel;
return 0;
}
-void efx_channel_dummy_op_void(struct efx_channel *channel)
+void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{
}
static const struct efx_channel_type efx_default_channel_type = {
.pre_probe = efx_channel_dummy_op_int,
- .post_remove = efx_channel_dummy_op_void,
+ .post_remove = efx_siena_channel_dummy_op_void,
.get_name = efx_get_channel_name,
.copy = efx_copy_channel,
.want_txqs = efx_default_channel_want_txqs,
#ifndef EFX_CHANNELS_H
#define EFX_CHANNELS_H
-extern unsigned int efx_interrupt_mode;
-extern unsigned int rss_cpus;
-
-int efx_probe_interrupts(struct efx_nic *efx);
-void efx_remove_interrupts(struct efx_nic *efx);
-int efx_soft_enable_interrupts(struct efx_nic *efx);
-void efx_soft_disable_interrupts(struct efx_nic *efx);
-int efx_enable_interrupts(struct efx_nic *efx);
-void efx_disable_interrupts(struct efx_nic *efx);
-
-void efx_set_interrupt_affinity(struct efx_nic *efx);
-void efx_clear_interrupt_affinity(struct efx_nic *efx);
-
-int efx_probe_eventq(struct efx_channel *channel);
-int efx_init_eventq(struct efx_channel *channel);
-void efx_start_eventq(struct efx_channel *channel);
-void efx_stop_eventq(struct efx_channel *channel);
-void efx_fini_eventq(struct efx_channel *channel);
-void efx_remove_eventq(struct efx_channel *channel);
-
-int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
-void efx_set_channel_names(struct efx_nic *efx);
-int efx_init_channels(struct efx_nic *efx);
-int efx_probe_channels(struct efx_nic *efx);
-int efx_set_channels(struct efx_nic *efx);
-void efx_remove_channel(struct efx_channel *channel);
-void efx_remove_channels(struct efx_nic *efx);
-void efx_fini_channels(struct efx_nic *efx);
-void efx_start_channels(struct efx_nic *efx);
-void efx_stop_channels(struct efx_nic *efx);
-
-void efx_init_napi_channel(struct efx_channel *channel);
-void efx_init_napi(struct efx_nic *efx);
-void efx_fini_napi_channel(struct efx_channel *channel);
-void efx_fini_napi(struct efx_nic *efx);
-
-void efx_channel_dummy_op_void(struct efx_channel *channel);
+extern unsigned int efx_siena_interrupt_mode;
+extern unsigned int efx_siena_rss_cpus;
+
+int efx_siena_probe_interrupts(struct efx_nic *efx);
+void efx_siena_remove_interrupts(struct efx_nic *efx);
+int efx_siena_enable_interrupts(struct efx_nic *efx);
+void efx_siena_disable_interrupts(struct efx_nic *efx);
+
+void efx_siena_set_interrupt_affinity(struct efx_nic *efx);
+void efx_siena_clear_interrupt_affinity(struct efx_nic *efx);
+
+void efx_siena_start_eventq(struct efx_channel *channel);
+void efx_siena_stop_eventq(struct efx_channel *channel);
+
+int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
+ u32 txq_entries);
+void efx_siena_set_channel_names(struct efx_nic *efx);
+int efx_siena_init_channels(struct efx_nic *efx);
+int efx_siena_probe_channels(struct efx_nic *efx);
+int efx_siena_set_channels(struct efx_nic *efx);
+void efx_siena_remove_channel(struct efx_channel *channel);
+void efx_siena_remove_channels(struct efx_nic *efx);
+void efx_siena_fini_channels(struct efx_nic *efx);
+void efx_siena_start_channels(struct efx_nic *efx);
+void efx_siena_stop_channels(struct efx_nic *efx);
+
+void efx_siena_init_napi(struct efx_nic *efx);
+void efx_siena_fini_napi(struct efx_nic *efx);
+
+void efx_siena_channel_dummy_op_void(struct efx_channel *channel);
#endif
*/
static struct workqueue_struct *reset_workqueue;
-int efx_create_reset_workqueue(void)
+int efx_siena_create_reset_workqueue(void)
{
reset_workqueue = create_singlethread_workqueue("sfc_reset");
if (!reset_workqueue) {
return 0;
}
-void efx_queue_reset_work(struct efx_nic *efx)
+void efx_siena_queue_reset_work(struct efx_nic *efx)
{
queue_work(reset_workqueue, &efx->reset_work);
}
-void efx_flush_reset_workqueue(struct efx_nic *efx)
+void efx_siena_flush_reset_workqueue(struct efx_nic *efx)
{
cancel_work_sync(&efx->reset_work);
}
-void efx_destroy_reset_workqueue(void)
+void efx_siena_destroy_reset_workqueue(void)
{
if (reset_workqueue) {
destroy_workqueue(reset_workqueue);
/* We assume that efx->type->reconfigure_mac will always try to sync RX
* filters and therefore needs to read-lock the filter table against freeing
*/
-void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
+void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
{
if (efx->type->reconfigure_mac) {
down_read(&efx->filter_sem);
mutex_lock(&efx->mac_lock);
if (efx->port_enabled)
- efx_mac_reconfigure(efx, false);
+ efx_siena_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock);
}
-int efx_set_mac_address(struct net_device *net_dev, void *data)
+int efx_siena_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
/* Reconfigure the MAC */
mutex_lock(&efx->mac_lock);
- efx_mac_reconfigure(efx, false);
+ efx_siena_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock);
return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
-void efx_set_rx_mode(struct net_device *net_dev)
+void efx_siena_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
/* Otherwise efx_start_port() will do this */
}
-int efx_set_features(struct net_device *net_dev, netdev_features_t data)
+int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
*/
if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_RXFCS)) {
- /* efx_set_rx_mode() will schedule MAC work to update filters
- * when a new features are finally set in net_dev.
+ /* efx_siena_set_rx_mode() will schedule MAC work to update filters
+ * when the new features are finally set in net_dev.
*/
- efx_set_rx_mode(net_dev);
+ efx_siena_set_rx_mode(net_dev);
}
return 0;
* netif_carrier_on/off) of the link status, and also maintains the
* link status's stop on the port's TX queue.
*/
-void efx_link_status_changed(struct efx_nic *efx)
+void efx_siena_link_status_changed(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
netif_info(efx, link, efx->net_dev, "link down\n");
}
-unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx)
{
/* The maximum MTU that we can fit in a single page, allowing for
* framing, overhead and XDP headroom + tailroom.
}
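The (elided) body amounts to page-size accounting. A hedged sketch of the kind of calculation involved, in kernel context; the exact overhead terms the driver uses are not shown in this hunk:

	/* Illustrative only: MTU = page minus RX prefix, Ethernet framing,
	 * XDP headroom and the skb_shared_info tailroom that XDP requires.
	 */
	static unsigned int xdp_max_mtu_sketch(unsigned int rx_prefix_size)
	{
		unsigned int overhead = rx_prefix_size + ETH_HLEN + 2 * VLAN_HLEN +
					XDP_PACKET_HEADROOM +
					SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		return PAGE_SIZE - overhead;
	}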
/* Context: process, rtnl_lock() held. */
-int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu)
{
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
return rc;
if (rtnl_dereference(efx->xdp_prog) &&
- new_mtu > efx_xdp_max_mtu(efx)) {
+ new_mtu > efx_siena_xdp_max_mtu(efx)) {
netif_err(efx, drv, efx->net_dev,
"Requested MTU of %d too big for XDP (max: %d)\n",
- new_mtu, efx_xdp_max_mtu(efx));
+ new_mtu, efx_siena_xdp_max_mtu(efx));
return -EINVAL;
}
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
efx_device_detach_sync(efx);
- efx_stop_all(efx);
+ efx_siena_stop_all(efx);
mutex_lock(&efx->mac_lock);
net_dev->mtu = new_mtu;
- efx_mac_reconfigure(efx, true);
+ efx_siena_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
- efx_start_all(efx);
+ efx_siena_start_all(efx);
efx_device_attach_if_not_resetting(efx);
return 0;
}
mutex_unlock(&efx->mac_lock);
}
- efx_start_monitor(efx);
+ efx_siena_start_monitor(efx);
}
-void efx_start_monitor(struct efx_nic *efx)
+void efx_siena_start_monitor(struct efx_nic *efx)
{
if (efx->type->monitor)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
/* Initialise the channels */
- efx_start_channels(efx);
+ efx_siena_start_channels(efx);
efx_ptp_start_datapath(efx);
efx_ptp_stop_datapath(efx);
- efx_stop_channels(efx);
+ efx_siena_stop_channels(efx);
}
/**************************************************************************
/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
* force the Autoneg bit on.
*/
-void efx_link_clear_advertising(struct efx_nic *efx)
+void efx_siena_link_clear_advertising(struct efx_nic *efx)
{
bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
efx->wanted_fc = wanted_fc;
if (efx->link_advertising[0]) {
efx->port_enabled = true;
/* Ensure MAC ingress/egress is enabled */
- efx_mac_reconfigure(efx, false);
+ efx_siena_mac_reconfigure(efx, false);
mutex_unlock(&efx->mac_lock);
}
* is safe to call multiple times, so long as the NIC is not disabled.
* Requires the RTNL lock.
*/
-void efx_start_all(struct efx_nic *efx)
+void efx_siena_start_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->state == STATE_DISABLED);
efx_start_datapath(efx);
/* Start the hardware monitor if there is one */
- efx_start_monitor(efx);
+ efx_siena_start_monitor(efx);
/* Link state detection is normally event-driven; we have
* to poll now because we could have missed a change
*/
mutex_lock(&efx->mac_lock);
if (efx_mcdi_phy_poll(efx))
- efx_link_status_changed(efx);
+ efx_siena_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
if (efx->type->start_stats) {
* times with the NIC in almost any state, but interrupts should be
* enabled. Requires the RTNL lock.
*/
-void efx_stop_all(struct efx_nic *efx)
+void efx_siena_stop_all(struct efx_nic *efx)
{
EFX_ASSERT_RESET_SERIALISED(efx);
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
-void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+void efx_siena_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
*
* Callers must hold the mac_lock
*/
-int __efx_reconfigure_port(struct efx_nic *efx)
+int __efx_siena_reconfigure_port(struct efx_nic *efx)
{
enum efx_phy_mode phy_mode;
int rc = 0;
/* Reinitialise the MAC to pick up new PHY settings, even if the port is
* disabled.
*/
-int efx_reconfigure_port(struct efx_nic *efx)
+int efx_siena_reconfigure_port(struct efx_nic *efx)
{
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
mutex_lock(&efx->mac_lock);
- rc = __efx_reconfigure_port(efx);
+ rc = __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
return rc;
* Returns 0 if the recovery mechanisms are unsuccessful.
* Returns a non-zero value otherwise.
*/
-int efx_try_recovery(struct efx_nic *efx)
+int efx_siena_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
/* A PCI error can occur and not be seen by EEH because nothing
/* Tears down the entire software state and most of the hardware state
* before reset.
*/
-void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
if (method == RESET_TYPE_MCDI_TIMEOUT)
efx->type->prepare_flr(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ efx_siena_stop_all(efx);
+ efx_siena_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
}
/* Context: netif_tx_lock held, BHs disabled. */
-void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
+void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue)
{
struct efx_nic *efx = netdev_priv(net_dev);
"TX stuck with port_enabled=%d: resetting channels\n",
efx->port_enabled);
- efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+ efx_siena_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* This function will always ensure that the locks acquired in
- * efx_reset_down() are released. A failure return code indicates
+ * efx_siena_reset_down() are released. A failure return code indicates
* that we were unable to reinitialise the hardware, and the
* driver should be disabled. If ok is false, then the rx and tx
* engines are not restarted, pending a RESET_DISABLE.
*/
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
int rc;
"could not restore PHY settings\n");
}
- rc = efx_enable_interrupts(efx);
+ rc = efx_siena_enable_interrupts(efx);
if (rc)
goto fail;
mutex_unlock(&efx->mac_lock);
- efx_start_all(efx);
+ efx_siena_start_all(efx);
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
*
* Caller must hold the rtnl_lock.
*/
-int efx_reset(struct efx_nic *efx, enum reset_type method)
+int efx_siena_reset(struct efx_nic *efx, enum reset_type method)
{
int rc, rc2 = 0;
bool disabled;
RESET_TYPE(method));
efx_device_detach_sync(efx);
- /* efx_reset_down() grabs locks that prevent recovery on EF100.
+ /* efx_siena_reset_down() grabs locks that prevent recovery on EF100.
* EF100 reset is handled in the efx_nic_type callback below.
*/
if (efx_nic_rev(efx) != EFX_REV_EF100)
- efx_reset_down(efx, method);
+ efx_siena_reset_down(efx, method);
rc = efx->type->reset(efx, method);
if (rc) {
method == RESET_TYPE_DISABLE ||
method == RESET_TYPE_RECOVER_OR_DISABLE;
if (efx_nic_rev(efx) != EFX_REV_EF100)
- rc2 = efx_reset_up(efx, method, !disabled);
+ rc2 = efx_siena_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
if (!rc)
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
- efx_try_recovery(efx))
+ efx_siena_try_recovery(efx))
return;
if (!pending)
rtnl_lock();
- /* We checked the state in efx_schedule_reset() but it may
+ /* We checked the state in efx_siena_schedule_reset() but it may
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
if (efx->state == STATE_READY)
- (void)efx_reset(efx, method);
+ (void)efx_siena_reset(efx, method);
rtnl_unlock();
}
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
*/
efx_mcdi_mode_poll(efx);
- efx_queue_reset_work(efx);
+ efx_siena_queue_reset_work(efx);
}
/**************************************************************************
* before use
*
**************************************************************************/
-int efx_port_dummy_op_int(struct efx_nic *efx)
+int efx_siena_port_dummy_op_int(struct efx_nic *efx)
{
return 0;
}
-void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+void efx_siena_port_dummy_op_void(struct efx_nic *efx) {}
/**************************************************************************
*
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-int efx_init_struct(struct efx_nic *efx,
- struct pci_dev *pci_dev, struct net_device *net_dev)
+int efx_siena_init_struct(struct efx_nic *efx,
+ struct pci_dev *pci_dev, struct net_device *net_dev)
{
int rc = -ENOMEM;
efx->mem_bar = UINT_MAX;
- rc = efx_init_channels(efx);
+ rc = efx_siena_init_channels(efx);
if (rc)
goto fail;
return 0;
fail:
- efx_fini_struct(efx);
+ efx_siena_fini_struct(efx);
return rc;
}
-void efx_fini_struct(struct efx_nic *efx)
+void efx_siena_fini_struct(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_hash_table);
#endif
- efx_fini_channels(efx);
+ efx_siena_fini_channels(efx);
kfree(efx->vpd_sn);
}
/* This configures the PCI device to enable I/O and DMA. */
-int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
- unsigned int mem_map_size)
+int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size)
{
struct pci_dev *pci_dev = efx->pci_dev;
int rc;
return rc;
}
-void efx_fini_io(struct efx_nic *efx)
+void efx_siena_fini_io(struct efx_nic *efx)
{
netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
static DEVICE_ATTR_RW(mcdi_logging);
-void efx_init_mcdi_logging(struct efx_nic *efx)
+void efx_siena_init_mcdi_logging(struct efx_nic *efx)
{
int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
}
}
-void efx_fini_mcdi_logging(struct efx_nic *efx)
+void efx_siena_fini_mcdi_logging(struct efx_nic *efx)
{
device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
}
efx_device_detach_sync(efx);
- efx_stop_all(efx);
- efx_disable_interrupts(efx);
+ efx_siena_stop_all(efx);
+ efx_siena_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
if (efx->state == STATE_DISABLED)
goto out;
- rc = efx_reset(efx, RESET_TYPE_ALL);
+ rc = efx_siena_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, hw, efx->net_dev,
- "efx_reset failed after PCI error (%d)\n", rc);
+ "efx_siena_reset failed after PCI error (%d)\n", rc);
} else {
efx->state = STATE_READY;
netif_dbg(efx, hw, efx->net_dev,
* with our request for slot reset the mmio_enabled callback will never be
* called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
-const struct pci_error_handlers efx_err_handlers = {
+const struct pci_error_handlers efx_siena_err_handlers = {
.error_detected = efx_io_error_detected,
.slot_reset = efx_io_slot_reset,
.resume = efx_io_resume,
}
}
-netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
- netdev_features_t features)
+netdev_features_t efx_siena_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
struct efx_nic *efx = netdev_priv(dev);
return features;
}
-int efx_get_phys_port_id(struct net_device *net_dev,
- struct netdev_phys_item_id *ppid)
+int efx_siena_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
{
struct efx_nic *efx = netdev_priv(net_dev);
return -EOPNOTSUPP;
}
-int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
+int efx_siena_get_phys_port_name(struct net_device *net_dev,
+ char *name, size_t len)
{
struct efx_nic *efx = netdev_priv(net_dev);
#ifndef EFX_COMMON_H
#define EFX_COMMON_H
-int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
- unsigned int mem_map_size);
-void efx_fini_io(struct efx_nic *efx);
-int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
- struct net_device *net_dev);
-void efx_fini_struct(struct efx_nic *efx);
+int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+ unsigned int mem_map_size);
+void efx_siena_fini_io(struct efx_nic *efx);
+int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+ struct net_device *net_dev);
+void efx_siena_fini_struct(struct efx_nic *efx);
#define EFX_MAX_DMAQ_SIZE 4096UL
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
-void efx_link_clear_advertising(struct efx_nic *efx);
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_siena_link_clear_advertising(struct efx_nic *efx);
+void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc);
-void efx_start_all(struct efx_nic *efx);
-void efx_stop_all(struct efx_nic *efx);
+void efx_siena_start_all(struct efx_nic *efx);
+void efx_siena_stop_all(struct efx_nic *efx);
-void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);
+void efx_siena_net_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats);
-int efx_create_reset_workqueue(void);
-void efx_queue_reset_work(struct efx_nic *efx);
-void efx_flush_reset_workqueue(struct efx_nic *efx);
-void efx_destroy_reset_workqueue(void);
+int efx_siena_create_reset_workqueue(void);
+void efx_siena_queue_reset_work(struct efx_nic *efx);
+void efx_siena_flush_reset_workqueue(struct efx_nic *efx);
+void efx_siena_destroy_reset_workqueue(void);
-void efx_start_monitor(struct efx_nic *efx);
+void efx_siena_start_monitor(struct efx_nic *efx);
-int __efx_reconfigure_port(struct efx_nic *efx);
-int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_siena_reconfigure_port(struct efx_nic *efx);
+int efx_siena_reconfigure_port(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
ASSERT_RTNL(); \
} while (0)
-int efx_try_recovery(struct efx_nic *efx);
-void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-void efx_watchdog(struct net_device *net_dev, unsigned int txqueue);
-int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-int efx_reset(struct efx_nic *efx, enum reset_type method);
-void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_siena_try_recovery(struct efx_nic *efx);
+void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method);
+void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue);
+int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_siena_reset(struct efx_nic *efx, enum reset_type method);
+void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type);
/* Dummy PHY ops for PHY drivers */
-int efx_port_dummy_op_int(struct efx_nic *efx);
-void efx_port_dummy_op_void(struct efx_nic *efx);
+int efx_siena_port_dummy_op_int(struct efx_nic *efx);
+void efx_siena_port_dummy_op_void(struct efx_nic *efx);
static inline int efx_check_disabled(struct efx_nic *efx)
{
}
#ifdef CONFIG_SFC_MCDI_LOGGING
-void efx_init_mcdi_logging(struct efx_nic *efx);
-void efx_fini_mcdi_logging(struct efx_nic *efx);
+void efx_siena_init_mcdi_logging(struct efx_nic *efx);
+void efx_siena_fini_mcdi_logging(struct efx_nic *efx);
#else
-static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
-static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {}
#endif
-void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
-int efx_set_mac_address(struct net_device *net_dev, void *data);
-void efx_set_rx_mode(struct net_device *net_dev);
-int efx_set_features(struct net_device *net_dev, netdev_features_t data);
-void efx_link_status_changed(struct efx_nic *efx);
-unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
-int efx_change_mtu(struct net_device *net_dev, int new_mtu);
+void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
+int efx_siena_set_mac_address(struct net_device *net_dev, void *data);
+void efx_siena_set_rx_mode(struct net_device *net_dev);
+int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data);
+void efx_siena_link_status_changed(struct efx_nic *efx);
+unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx);
+int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu);
-extern const struct pci_error_handlers efx_err_handlers;
+extern const struct pci_error_handlers efx_siena_err_handlers;
-netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
- netdev_features_t features);
+netdev_features_t efx_siena_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
-int efx_get_phys_port_id(struct net_device *net_dev,
- struct netdev_phys_item_id *ppid);
+int efx_siena_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
-int efx_get_phys_port_name(struct net_device *net_dev,
- char *name, size_t len);
+int efx_siena_get_phys_port_name(struct net_device *net_dev,
+ char *name, size_t len);
#endif
*
- * %RESET_TYPE_INVSIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
- * other valuesspecify reasons, which efx_schedule_reset() will choose
+ * other values specify reasons, which efx_siena_schedule_reset() will choose
* a method for.
*
* Reset methods are numbered in order of increasing scope.
unsigned int tx_usecs, rx_usecs;
bool rx_adaptive;
- efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+ efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
coalesce->tx_coalesce_usecs = tx_usecs;
coalesce->tx_coalesce_usecs_irq = tx_usecs;
bool adaptive, rx_may_override_tx;
int rc;
- efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+ efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
if (coalesce->rx_coalesce_usecs != rx_usecs)
rx_usecs = coalesce->rx_coalesce_usecs;
else
tx_usecs = coalesce->tx_coalesce_usecs_irq;
- rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
- rx_may_override_tx);
+ rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+ rx_may_override_tx);
if (rc != 0)
return rc;
"increasing TX queue size to minimum of %u\n",
txq_entries);
- return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
+ return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries);
}
static void efx_ethtool_get_wol(struct net_device *net_dev,
return 0;
}
-const struct ethtool_ops efx_ethtool_ops = {
+const struct ethtool_ops efx_siena_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
old_adv = efx->link_advertising[0];
old_fc = efx->wanted_fc;
- efx_link_set_wanted_fc(efx, wanted_fc);
+ efx_siena_link_set_wanted_fc(efx, wanted_fc);
if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
rc = efx_mcdi_port_reconfigure(efx);
/* Reconfigure the MAC. The PHY *may* generate a link state change event
* if the user just changed the advertised capabilities, but there's no
* harm doing this twice */
- efx_mac_reconfigure(efx, false);
+ efx_siena_mac_reconfigure(efx, false);
out:
mutex_unlock(&efx->mac_lock);
if (rc < 0)
return rc;
- return efx_reset(efx, rc);
+ return efx_siena_reset(efx, rc);
}
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
* completion events. This means that efx->rxq_flush_outstanding remained at 4
* after the FLR; also, efx->active_queues was non-zero (as no flush completion
* events were received, and we didn't go through efx_check_tx_flush_complete())
- * If we don't fix this up, on the next call to efx_realloc_channels() we won't
- * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
- * for batched flush requests; and the efx->active_queues gets messed up because
- * we keep incrementing for the newly initialised queues, but it never went to
- * zero previously. Then we get a timeout every time we try to restart the
- * queues, as it doesn't go back to zero when we should be flushing the queues.
+ * If we don't fix this up, on the next call to efx_siena_realloc_channels() we
+ * won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
+ * of 4 for batched flush requests; and the efx->active_queues gets messed up
+ * because we keep incrementing for the newly initialised queues, but it never
+ * went to zero previously. Then we get a timeout every time we try to restart
+ * the queues, as it doesn't go back to zero when we should be flushing the
+ * queues.
*/
void efx_farch_finish_flr(struct efx_nic *efx)
{
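	/* Given the accounting described above, the fixup is to bring the
	 * flush book-keeping back to its post-reset baseline. A sketch of
	 * the likely body (elided in this hunk; treat as an assumption):
	 */
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);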
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = channel->tx_queue +
(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
efx_farch_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
} else {
netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event "
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
return false;
}
/* Discard all pending fragments */
if (rx_queue->scatter_n) {
- efx_rx_packet(
+ efx_siena_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
/* Discard new fragment if not SOP */
if (!rx_ev_sop) {
- efx_rx_packet(
+ efx_siena_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD);
channel->irq_mod_score += 2;
/* Handle received packet */
- efx_rx_packet(rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ efx_siena_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
"RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
}
#ifdef CONFIG_SFC_SRIOV
else
"TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
}
#ifdef CONFIG_SFC_SRIOV
else
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n");
- efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
* code. Disable them earlier.
* If an EEH error occurred, the read will have returned all ones.
*/
- if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true;
cmd, -rc);
if (efx->type->mcdi_reboot_detected)
efx->type->mcdi_reboot_detected(efx);
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else if (proxy_handle && (rc == -EPROTO) &&
efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
proxy_handle)) {
cmd, rc);
if (rc == -EINTR || rc == -EIO)
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
efx_mcdi_release(mcdi);
}
}
mcdi->new_epoch = true;
/* Nobody was waiting for an MCDI request, so trigger a reset */
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
}
spin_unlock(&mcdi->iface_lock);
}
}
mcdi->new_epoch = true;
- efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+ efx_siena_schedule_reset(efx, RESET_TYPE_MC_BIST);
spin_unlock(&mcdi->iface_lock);
}
if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
return; /* it had already been done */
netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
- efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+ efx_siena_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}
static void efx_handle_drain_event(struct efx_nic *efx)
"%s DMA error (event: "EFX_QWORD_FMT")\n",
code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
EFX_QWORD_VAL(*event));
- efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
break;
case MCDI_EVENT_CODE_PROXY_RESPONSE:
efx_mcdi_ev_proxy_response(efx,
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
efx->wanted_fc |= EFX_FC_AUTO;
- efx_link_set_wanted_fc(efx, efx->wanted_fc);
+ efx_siena_link_set_wanted_fc(efx, efx->wanted_fc);
return 0;
efx_link_set_advertising(efx, cmd->link_modes.advertising);
phy_cfg->forced_cap = 0;
} else {
- efx_link_clear_advertising(efx);
+ efx_siena_link_clear_advertising(efx);
phy_cfg->forced_cap = caps;
}
return 0;
efx_mcdi_phy_check_fcntl(efx, lpa);
- efx_link_status_changed(efx);
+ efx_siena_link_status_changed(efx);
}
part->name, part->dev_type_name, rc);
}
-static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
+static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part)
{
int rc;
list_del(&part->node);
}
-int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
- size_t n_parts, size_t sizeof_part)
+int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
size_t i;
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
- /* Add to list in order - efx_mtd_remove() depends on this */
+ /* Add to list in order - efx_siena_mtd_remove() depends on this */
list_add_tail(&part->node, &efx->mtd_list);
}
while (i--) {
part = (struct efx_mtd_partition *)((char *)parts +
i * sizeof_part);
- efx_mtd_remove_partition(part);
+ efx_siena_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
return -ENOMEM;
}
-void efx_mtd_remove(struct efx_nic *efx)
+void efx_siena_mtd_remove(struct efx_nic *efx)
{
struct efx_mtd_partition *parts, *part, *next;
node);
list_for_each_entry_safe(part, next, &efx->mtd_list, node)
- efx_mtd_remove_partition(part);
+ efx_siena_mtd_remove_partition(part);
kfree(parts);
}
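The "add to list in order" comment above matters because efx_siena_mtd_remove() recovers the base of the partition array from the first list entry and releases the whole array with a single kfree(). A hedged sketch of that layout assumption (the helper name is illustrative):

/* Illustrative: all partitions live in one allocation, so the first
 * node on efx->mtd_list is also the base of the parts[] array and
 * therefore the kfree() target.
 */
static void example_mtd_free_all(struct efx_nic *efx)
{
	struct efx_mtd_partition *parts =
		list_first_entry(&efx->mtd_list,
				 struct efx_mtd_partition, node);

	kfree(parts);	/* frees every partition at once */
}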
-void efx_mtd_rename(struct efx_nic *efx)
+void efx_siena_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd_partition *part;
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
* @piobuf: PIO buffer region for this TX queue (shared with its partner).
- * Size of the region is efx_piobuf_size.
* @piobuf_offset: Buffer offset to be specified in PIO descriptors
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
* @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
* @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
- * __efx_rx_packet(), or zero if there is none
+ * __efx_siena_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
- * by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * by __efx_siena_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
* @nic_data: Hardware dependent state
* @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
- * efx_monitor() and efx_reconfigure_port()
+ * efx_monitor() and efx_siena_reconfigure_port()
* @port_enabled: Port enabled indicator.
- * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
- * efx_mac_work() with kernel interfaces. Safe to read under any
- * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
- * be held to modify it.
+ * Serialises efx_siena_stop_all(), efx_siena_start_all(),
+ * efx_monitor() and efx_mac_work() with kernel interfaces.
+ * Safe to read under any one of the rtnl_lock, mac_lock, or netif_tx_lock,
+ * but all three must be held to modify it.
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
* @fixed_features: Features which cannot be turned off
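The @port_enabled rule above is the classic many-locks-read / all-locks-write pattern: any one of rtnl_lock, mac_lock or netif_tx_lock pins the value for a reader, because a writer must hold all three. A hedged sketch under those documented rules (both helper names are illustrative):

static bool example_port_enabled(struct efx_nic *efx)
{
	bool enabled;

	/* Any one of the three locks is enough for a reader. */
	mutex_lock(&efx->mac_lock);
	enabled = efx->port_enabled;
	mutex_unlock(&efx->mac_lock);

	return enabled;
}

static void example_set_port_enabled(struct efx_nic *efx, bool enabled)
{
	/* A writer must exclude every class of reader. */
	ASSERT_RTNL();
	mutex_lock(&efx->mac_lock);
	netif_tx_lock_bh(efx->net_dev);
	efx->port_enabled = enabled;
	netif_tx_unlock_bh(efx->net_dev);
	mutex_unlock(&efx->mac_lock);
}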
* This must check that the specified table entry is used by RFS
* and that rps_may_expire_flow() returns true for it.
* @mtd_probe: Probe and add MTD partitions associated with this net device,
- * using efx_mtd_add()
+ * using efx_siena_mtd_add()
* @mtd_rename: Set an MTD partition name using the net device name
* @mtd_read: Read from an MTD partition
* @mtd_erase: Erase part of an MTD partition
return skb;
}
-void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int n_frags, unsigned int len, u16 flags)
+void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+ unsigned int n_frags, unsigned int len, u16 flags)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
case XDP_TX:
/* Buffer ownership passes to tx on success. */
xdpf = xdp_convert_buff_to_frame(&xdp);
- err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+ err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit())
}
/* Handle a received packet. Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel)
+void __efx_siena_rx_packet(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
struct efx_rx_buffer *rx_buf =
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
- efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
+ efx_siena_rx_packet_gro(channel, rx_buf,
+ channel->rx_pkt_n_frags, eh, 0);
else
efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
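The first half, efx_siena_rx_packet(), only validates the descriptor and parks the packet on the channel (via @rx_pkt_n_frags and @rx_pkt_index, documented earlier); the payload-touching second half runs later in the same NAPI poll. A minimal sketch of the hand-off, with an illustrative flush-helper name:

/* Illustrative end-of-poll flush: deliver whatever the first half
 * parked on the channel; the second half resets @rx_pkt_n_frags
 * itself when it finishes.
 */
static void example_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
		__efx_siena_rx_packet(channel);
}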
* regardless of checksum state and skbs with a good checksum.
*/
void
-efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
- unsigned int n_frags, u8 *eh, __wsum csum)
+efx_siena_rx_packet_gro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh, __wsum csum)
{
struct napi_struct *napi = &channel->napi_str;
struct efx_nic *efx = channel->efx;
return;
}
- if (efx->net_dev->features & NETIF_F_RXHASH &&
- efx_rx_buf_hash_valid(efx, eh))
+ if (efx->net_dev->features & NETIF_F_RXHASH)
skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
PKT_HASH_TYPE_L3);
if (csum) {
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
void
-efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
- unsigned int n_frags, u8 *eh, __wsum csum);
+efx_siena_rx_packet_gro(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh, __wsum csum);
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
"Hello world! This is an Efx loopback test in progress!";
/* Interrupt mode names */
-static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
-static const char *const efx_interrupt_mode_names[] = {
+static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
+static const char *const efx_siena_interrupt_mode_names[] = {
[EFX_INT_MODE_MSIX] = "MSI-X",
[EFX_INT_MODE_MSI] = "MSI",
[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
- STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
+ STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)
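INT_MODE() works by token-pasting: the macro appends _max and _names to its second argument, which is why the bound and the name table above have to be renamed as a pair. A hedged reconstruction of the macro's shape (the real definition lives elsewhere in the driver):

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max ? member ## _names[val] : "(invalid)")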
/**
* struct efx_loopback_state - persistent state during a loopback selftest
schedule_timeout_uninterruptible(wait);
efx_for_each_channel(channel, efx) {
- efx_stop_eventq(channel);
+ efx_siena_stop_eventq(channel);
if (channel->eventq_read_ptr !=
read_ptr[channel->channel]) {
set_bit(channel->channel, &napi_ran);
if (efx_nic_event_test_irq_cpu(channel) >= 0)
clear_bit(channel->channel, &int_pend);
}
- efx_start_eventq(channel);
+ efx_siena_start_eventq(channel);
}
wait *= 2;
state->flush = true;
mutex_lock(&efx->mac_lock);
efx->loopback_mode = mode;
- rc = __efx_reconfigure_port(efx);
+ rc = __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
if (rc) {
netif_err(efx, drv, efx->net_dev,
if (rc_reset) {
netif_err(efx, hw, efx->net_dev,
"Unable to recover from chip test\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
return rc_reset;
}
mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
- __efx_reconfigure_port(efx);
+ __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
rc = efx_test_phy(efx, tests, flags);
mutex_lock(&efx->mac_lock);
efx->phy_mode = phy_mode;
efx->loopback_mode = loopback_mode;
- __efx_reconfigure_port(efx);
+ __efx_siena_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
efx_device_attach_if_not_resetting(efx);
if (channel->irq_moderation_us) {
unsigned int ticks;
- ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
+ ticks = efx_siena_usecs_to_ticks(efx, channel->irq_moderation_us);
EFX_POPULATE_DWORD_2(timer_cmd,
FRF_CZ_TC_TIMER_MODE,
FFE_CZ_TIMER_MODE_INT_HLDOFF,
enum reset_type reset_method = RESET_TYPE_ALL;
int rc, rc2;
- efx_reset_down(efx, reset_method);
+ efx_siena_reset_down(efx, reset_method);
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
rc = efx_mcdi_reset(efx, reset_method);
out:
- rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ rc2 = efx_siena_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
}
efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
stats[SIENA_STAT_rx_bytes] -
stats[SIENA_STAT_rx_bad_bytes]);
- efx_update_sw_stats(efx, stats);
+ efx_siena_update_sw_stats(efx, stats);
return 0;
}
if (rc)
goto fail;
- rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ rc = efx_siena_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
if (rc)
kfree(parts);
.remove = siena_remove_nic,
.init = siena_init_nic,
.dimension_resources = siena_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = efx_siena_port_dummy_op_void,
#ifdef CONFIG_EEH
.monitor = siena_monitor,
#else
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = efx_siena_prepare_flush,
.finish_flush = siena_finish_flush,
- .prepare_flr = efx_port_dummy_op_void,
+ .prepare_flr = efx_siena_port_dummy_op_void,
.finish_flr = efx_farch_finish_flr,
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
- .tx_enqueue = __efx_enqueue_skb,
+ .tx_enqueue = __efx_siena_enqueue_skb,
.rx_push_rss_config = siena_rx_push_rss_config,
.rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
- .rx_packet = __efx_rx_packet,
+ .rx_packet = __efx_siena_rx_packet,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
.sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
.sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
.sriov_get_vf_config = efx_siena_sriov_get_vf_config,
- .vswitching_probe = efx_port_dummy_op_int,
- .vswitching_restore = efx_port_dummy_op_int,
- .vswitching_remove = efx_port_dummy_op_void,
+ .vswitching_probe = efx_siena_port_dummy_op_int,
+ .vswitching_restore = efx_siena_port_dummy_op_int,
+ .vswitching_remove = efx_siena_port_dummy_op_void,
.set_mac_address = efx_siena_sriov_mac_address_changed,
#endif
static const struct efx_channel_type efx_siena_sriov_channel_type = {
.handle_no_channel = efx_siena_sriov_handle_no_channel,
.pre_probe = efx_siena_sriov_probe_channel,
- .post_remove = efx_channel_dummy_op_void,
+ .post_remove = efx_siena_channel_dummy_op_void,
.get_name = efx_siena_sriov_get_channel_name,
/* no copy operation; channel must not be reallocated */
.keep_eventq = true,
* If any DMA mapping fails, any mapped fragments will be unmapped
* and the queue's insert pointer will be restored to its original value.
*
- * This function is split out from efx_hard_start_xmit to allow the
+ * This function is split out from efx_siena_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues.
*
* Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function.
*/
-netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
bool xmit_more = netdev_xmit_more();
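As the comment above says, the loopback self-test calls this entry point directly so that it can pick the TX queue itself; it still has to honour the netif_tx_lock() contract. A hedged sketch of such a caller (the wrapper name is illustrative):

static void example_loopback_xmit(struct efx_tx_queue *tx_queue,
				  struct sk_buff *skb)
{
	/* Honour the contract: hold the TX lock around the enqueue. */
	netif_tx_lock_bh(tx_queue->efx->net_dev);
	__efx_siena_enqueue_skb(tx_queue, skb);	/* always NETDEV_TX_OK */
	netif_tx_unlock_bh(tx_queue->efx->net_dev);
}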
* Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
* (for XDP redirect).
*/
-int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
- bool flush)
+int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+ bool flush)
{
struct efx_tx_buffer *tx_buffer;
struct efx_tx_queue *tx_queue;
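The "different NIC" case in the comment above is the XDP_REDIRECT path: frames arrive from another device's NAPI poll through .ndo_xdp_xmit. A hedged sketch of such a wrapper (the driver's real wrapper may differ), flushing only when XDP_XMIT_FLUSH is set:

static int example_xdp_xmit(struct net_device *dev, int n,
			    struct xdp_frame **xdpfs, u32 flags)
{
	struct efx_nic *efx = netdev_priv(dev);

	if (unlikely(flags & ~XDP_XMIT_FLUSH))
		return -EINVAL;

	/* Push the whole batch; defer the doorbell unless asked. */
	return efx_siena_xdp_tx_buffers(efx, n, xdpfs,
					flags & XDP_XMIT_FLUSH);
}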
* Context: non-blocking.
* Should always return NETDEV_TX_OK and consume the skb.
*/
-netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
- struct net_device *net_dev)
+netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
return NETDEV_TX_OK;
}
- return __efx_enqueue_skb(tx_queue, skb);
+ return __efx_siena_enqueue_skb(tx_queue, skb);
}
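After the rename, the stack reaches this function through the usual hook. A minimal sketch of the wiring, with the other ops omitted (the ops-table name is illustrative):

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= efx_siena_hard_start_xmit,
	.ndo_setup_tc	= efx_siena_setup_tc,
};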
-void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
-{
- unsigned int pkts_compl = 0, bytes_compl = 0;
- unsigned int read_ptr;
- bool finished = false;
-
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
-
- while (!finished) {
- struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
-
- if (!efx_tx_buffer_in_use(buffer)) {
- struct efx_nic *efx = tx_queue->efx;
-
- netif_err(efx, hw, efx->net_dev,
- "TX queue %d spurious single TX completion\n",
- tx_queue->queue);
- efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
- return;
- }
-
- /* Need to check the flag before dequeueing. */
- if (buffer->flags & EFX_TX_BUF_SKB)
- finished = true;
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
- ++tx_queue->read_count;
- read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
- }
-
- tx_queue->pkts_compl += pkts_compl;
- tx_queue->bytes_compl += bytes_compl;
-
- EFX_WARN_ON_PARANOID(pkts_compl != 1);
-
- efx_xmit_done_check_empty(tx_queue);
-}
-
-void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
- /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+ /* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
tx_queue->channel->channel +
efx->n_tx_channels : 0));
}
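For reference, the forward direction that the comment says this must invert looks roughly like the lookup below: the stack's queue index selects the channel, and indices past n_tx_channels select the second traffic class. A hedged, simplified sketch (the highpri/type handling in the real lookup is more involved; efx_get_tx_queue() is the driver's internal helper as best I can tell):

static struct efx_tx_queue *
example_lookup_tx_queue(struct efx_nic *efx, struct sk_buff *skb)
{
	unsigned int index = skb_get_queue_mapping(skb);
	unsigned int type = 0;

	if (index >= efx->n_tx_channels) {	/* second traffic class */
		index -= efx->n_tx_channels;
		type = 1;
	}
	return efx_get_tx_queue(efx, index, type);
}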
-int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
- void *type_data)
+int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+ void *type_data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data;
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %d\n",
tx_queue->queue, read_ptr);
- efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+ efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
return;
}
}
}
-void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
}
}
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
struct efx_nic *efx = tx_queue->efx;
netif_tx_wake_queue(tx_queue->core_txq);
}
- efx_xmit_done_check_empty(tx_queue);
+ efx_siena_xmit_done_check_empty(tx_queue);
}
/* Remove buffers put into a tx_queue for the current packet.
return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
}
-void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
-void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
+void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count);