// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

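/* Usage sketch for the three parameters above (values illustrative only):
 * aq_itr selects the moderation mode, while aq_itr_tx/aq_itr_rx supply
 * fixed throttle values that only apply when automatic moderation is not
 * in use. Assuming 0 maps to the "off" mode in aq_cfg.h, throttling could
 * be disabled for latency testing with:
 *
 *   modprobe atlantic aq_itr=0 aq_itr_tx=0 aq_itr_rx=0
 */
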
static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

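/* The indirection table above is filled with "i & (num_rss_queues - 1)",
 * which only spreads flows evenly because the queue count is kept a power
 * of two by aq_nic_cfg_start(). Worked example with illustrative sizes:
 * an 8-entry table and 4 queues yields 0,1,2,3,0,1,2,3; with 3 queues the
 * mask (3 - 1 = 2) could only ever select queues 0 and 2.
 */
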
/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If no - we'll know link state from
	 * slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_force_promisc = true;
}

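/* Sizing summary for aq_nic_cfg_start() above: the vector count starts
 * from the hardware maximum, is clamped to AQ_CFG_VECS_DEF and to the
 * number of online CPUs, optionally leaves room for AQ_HW_SERVICE_IRQS,
 * and is then rounded down to a power of two so the RSS mask in
 * aq_nic_rss_init() stays valid (e.g. 6 usable CPUs end up as 4 vectors).
 * Legacy interrupts or a single-vector device force RSS off entirely.
 */
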
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_fw_ops->get_flow_control)
			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

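/* Link-state note: the function above only toggles the carrier on a real
 * transition (carrier off + non-zero mbps, or carrier on + zero mbps), so
 * it is safe to call it from both the threaded link IRQ and the periodic
 * service task. Flow control is re-applied to the RX block only when both
 * the FW query and the HW setter callbacks are present.
 */
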
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

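/* MTU bookkeeping: aq_nic_cfg.mtu holds the full L2 frame size, so the
 * initial netdev MTU excludes the Ethernet header and max_mtu additionally
 * excludes the FCS. Worked example, assuming a hardware frame limit of
 * 16352 bytes (value not taken from this file): 16352 - 4 - 14 = 16334.
 */
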
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

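/* Start-up ordering above: the multicast list and packet filter are
 * programmed first, each vector's rings are started, moderation is
 * applied, and only then are interrupts requested (or the polling timer
 * armed when cfg->is_polling is set). The optional link-status vector is
 * requested as a threaded IRQ separately from the per-ring vectors, and
 * the real queue counts are published to the stack just before the TX
 * queues are woken.
 */
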
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

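/* Descriptor accounting in aq_nic_map_skb() above: one optional TSO
 * context descriptor (is_txc), one descriptor for the linear part, then
 * one per AQ_CFG_TX_FRAME_MAX-sized chunk of every page fragment; e.g.
 * assuming a 16 KiB AQ_CFG_TX_FRAME_MAX (see aq_cfg.h), a 64 KiB fragment
 * costs four descriptors. On a DMA mapping failure the unwind loop walks
 * back exactly "ret" buffers, using dma_unmap_single() for the head and
 * dma_unmap_page() for fragment chunks.
 */
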
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

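/* Transmit-path contract above: an skb with more fragments than
 * AQ_CFG_SKB_FRAGS_MAX is dropped (freed, NETDEV_TX_OK), while a stopped
 * sub-queue or a failed mapping returns NETDEV_TX_BUSY so the stack will
 * requeue the skb. TX byte/packet counters are bumped only after the
 * hardware accepts the descriptors.
 */
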
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	unsigned int packet_filter = self->packet_filter;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_ALLMULTI;
	} else {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_MULTICAST;
		self->mc_list.count = i;
		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						       self->mc_list.ar,
						       self->mc_list.count);
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

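/* Filtering fallback above: unicast and multicast addresses share one
 * AQ_HW_MULTICAST_ADDRESS_MAX-entry table. Overflowing it with unicast
 * entries falls back to IFF_PROMISC, overflowing it with multicast
 * entries falls back to IFF_ALLMULTI, and whatever does fit is written to
 * hardware before the combined filter flags are applied.
 */
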
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

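/* Layout of the ethtool statistics buffer filled above: a fixed block of
 * MAC/firmware counters (per-cast packets and octets, error and DMA
 * counters) followed by the per-vector software counters appended by
 * aq_vec_get_sw_stats(). The matching string table lives in aq_ethtool.c,
 * so the two must stay in sync.
 */
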
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

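/* Reporting note: "supported" above is built from the hardware capability
 * mask (aq_hw_caps->link_speed_msk) while "advertising" uses the runtime
 * mask (aq_nic_cfg.link_speed_msk), so a speed disabled at runtime drops
 * out of the advertised list but stays supported. Pause is advertised when
 * RX flow control is enabled, Asym_Pause only when exactly one direction
 * is enabled.
 */
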
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

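/* Speed selection above: with autoneg the full capability mask is handed
 * to firmware, otherwise the single requested ethtool speed is mapped to
 * one AQ_NIC_RATE_* bit and rejected if the adapter cannot do it. A
 * user-space sketch (interface name illustrative):
 *
 *   ethtool -s eth0 autoneg off speed 5000 duplex full
 *
 * ends up calling set_link_speed() with AQ_NIC_RATE_5G.
 */
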
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}