// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

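/* Initialize RSS parameters: a fixed default hash key plus an identity
 * indirection table spread over the RSS queues. The mask in the loop
 * below relies on num_rss_queues being a power of two.
 */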
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/*descriptors*/
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/*rss rings*/
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If no - we'll know link state from
	 * slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
}

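/* Read the current link state from firmware; on a link change, reapply
 * interrupt moderation and flow-control settings and toggle the
 * carrier and TX queues to match.
 */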
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_fw_ops->get_flow_control)
			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}

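/* Threaded ISR for the dedicated link-status vector: refresh the link
 * state, then re-arm the link interrupt.
 */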
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}

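/* Periodic service work: refresh the link state and pull firmware MAC
 * statistics into the netdev counters.
 */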
static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

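/* One-time netdev bring-up: probe the firmware ops, fetch the permanent
 * MAC address, allocate one vector per configured queue and register
 * the net_device.
 */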
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);
	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);

err_exit:
	return err;
}

void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

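/* Bring the NIC to the running state: program filters, start vectors
 * and hardware, then either arm the polling timer or request the
 * per-vector (and optional link-status) interrupts.
 */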
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);
	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);

			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

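/* Map an skb onto TX descriptors: an optional TSO context descriptor
 * first, then the linear part (marked SOP) and one descriptor per
 * fragment chunk, the last one marked EOP. Returns the number of
 * descriptors used, or 0 on DMA mapping failure (after unwinding).
 */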
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->eop_index = 0xffffU;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

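/* Transmit entry point: pick the TX ring from the skb queue mapping,
 * map the skb and hand the resulting descriptors to hardware.
 */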
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

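/* Rebuild the UC/MC address list for hardware filtering, falling back
 * to promiscuous or all-multicast mode when the list would exceed
 * AQ_HW_MULTICAST_ADDRESS_MAX entries.
 */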
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	unsigned int packet_filter = self->packet_filter;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_ALLMULTI;
	} else {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);

			if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
				break;
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_MULTICAST;
		self->mc_list.count = i;
		self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						       self->mc_list.ar,
						       self->mc_list.count);
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);

	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

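/* Fill the ethtool statistics array: firmware MAC counters first,
 * followed by the per-vector software counters.
 */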
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats =
		self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

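/* Report supported and advertised link modes, derived from the
 * hardware capability mask and the current configuration.
 */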
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;
		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;
		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;
		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;
		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;
		default:
			err = -1;
			goto err_exit;
		}

		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);

	aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

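/* Tear down vectors and firmware state; depending on the power state
 * and WoL configuration, ask firmware to enter a low-power state.
 */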
void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}