/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"
static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

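/* NOTE: the 'debug' value is decoded into a verbosity level plus a per-module
 * bitmap by qede_config_debug() further below; e.g. (per the b31/b30/b29-b0
 * decoding documented there) loading with debug=0x40000000 enables INFO-level
 * prints.
 */
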
static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

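/* PCI core entry point for VF provisioning, reached through the driver's
 * .sriov_configure callback - e.g. when the administrator writes the
 * device's sriov_numvfs sysfs attribute. Besides enabling/disabling VFs in
 * qed, it toggles PF Tx switching so PF<->VF traffic is switched locally
 * while VFs exist.
 */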
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

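/* Statistics are gathered on demand rather than periodically: each call below
 * pulls a fresh qed_eth_stats snapshot from the device and copies it into the
 * edev->stats cache, which qede_get_stats64() and ethtool then report from.
 */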
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	DP_NOTICE(edev,
		  "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
		  txq->index, le16_to_cpu(*txq->hw_cons_ptr),
		  qed_chain_get_cons_idx(&txq->tx_pbl),
		  qed_chain_get_prod_idx(&txq->tx_pbl),
		  jiffies);
}

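/* Tx watchdog handler: takes the carrier down, logs every stuck Tx ring of
 * the timed-out queue, and escalates to the generic HW error flow
 * (QEDE_SP_HW_ERR) via the slowpath task - unless another HW error or a
 * recovery is already being handled.
 */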
static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_tx_queue *txq;
	int cos;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
		return;

	for_each_cos_in_txq(edev, cos) {
		txq = &edev->fp_array[txqueue].txq[cos];

		if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
		    qed_chain_get_prod_idx(&txq->tx_pbl))
			qede_tx_log_print(edev, txq);
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

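/* Illustrative mapping: the loop below gives each traffic class its own
 * contiguous block of QEDE_TSS_COUNT() netdev Tx queues, so with e.g.
 * QEDE_TSS_COUNT() == 8 and num_tc == 4, TC 1 is served by netdev queues
 * 8..15 (offset = cos * count).
 */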
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_do_ioctl = qede_ioctl,
	.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
	.ndo_setup_tc = qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if the device supports it declare it enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

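/* Worked example of the decoding above (assuming the QED_LOG_*_MASK values
 * match the b31/b30/b29-b0 split documented there): debug = 0x40000000
 * yields QED_LEVEL_INFO, debug = 0x80000000 yields QED_LEVEL_NOTICE, and any
 * value with bits in b0-b29 set, e.g. debug = 0x2, selects QED_LEVEL_VERBOSE
 * with that per-module bit reported in *p_dp_module.
 */
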
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);

		edev = netdev_priv(ndev);
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

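/* .probe callback: decodes the qede_pci_tbl driver_data into PF/VF mode and
 * the module 'debug' parameter into dp_module/dp_level before delegating to
 * __qede_probe() in QEDE_PROBE_NORMAL mode.
 */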
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;
	qed_ops->common->remove(cdev);

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY)
		free_netdev(ndev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

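/* Both the .remove and .shutdown callbacks perform a full QEDE_REMOVE_NORMAL
 * teardown; on power-off, __qede_remove() stops the slowpath but skips the
 * final qed remove (see the system_state check above).
 */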
static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}

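	/* Illustrative sizing (assuming a 4K page and a default 1500-byte MTU,
	 * no XDP): headroom + skb_shared_info + buffer rounds up to 2048, so
	 * each page is split into two equal segments; with XDP each buffer
	 * instead owns a full page.
	 */
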
	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring, NULL);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring, NULL);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    txq->num_tx_buffers,
					    sizeof(*p_virt),
					    &txq->tx_pbl, NULL);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id) < 0);
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}
}

static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}

static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn to receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}

static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

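/* Teardown order below mirrors bring-up in reverse: the vport is first marked
 * inactive, Tx rings are drained (optionally via an MCP drain request), then
 * Tx/Rx/XDP queues are stopped from the last fastpath entry back to the
 * first, and finally the vport itself is stopped.
 */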
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}

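/* Bring-up counterpart to qede_stop_queues(): starts the vport, then per
 * fastpath entry the Rx queue, the XDP forwarding queue and the per-TC Tx
 * queues, and finally sends a vport-update to activate it with RSS and, if
 * needed, Tx switching.
 */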
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			bpf_prog_add(edev->xdp_prog, 1);
			fp->rxq->xdp_prog = edev->xdp_prog;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
						    TX_PI(cos));
				if (rc)
					goto out;
			}
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	u8 num_tc;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
		rc = qede_alloc_arfs(edev);
		if (rc)
			DP_NOTICE(edev, "aRFS memory allocation failed\n");
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}
/* 'func' should be able to run between unload and reload assuming interface
 * is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, internal state wouldn't change even
	 * if netdev state would start transitioning. Check whether current
	 * internal configuration indicates device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}
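/* Usage sketch (illustration only): run a reconfiguration callback with the
 * device quiesced. The callback name below is made up, and the u.mtu member
 * is assumed to exist in struct qede_reload_args:
 *
 *	static void qede_dummy_apply_mtu(struct qede_dev *edev,
 *					 struct qede_reload_args *args)
 *	{
 *		edev->ndev->mtu = args->u.mtu;
 *	}
 *
 *	struct qede_reload_args args;
 *
 *	args.func = qede_dummy_apply_mtu;
 *	args.u.mtu = 1500;
 *	qede_reload(edev, &args, false);
 */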
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}
static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling a recovery handling since already in recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}
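/* The heavy lifting happens later in the sp_task worker (process context);
 * this callback only latches QEDE_SP_RECOVERY and kicks the workqueue, which
 * is presumably what keeps it safe to invoke from non-sleepable contexts.
 */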
static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}
static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire the qede_lock first, since it is done by
	 * qede_sp_task before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_get_rx_info(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");
	return;

err:
	qede_recovery_failed(edev);
}
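/* Note: recovery re-runs the probe flow on the same PCI device; the
 * QEDE_REMOVE_RECOVERY / QEDE_PROBE_RECOVERY modes are expected to keep the
 * netdev registered across the cycle, which is why rx-mode and UDP-tunnel
 * state are re-applied by hand above instead of via a fresh qede_open().
 */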
static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Trigger a recovery process.
	 * This is placed in the sleep requiring section just to make
	 * sure it is the last one, and that all the other operations
	 * were completed.
	 */
	if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
		edev->ops->common->recovery_process(cdev);

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}
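/* The split above mirrors how these handlers are invoked: the non-sleepable
 * qede_atomic_hw_err_handler() runs directly from the notification callback
 * (see qede_schedule_hw_err_handler() below), while the sleepable
 * qede_generic_hw_err_handler() runs later from the sp_task workqueue.
 */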
static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		break;

	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	     err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;

	return false;
}
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	memset(data->mac[1], 0, ETH_ALEN);
	memset(data->mac[2], 0, ETH_ALEN);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1; /* mac[0] already holds the primary MAC */
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}
static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xffff;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;

	/* Fill information regarding queues; Should be done under the qede
	 * lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;

	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; Firmware might stop
			 * placing packets if ring is not yet full.
			 * Give an approximation.
			 */
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);

	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}
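/* Worked example of the RX-fullness heuristic above (illustration only,
 * assuming the default RX_RING_SIZE_POW of 13, i.e. RX_RING_SIZE == 8192):
 * a queue counts as full once the firmware producer (*hw_cons_ptr) is more
 * than 8092 completion entries ahead of the driver's consumer index.
 */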
/**
 * qede_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}