// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>

#include "qede.h"
#include "qede_ptp.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
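/* The 'debug' value is decoded by qede_config_debug() below: b31 enables
 * NOTICE prints, b30 enables INFO prints, and b29-b0 select the modules for
 * which VERBOSE prints are emitted.
 */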
static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090
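/* Unless already provided by pci_ids.h, expose the chip numbers above as PCI
 * device IDs: the 0x16xx values are 57980S (BB family) parts, 0x80xx are AH.
 */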
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};
static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11
static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}
static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}
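/* pci_driver->sriov_configure callback: creates or destroys VFs and, when the
 * VF count changes on a running PF, toggles Tx switching on vport 0.
 */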
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;

	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif
static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};
static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};
static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};
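/* Netdevice notifier: forwards interface renames to the qed core (used for
 * debug prints) and MAC address changes to the RDMA driver, for qede devices
 * only.
 */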
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};
static int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}
static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);
static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}
static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif
static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}
static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	DP_NOTICE(edev,
		  "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
		  txq->index, le16_to_cpu(*txq->hw_cons_ptr),
		  qed_chain_get_cons_idx(&txq->tx_pbl),
		  qed_chain_get_prod_idx(&txq->tx_pbl),
		  jiffies);
}
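/* ndo_tx_timeout callback: log the stuck queue(s) and, unless a recovery is
 * already in progress, schedule the generic HW error handler.
 */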
static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_tx_queue *txq;
	int cos;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
		return;

	for_each_cos_in_txq(edev, cos) {
		txq = &edev->fp_array[txqueue].txq[cos];

		if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
		    qed_chain_get_prod_idx(&txq->tx_pbl))
			qede_tx_log_print(edev, txq);
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}
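/* Map 'num_tc' traffic classes onto the Tx queues: each class gets a
 * contiguous block of QEDE_TSS_COUNT(edev) queues.
 */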
static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}
static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}
static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);
static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}
static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_do_ioctl = qede_ioctl,
	.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit = qede_xdp_transmit,
	.ndo_setup_tc = qede_setup_tc_offload,
};
static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_features_check = qede_features_check,
};
static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
	.ndo_xdp_xmit = qede_xdp_transmit,
};
/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */
static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if device supports it declare it's enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}
/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *	'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *	and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *	module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
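/* Example: debug=0x40000000 (only b30 set) selects QED_LEVEL_INFO with no
 * verbose modules, while e.g. debug=0x3 selects QED_LEVEL_VERBOSE for the
 * modules mapped to bits 0-1 only.
 */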
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}
static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	void *mem;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
		       sizeof(*edev->coal_entry), GFP_KERNEL);
	if (!mem) {
		DP_ERR(edev, "coalesce entry allocation failed\n");
		kfree(edev->coal_entry);
		goto err;
	}
	edev->coal_entry = mem;

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}
/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Disable execution of this deferred work once
	 * qede removal is in progress, this stops any future
	 * scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}
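/* Size the PF/VF L2 connection count so that every status block can carry
 * one Rx queue, one XDP Tx queue and one Tx queue per traffic class.
 */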
static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}
#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}
enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}

		edev->devlink = qed_ops->common->devlink_register(cdev);
		if (IS_ERR(edev->devlink)) {
			DP_NOTICE(edev, "Cannot register devlink\n");
			edev->devlink = NULL;
			/* Go on, we can live without devlink */
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);

		edev = netdev_priv(ndev);

		if (edev->devlink) {
			struct qed_devlink *qdl = devlink_priv(edev->devlink);

			qdl->cdev = cdev;
		}

		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	if (mode != QEDE_PROBE_RECOVERY)
		free_netdev(edev->ndev);
	else
		edev->cdev = NULL;
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}
enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};
static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;

	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
		qed_ops->common->devlink_unregister(edev->devlink);
		edev->devlink = NULL;
	}
	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY) {
		kfree(edev->coal_entry);
		free_netdev(ndev);
	}

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */
static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources*/
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}
static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}
/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}
static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}
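/* Rx buffers are page fragments: rx_buf_size covers headroom + MTU +
 * overhead, and rx_buf_seg_size is the power-of-two slice of the page used
 * per buffer, growing to a whole page when an XDP program is attached.
 */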
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	struct qed_chain_init_params params = {
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = RX_RING_SIZE,
	};
	struct qed_dev *cdev = edev->cdev;
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.elem_size = sizeof(struct eth_rx_bd);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	params.mode = QED_CHAIN_MODE_PBL;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
	params.elem_size = sizeof(union eth_rx_cqe);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}
/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = edev->q_num_tx_buffers,
		.elem_size = sizeof(union eth_tx_bd_types),
	};
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}
/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}
/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}
static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}
static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;
	bool init_xdp = false;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;

			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
			init_xdp = true;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id, 0) < 0);

			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
						       MEM_TYPE_PAGE_ORDER0,
						       NULL))
				DP_NOTICE(edev,
					  "Failed to register XDP memory model\n");
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	if (init_xdp) {
		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
	}
}
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}
static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}
static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}
static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}
static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}
static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}
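/* Teardown mirrors qede_start_queues(): deactivate the vport, drain all
 * Tx/XDP queues, stop every queue in reverse order, then stop the vport.
 */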
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}
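/* Bring-up order: start the vport, then per-fastpath Rx, XDP and Tx queues,
 * and finally activate the vport with RSS and Tx-switching configuration.
 */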
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			bpf_prog_add(edev->xdp_prog, 1);
			fp->rxq->xdp_prog = edev->xdp_prog;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
						    TX_PI(cos));
				if (rc)
					goto out;
			}
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};
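
/* Tear down the data path. In QEDE_UNLOAD_RECOVERY mode the device state and
 * link are left alone, since the hardware is about to be re-probed; in normal
 * mode the link is reset and the vport/queues are stopped through ramrods
 * first. Callers that already hold the qede lock pass is_locked = true.
 */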
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;
	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);
	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}
	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (edev->dev_info.common.b_arfs_capable) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}
	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);
	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};
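
/* Build up the data path: allocate fastpath structures and queue memory, add
 * NAPI contexts, request IRQs, and start the vport/queues. Statistics are
 * cleared on a fresh load but preserved across a QEDE_LOAD_RELOAD. The
 * err1..err4 labels unwind each stage in reverse order on failure.
 */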
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	struct ethtool_coalesce coal = {};
	u8 num_tc;
	int rc, i;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);
	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;
	if (qede_alloc_arfs(edev)) {
		edev->ndev->features &= ~NETIF_F_NTUPLE;
		edev->dev_info.common.b_arfs_capable = false;
	}
	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
	for_each_queue(i) {
		if (edev->coal_entry[i].isvalid) {
			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
		}
		__qede_unlock(edev);
		qede_set_per_coalesce(edev->ndev, i, &coal);
		__qede_lock(edev);
	}

	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}

/* 'func' should be able to run between unload and reload assuming interface
 * is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, internal state wouldn't change even
	 * if netdev state would start transitioning. Check whether current
	 * internal configuration indicates device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}
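
/* ndo_open/ndo_stop entry points; these wrap qede_load()/qede_unload() and
 * also tell the qed core whether the driver is active (update_drv_state).
 */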
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_nic_reset_ntf(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	if (edev->cdev)
		edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}
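
/* Link-change notification from the qed core. It only toggles carrier and Tx
 * state and informs RDMA; it deliberately does nothing until the driver has
 * actually requested link (QEDE_FLAGS_LINK_REQUESTED).
 */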
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}
	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}
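
/* Recovery is driven from the sp_task workqueue: this callback only marks
 * QEDE_SP_RECOVERY and kicks the work item, so it is safe to call from the
 * qed core's error paths.
 */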
static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling a recovery handling since already in recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}

static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}
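
/* Full recovery: quiesce the data path, detach and re-probe the device via
 * __qede_remove()/__qede_probe() in their RECOVERY flavors, then reload if
 * the interface was up. On any failure the device is powered down for good.
 */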
static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire the qede_lock here; qede_sp_task has already
	 * taken it before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}
	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_nic_reset_ntf(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");

	return;

err:
	qede_recovery_failed(edev);
}
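
/* First half of HW error handling: runs in atomic context straight from the
 * notification, so it may only do non-sleeping work (dump a warning trace,
 * mask further attentions). The sleepable half runs later from sp_task.
 */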
static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}
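
/* Sleepable half of HW error handling, run from the sp_task workqueue:
 * reports the fatal error through the qed devlink interface and then re-arms
 * error handling by clearing QEDE_ERR_IS_HANDLED.
 */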
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	if (edev->devlink)
		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}
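
/* Translate a qed error type into the driver's err_flags bits: a DMAE
 * failure additionally sets QEDE_ERR_WARN (note the fallthrough), and all
 * listed fatal types request attention masking and debug-info collection.
 */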
static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		break;

	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}
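
/* Entry point for HW error notifications from the qed core. May be called
 * from atomic context: it runs the non-sleepable handler inline and defers
 * the sleepable part to sp_task via QEDE_SP_HW_ERR. Only one error is handled
 * at a time, except for fan failure which must never be masked.
 */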
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	     err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}
	edev->last_err_type = err_type;
	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}
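
/* A Tx queue is reported "full" for MFW TLV purposes when the stack has
 * stopped the corresponding netdev Tx queue.
 */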
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;

	return false;
}
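
/* Fill the generic (non-protocol) TLV data requested by the management FW:
 * offload feature flags plus up to QED_TLV_MAC_COUNT MAC addresses, with the
 * primary MAC in slot 0 and the first unicast addresses after it.
 */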
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	eth_zero_addr(data->mac[1]);
	eth_zero_addr(data->mac[2]);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1;
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}
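
/* Fill the Ethernet-specific TLV data for the management FW: static LSO /
 * promiscuous / queue-count fields first, then a walk over all fastpaths
 * (under the qede lock) to report empty/full Rx and Tx queue counts.
 */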
static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xFFFF;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;
	/* Fill information regarding queues; Should be done under the qede
	 * lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;
	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; Firmware might stop
			 * placing packets if ring is not yet full.
			 * Give an approximation.
			 */
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);
	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}

/**
 * qede_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;
	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}
	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}
	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}