/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};
int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}
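/* Illustrative sketch (not driver logic beyond what is above): each
 * queue-zone owns a bitmap of MAX_QUEUES_PER_QZONE bits, so claiming a
 * free index inside a zone is just a find-first-zero-bit over that
 * bitmap, conceptually:
 *
 *	first = find_first_zero_bit(p_l2_info->pp_qid_usage[qzone],
 *				    MAX_QUEUES_PER_QZONE);
 *	__set_bit(first, p_l2_info->pp_qid_usage[qzone]);
 *
 * qed_eth_queue_qid_usage_add() below performs exactly this under the
 * p_l2_info->lock mutex.
 */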
void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}
void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}
static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}
static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}
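/* Typical CID lifecycle, as a hedged sketch of a hypothetical caller:
 * qed_eth_queue_to_cid() below acquires a firmware CID (PF only) and a
 * qid-usage slot, and qed_eth_queue_cid_release() undoes both:
 *
 *	p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, &params,
 *				     true, NULL);
 *	if (p_cid) {
 *		... start / use the queue ...
 *		qed_eth_queue_cid_release(p_hwfn, p_cid);
 *	}
 */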
/* This internal function is only meant to be directly called by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vzalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}
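/* Note on relative vs. absolute indices: callers pass vport/queue/stats
 * ids relative to this PF's resources, and the helper above translates
 * them to engine-absolute values. As an illustration only: a PF whose
 * first L2-queue resource happens to be 16 would see rel.queue_id 0
 * mapped to abs.queue_id 16 - the exact offsets depend on resource
 * allocation and are not fixed by this code.
 */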
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VF's don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					 &cid, vfid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}
static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
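/* Minimal usage sketch for the function above. Field values are
 * illustrative assumptions, not mandated by the API; all field names
 * exist in struct qed_sp_vport_start_params as used above:
 *
 *	struct qed_sp_vport_start_params params = {
 *		.opaque_fid	= p_hwfn->hw_info.opaque_fid,
 *		.concrete_fid	= p_hwfn->hw_info.concrete_fid,
 *		.vport_id	= 0,
 *		.mtu		= 1500,
 *		.tpa_mode	= QED_TPA_MODE_GRO,
 *	};
 *	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 */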
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}
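/* Worked example of the indirection-table sizing above: with
 * rss_table_size_log == 7, the table holds
 * min(QED_RSS_IND_TABLE_SIZE, 1 << 7) == 128 entries, each carrying the
 * absolute queue-id of one Rx queue; traffic is then spread by indexing
 * the table with the packet's RSS hash.
 */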
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
			  !!(accept_filter & QED_ACCEPT_ANY_VNI));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}
static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}
static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to use as a handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}
static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
static int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}
static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct eth_filter_cmd_header *p_header;
	int rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}
/*******************************************************************************
 * Description:
 *	Calculates CRC32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet,
			   u32 crc32_length, u32 crc32_seed, u8 complement)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;
	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}
	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}

u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}
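/* Sketch of the multicast approximation built on the helpers above: the
 * 6-byte MAC is zero-padded to 8 bytes, run through CRC32c with
 * ETH_MULTICAST_BIN_FROM_MAC_SEED, and the low 8 bits pick one of 256
 * bins, e.g.:
 *
 *	bin = qed_mcast_bin_from_mac(mac);	// 0..255
 *	__set_bit(bin, bins);			// bins[] spans 256 bits
 *
 * That 256-bit vector is what eventually lands in approx_mcast.bins[]
 * in qed_sp_eth_filter_mcast() below.
 */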
static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	memset(bins, 0, sizeof(unsigned long) *
	       ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			__set_bit(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
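/* Hedged usage sketch for the wrapper below (values illustrative;
 * some_mc_addr is a hypothetical placeholder, not a real symbol):
 *
 *	struct qed_filter_mcast mcast = {
 *		.opcode		= QED_FILTER_ADD,
 *		.num_mc_addrs	= 1,
 *	};
 *	ether_addr_copy(mcast.mac[0], some_mc_addr);
 *	qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
 *
 * Since ADD replaces the whole bin vector, callers pass the complete
 * current multicast list on every invocation.
 */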
static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}

	return rc;
}
static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
	p_stats->common.gft_filter_drop +=
	    HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
				       statistics_bin);

	memset(&mstats, 0, sizeof(mstats));
	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_eth_stats *p_stats)
{
	struct qed_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	memset(&port_stats, 0, sizeof(port_stats));

	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			p_hwfn->mcp_info->port_addr +
			offsetof(struct public_port, stats),
			sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (QED_IS_BB(p_hwfn->cdev)) {
		struct qed_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
		    port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
		    port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
		    port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
		    port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
		    port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
		    port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
		    port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
		    port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
		    port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct qed_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
		    port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets =
		    port_stats.eth.u1.ah1.t1519_to_max;
	}

	p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
					     p_hwfn->mcp_info->port_addr +
					     offsetof(struct public_port,
						      link_change_count));
}
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_eth_stats *stats,
				  u16 statistics_bin, bool b_get_port_stats)
{
	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	if (b_get_port_stats && p_hwfn->mcp_info)
		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
				 struct qed_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;

		if (IS_PF(cdev)) {
			/* The main vport index is relative first */
			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
				      IS_PF(cdev) ? true : false);

out:
		if (IS_PF(cdev) && p_ptt)
			qed_ptt_release(p_hwfn, p_ptt);
	}
}
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
		memset(stats, 0, sizeof(*stats));
		return;
	}

	_qed_get_vport_stats(cdev, stats);

	if (!cdev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
}
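/* Example of the baseline reduction above: if rx_ucast_pkts read 1000
 * when qed_reset_vport_stats() snapshotted reset_stats, and the hardware
 * counter now reads 1500, the caller sees 500. The subtraction walks the
 * struct as a flat array of u64 counters, so it relies on
 * qed_eth_stats containing only u64 fields.
 */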
/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
void qed_reset_vport_stats(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
						    : NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(cdev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		memset(&mstats, 0, sizeof(mstats));
		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		memset(&ustats, 0, sizeof(ustats));
		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		memset(&pstats, 0, sizeof(pstats));
		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(cdev))
			qed_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 * Link change stat is maintained by MFW, return its value as is.
	 */
	if (!cdev->reset_stats) {
		DP_INFO(cdev, "Reset stats not allocated\n");
	} else {
		_qed_get_vport_stats(cdev, cdev->reset_stats);
		cdev->reset_stats->common.link_change_count = 0;
	}
}
static enum gft_profile_type
qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
{
	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
		return GFT_PROFILE_TYPE_4_TUPLE;
	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
		return GFT_PROFILE_TYPE_IP_DST_ADDR;
	if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
		return GFT_PROFILE_TYPE_IP_SRC_ADDR;
	return GFT_PROFILE_TYPE_L4_DST_PORT;
}
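/* Summary of the mapping above, e.g.:
 * QED_FILTER_CONFIG_MODE_5_TUPLE  -> GFT_PROFILE_TYPE_4_TUPLE
 * QED_FILTER_CONFIG_MODE_IP_DEST  -> GFT_PROFILE_TYPE_IP_DST_ADDR
 * QED_FILTER_CONFIG_MODE_IP_SRC   -> GFT_PROFILE_TYPE_IP_SRC_ADDR
 * anything else                   -> GFT_PROFILE_TYPE_L4_DST_PORT
 */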
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt,
			     struct qed_arfs_config_params *p_cfg_params)
{
	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       p_cfg_params->tcp,
			       p_cfg_params->udp,
			       p_cfg_params->ipv4,
			       p_cfg_params->ipv6,
			       qed_arfs_mode_to_hsi(p_cfg_params->mode));

		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable",
			   (u32)p_cfg_params->mode);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}
}
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
				struct qed_spq_comp_cb *p_cb,
				struct qed_ntuple_filter_params *p_params)
{
	struct rx_update_gft_filter_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = QED_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_GFT_UPDATE_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;

	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);

	if (p_params->b_is_drop) {
		p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
	} else {
		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
		if (rc)
			return rc;

		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
					     &abs_rx_q_id);
			if (rc)
				return rc;

			p_ramrod->rx_qid_valid = 1;
			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
		}

		p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
	}

	p_ramrod->flow_id_valid = 0;
	p_ramrod->flow_id = 0;
	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
						     : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   p_params->b_is_add ? "Adding" : "Removing",
		   (u64)p_params->addr, p_params->length);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

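/* Read back the current Rx interrupt-coalescing value of a queue: fetch
 * the CAU status-block entry over DMAE to recover the timer resolution,
 * then read the USTORM queue-zone timeset and scale it accordingly.
 */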
int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_rx_coal = (u16)(coalesce << timer_res);

	return 0;
}

int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	int rc;

	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
			       p_cid->sb_igu_id * sizeof(u64),
			       (u64)(uintptr_t)&sb_entry, 2, 0);
	if (rc) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = qed_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return -EINVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_tx_coal = (u16)(coalesce << timer_res);

	return 0;
}

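/* Common entry point for reading a queue's coalescing value. VFs have to
 * go through the PF channel; PFs read the hardware directly via a PTT.
 */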
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_ptt *p_ptt;
	int rc = 0;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
		if (rc)
			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

		return rc;
	}

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	if (p_cid->b_is_rx)
		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
	else
		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

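/* Fill the qed_dev_eth_info exported to the protocol driver: queue counts
 * (bounded by both queue-zones and available interrupts), MAC/VLAN filter
 * budgets with SRIOV reservations subtracted, port MAC and XDP support.
 */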
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
				 struct qed_dev_eth_info *info)
{
	int i;

	memset(info, 0, sizeof(*info));

	info->num_tc = 1;

	if (IS_PF(cdev)) {
		int max_vf_vlan_filters = 0;
		int max_vf_mac_filters = 0;

		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
			u16 num_queues = 0;

			/* Since the feature controls only queue-zones,
			 * make sure we have the contexts [rx, tx, xdp] to
			 * match.
			 */
			for_each_hwfn(cdev, i) {
				struct qed_hwfn *hwfn = &cdev->hwfns[i];
				u16 l2_queues = (u16)FEAT_NUM(hwfn,
							      QED_PF_L2_QUE);
				u16 cids;

				cids = hwfn->pf_params.eth_pf_params.num_cons;
				num_queues += min_t(u16, l2_queues, cids / 3);
			}

			/* queues might theoretically be >256, but interrupts'
			 * upper-limit guarantees that it would fit in a u8.
			 */
			if (cdev->int_params.fp_msix_cnt) {
				u8 irqs = cdev->int_params.fp_msix_cnt;

				info->num_queues = (u8)min_t(u16,
							     num_queues, irqs);
			}
		} else {
			info->num_queues = cdev->num_hwfns;
		}

		if (IS_QED_SRIOV(cdev)) {
			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
					      QED_ETH_VF_NUM_VLAN_FILTERS;
			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
					     QED_ETH_VF_NUM_MAC_FILTERS;
		}
		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						  QED_VLAN) -
					 max_vf_vlan_filters;
		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
						 QED_MAC) -
					max_vf_mac_filters;

		ether_addr_copy(info->port_mac,
				cdev->hwfns[0].hw_info.hw_mac_addr);

		info->xdp_supported = true;
	} else {
		u16 total_cids = 0;

		/* Determine queues & XDP support */
		for_each_hwfn(cdev, i) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
			u8 queues, cids;

			qed_vf_get_num_cids(p_hwfn, &cids);
			qed_vf_get_num_rxqs(p_hwfn, &queues);
			info->num_queues += queues;
			total_cids += cids;
		}

		/* Enable VF XDP in case PF guarantees sufficient connections */
		if (total_cids >= info->num_queues * 3)
			info->xdp_supported = true;

		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
					    (u8 *)&info->num_vlan_filters);
		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
					   (u8 *)&info->num_mac_filters);
		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);

		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
	}

	qed_fill_dev_info(cdev, &info->common);

	if (IS_VF(cdev))
		eth_zero_addr(info->common.hw_mac);

	return 0;
}

static void qed_register_eth_ops(struct qed_dev *cdev,
				 struct qed_eth_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.eth = ops;
	cdev->ops_cookie = cookie;

	/* For VF, we start bulletin reading */
	if (IS_VF(cdev))
		qed_vf_start_iov_wq(cdev);
}

static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
	if (IS_PF(cdev))
		return true;

	return qed_vf_check_mac(&cdev->hwfns[0], mac);
}

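/* Start an L2 vport on every hwfn (both engines on CMT devices) and bring
 * up the fastpath; optionally clears the vport statistics baseline.
 */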
static int qed_start_vport(struct qed_dev *cdev,
			   struct qed_start_vport_params *params)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_sp_vport_start_params start = { 0 };
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
						      QED_TPA_MODE_NONE;
		start.remove_inner_vlan = params->remove_inner_vlan;
		start.only_untagged = true;	/* untagged only */
		start.drop_ttl0 = params->drop_ttl0;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.handle_ptp_pkts = params->handle_ptp_pkts;
		start.vport_id = params->vport_id;
		start.max_buffers_per_cqe = 16;
		start.mtu = params->mtu;

		rc = qed_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT\n");
			return rc;
		}

		rc = qed_hw_start_fastpath(p_hwfn);
		if (rc) {
			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
			return rc;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Started V-PORT %d with MTU %d\n",
			   start.vport_id, start.mtu);
	}

	if (params->clear_stats)
		qed_reset_vport_stats(cdev);

	return 0;
}

static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
{
	int rc, i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_sp_vport_stop(p_hwfn,
				       p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(cdev, "Failed to stop VPORT\n");
			return rc;
		}
	}
	return 0;
}

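/* Translate the protocol driver's RSS request into per-hwfn qed_rss_params.
 * On a single-engine device the indirection table is copied as-is; on CMT
 * the queue handles are split round-robin between the engines, and RSS is
 * disabled entirely if each engine ends up with a single queue.
 */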
static int qed_update_vport_rss(struct qed_dev *cdev,
				struct qed_update_vport_rss_params *input,
				struct qed_rss_params *rss)
{
	int i, fn;

	/* Update configuration with what's correct regardless of CMT */
	rss->update_rss_config = 1;
	rss->rss_enable = 1;
	rss->update_rss_capabilities = 1;
	rss->update_rss_ind_table = 1;
	rss->update_rss_key = 1;
	rss->rss_caps = input->rss_caps;
	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */
	if (cdev->num_hwfns == 1) {
		memcpy(rss->rss_ind_table,
		       input->rss_ind_table,
		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
		rss->rss_table_size_log = 7;
		return 0;
	}

	/* Start by copying the non-specific information to the 2nd copy */
	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));

	/* CMT should be round-robin */
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
		struct qed_queue_cid *cid = input->rss_ind_table[i];
		struct qed_rss_params *t_rss;

		if (cid->p_owner == QED_LEADING_HWFN(cdev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
	}

	/* Make sure RSS is actually required */
	for_each_hwfn(cdev, fn) {
		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			return -EINVAL;
		}
		rss[fn].rss_table_size_log = 6;
	}

	return 0;
}

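/* Apply a vport-update ramrod on every hwfn, sharing one sp_params but
 * pointing each engine at its own slice of the RSS configuration.
 */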
static int qed_update_vport(struct qed_dev *cdev,
			    struct qed_update_vport_params *params)
{
	struct qed_sp_vport_update_params sp_params;
	struct qed_rss_params *rss;
	int rc = 0, i;

	if (!cdev)
		return -ENODEV;

	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
	if (!rss)
		return -ENOMEM;

	memset(&sp_params, 0, sizeof(sp_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;

	/* Prepare the RSS configuration */
	if (params->update_rss_flg)
		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
			params->update_rss_flg = 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (params->update_rss_flg)
			sp_params.rss_params = &rss[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = qed_sp_vport_update(p_hwfn, &sp_params,
					 QED_SPQ_MODE_EBLOCK,
					 NULL);
		if (rc) {
			DP_ERR(cdev, "Failed to update VPORT\n");
			goto out;
		}

		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

out:
	vfree(rss);
	return rc;
}

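/* Queue-start wrappers: on CMT devices queues are spread across the hwfns,
 * so the rss/queue index is first mapped to its owning hwfn and the
 * queue-id is scaled down to be relative to that engine.
 */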
static int qed_start_rxq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct qed_rxq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_rx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params,
				    bd_max_bytes,
				    bd_chain_phys_addr,
				    cqe_pbl_addr, cqe_pbl_size, ret_params);
	if (rc) {
		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	int rc, hwfn_index;
	struct qed_hwfn *p_hwfn;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
	if (rc) {
		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

static int qed_start_txq(struct qed_dev *cdev,
			 u8 rss_num,
			 struct qed_queue_start_common_params *p_params,
			 dma_addr_t pbl_addr,
			 u16 pbl_size,
			 struct qed_txq_start_ret_params *ret_params)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = qed_eth_tx_queue_start(p_hwfn,
				    p_hwfn->hw_info.opaque_fid,
				    p_params, 0,
				    pbl_addr, pbl_size, ret_params);

	if (rc) {
		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->p_sb->igu_sb_id);

	return 0;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_stop_fastpath(cdev);
	if (rc) {
		DP_ERR(cdev, "Failed to stop Fastpath\n");
		return rc;
	}

	return 0;
}

static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
{
	struct qed_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];

	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
	if (rc) {
		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
		return rc;
	}

	return 0;
}

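/* Propagate updated VXLAN/GENEVE UDP ports to the firmware on every hwfn,
 * and mirror the new ports into each VF bulletin so VFs learn them too.
 */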
static int qed_tunn_configure(struct qed_dev *cdev,
			      struct qed_tunn_params *tunn_params)
{
	struct qed_tunnel_info tunn_info;
	int i, rc;

	memset(&tunn_info, 0, sizeof(tunn_info));
	if (tunn_params->update_vxlan_port) {
		tunn_info.vxlan_port.b_update_port = true;
		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
	}

	if (tunn_params->update_geneve_port) {
		tunn_info.geneve_port.b_update_port = true;
		tunn_info.geneve_port.port = tunn_params->geneve_port;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;
		struct qed_tunnel_info *tun;

		tun = &hwfn->cdev->tunnel;

		if (IS_PF(cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);
			if (!p_ptt)
				return -EAGAIN;
		} else {
			p_ptt = NULL;
		}

		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			if (IS_PF(cdev))
				qed_ptt_release(hwfn, p_ptt);
			return rc;
		}

		if (IS_PF_SRIOV(hwfn)) {
			u16 vxlan_port, geneve_port;
			int j;

			vxlan_port = tun->vxlan_port.port;
			geneve_port = tun->geneve_port.port;

			qed_for_each_vf(hwfn, j) {
				qed_iov_bulletin_set_udp_ports(hwfn, j,
							       vxlan_port,
							       geneve_port);
			}

			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}

		if (IS_PF(cdev))
			qed_ptt_release(hwfn, p_ptt);
	}

	return 0;
}

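/* Translate the generic rx-mode request (regular / multicast-promisc /
 * promisc) into accept-filter flags and issue the accept command.
 */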
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
					enum qed_filter_rx_mode_type type)
{
	struct qed_filter_accept_flags accept_flags;

	memset(&accept_flags, 0, sizeof(accept_flags));

	accept_flags.update_rx_mode_config = 1;
	accept_flags.update_tx_mode_config = 1;
	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;
	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
					QED_ACCEPT_MCAST_MATCHED |
					QED_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
						 QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
	}

	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
				     QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_ucast(struct qed_dev *cdev,
				      struct qed_filter_ucast_params *params)
{
	struct qed_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = QED_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = QED_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = QED_FILTER_MAC_VLAN;
		ether_addr_copy(ucast.mac, params->mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = QED_FILTER_MAC;
		ether_addr_copy(ucast.mac, params->mac);
	} else {
		ucast.type = QED_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
}

static int qed_configure_filter_mcast(struct qed_dev *cdev,
				      struct qed_filter_mcast_params *params)
{
	struct qed_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = QED_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = QED_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy(mcast.mac[i], params->mac[i]);

	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}

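/* Dispatch a generic filter request to the matching ucast/mcast/rx-mode
 * helper above.
 */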
static int qed_configure_filter(struct qed_dev *cdev,
				struct qed_filter_params *params)
{
	enum qed_filter_rx_mode_type accept_flags;

	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		accept_flags = params->filter.accept_flags;
		return qed_configure_filter_rx_mode(cdev, accept_flags);
	default:
		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
		return -EINVAL;
	}
}

static int qed_configure_arfs_searcher(struct qed_dev *cdev,
				       enum qed_filter_config_mode mode)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_arfs_config_params arfs_config_params;

	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
	arfs_config_params.tcp = true;
	arfs_config_params.udp = true;
	arfs_config_params.ipv4 = true;
	arfs_config_params.ipv6 = true;
	arfs_config_params.mode = mode;
	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
				&arfs_config_params);
	return 0;
}

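/* SPQ completion callback for aRFS filter ramrods - forwards the firmware
 * return code to the protocol driver's arfs_filter_op() hook.
 */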
static void
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
			     void *cookie,
			     union event_ring_data *data, u8 fw_return_code)
{
	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
	void *dev = p_hwfn->cdev->ops_cookie;

	op->arfs_filter_op(dev, cookie, fw_return_code);
}

static int
qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
			      void *cookie,
			      struct qed_ntuple_filter_params *params)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_spq_comp_cb cb;
	int rc;

	cb.function = qed_arfs_sp_response_handler;
	cb.cookie = cookie;

	if (params->b_is_vf) {
		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
					   false)) {
			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
				params->vf_id);
			return -EINVAL;
		}

		params->vport_id = params->vf_id + 1;
		params->qid = QED_RFS_NTUPLE_QID_RSS;
	}

	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to issue a-RFS filter configuration\n");
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
			   "Successfully issued a-RFS filter configuration\n");

	return rc;
}

static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
{
	struct qed_queue_cid *p_cid = handle;
	struct qed_hwfn *p_hwfn;
	int rc;

	p_hwfn = p_cid->p_owner;
	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
	if (rc)
		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");

	return rc;
}

static int qed_fp_cqe_completion(struct qed_dev *dev,
				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
				      cqe);
}

static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
{
	int i, ret;

	if (IS_PF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
		if (ret)
			return ret;
	}

	return 0;
}

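/* The qed_eth_ops table below is what protocol drivers such as qede bind
 * to via qed_get_eth_ops(); they drive the device exclusively through
 * these callbacks. A minimal (hypothetical) consumer sketch:
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info info;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *
 * where 'my_cb_ops' and 'my_cookie' stand in for the caller's callback
 * table and private context.
 */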
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif

#ifdef CONFIG_DCB
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#endif

extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;

static const struct qed_eth_ops qed_eth_ops_pass = {
	.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
	.iov = &qed_iov_ops_pass,
#endif
#ifdef CONFIG_DCB
	.dcb = &qed_dcbnl_ops_pass,
#endif
	.ptp = &qed_ptp_ops_pass,
	.fill_dev_info = &qed_fill_eth_dev_info,
	.register_ops = &qed_register_eth_ops,
	.check_mac = &qed_check_mac,
	.vport_start = &qed_start_vport,
	.vport_stop = &qed_stop_vport,
	.vport_update = &qed_update_vport,
	.q_rx_start = &qed_start_rxq,
	.q_rx_stop = &qed_stop_rxq,
	.q_tx_start = &qed_start_txq,
	.q_tx_stop = &qed_stop_txq,
	.filter_config = &qed_configure_filter,
	.fastpath_stop = &qed_fastpath_stop,
	.eth_cqe_completion = &qed_fp_cqe_completion,
	.get_vport_stats = &qed_get_vport_stats,
	.tunn_config = &qed_tunn_configure,
	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
	.configure_arfs_searcher = &qed_configure_arfs_searcher,
	.get_coalesce = &qed_get_coalesce,
	.req_bulletin_update_mac = &qed_req_bulletin_update_mac,
};

const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);

void qed_put_eth_ops(void)
{
	/* TODO - reference count for module? */
}
EXPORT_SYMBOL(qed_put_eth_ops);