/* bnx2x_sriov.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>
static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)

struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
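
/* Illustrative sketch (not part of the driver): an abs_vfid outside this
 * PF's range maps past BNX2X_NR_VIRTFN(), so callers must NULL-check the
 * lookup, as the event-handling paths later in this file do:
 *
 *	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 *	if (!vf)
 *		return;
 */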
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
	/* acking a VF sb through the PF - use the GRC */
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	REG_WR(bp, igu_addr_ctl, ctl);
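
/* Illustrative sketch (not part of the driver): the pair of GRC writes above
 * stands in for the access a function would normally make through its own
 * IGU BAR - the producer-update data word first, then a control word
 * carrying the encoded address and VF FID. A typical call, as used when
 * enabling interrupts on a freshly constructed queue, is:
 *
 *	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, sb_idx), USTORM_ID, 0,
 *			    IGU_INT_ENABLE, 0);
 */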
static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		BNX2X_ERR("Slowpath objects not yet initialized!\n");
		DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->txq_params.traffic_type);

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	init_p->cxts[0] = q->cxt;

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
	setup_p->gen_params.fp_hsi = vf->fp_hsi;
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
		__clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
	struct bnx2x_queue_state_params *q_params;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);

	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;

bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	/* the first igu entry belonging to VFs of this PF */
	if (!BP_VFDB(bp)->first_vf_igu_entry)
		BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

	/* the first igu entry belonging to this VF */
	if (!vf_sb_count(vf))
		vf->igu_base_id = igu_sb_id;

	BP_VFDB(bp)->vf_sbs_pool++;

static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, int type)
	struct bnx2x_vlan_mac_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
	   (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
	} else if (type == BNX2X_VF_FILTER_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
		set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
		BNX2X_ERR("Failed to delete all %s\n",
			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
	struct bnx2x_vlan_mac_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);

	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :

		set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
			  (filter->type == BNX2X_VF_FILTER_MAC) ?

	filter->applied = true;

int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			if (!filters->filters[i].applied)
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],

	/* It's our responsibility to free the filters */

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);

	/* Schedule the configuration of any pending vlan filters */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);

static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN_MAC);
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN);
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_MAC);

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);

	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);

int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);

	mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
557 BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
563 INIT_LIST_HEAD(&mcast.mcast_list);
564 for (i = 0; i < mc_num; i++) {
565 mc[i].mac = mcasts[i];
566 list_add_tail(&mc[i].link,
571 mcast.mcast_list_len = mc_num;
572 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
574 BNX2X_ERR("Failed to set multicasts\n");
576 /* clear existing mcasts */
577 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
579 BNX2X_ERR("Failed to remove multicasts\n");
587 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
588 struct bnx2x_rx_mode_ramrod_params *ramrod,
589 struct bnx2x_virtf *vf,
590 unsigned long accept_flags)
592 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
594 memset(ramrod, 0, sizeof(*ramrod));
595 ramrod->cid = vfq->cid;
596 ramrod->cl_id = vfq_cl_id(vf, vfq);
597 ramrod->rx_mode_obj = &bp->rx_mode_obj;
598 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
599 ramrod->rx_accept_flags = accept_flags;
600 ramrod->tx_accept_flags = accept_flags;
601 ramrod->pstate = &vf->filter_state;
602 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
604 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
605 set_bit(RAMROD_RX, &ramrod->ramrod_flags);
606 set_bit(RAMROD_TX, &ramrod->ramrod_flags);
608 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
609 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
612 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
613 int qid, unsigned long accept_flags)
615 struct bnx2x_rx_mode_ramrod_params ramrod;
617 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
619 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
620 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
621 vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
622 return bnx2x_config_rx_mode(bp, &ramrod);
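
/* Illustrative sketch (not part of the driver): accept_flags is a bitmask of
 * BNX2X_ACCEPT_* values applied to both the rx and tx sides; passing 0, as
 * the queue teardown below does, amounts to dropping all traffic:
 *
 *	rc = bnx2x_vf_rxmode(bp, vf, LEADING_IDX, 0);
 */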
int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     BNX2X_VF_FILTER_VLAN_MAC);
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     BNX2X_VF_FILTER_VLAN);
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     BNX2X_VF_FILTER_MAC);
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);

	rc = bnx2x_vf_queue_destroy(bp, vf, qid);

	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;

	switch (was_err_group) {
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
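
/* Illustrative sketch (not part of the driver): the "was error" bits are
 * spread over four 32-bit clear registers, so the path-adjusted VF id is
 * split into a register group (the >> 5 above) and a bit index
 * (abs_vfid & 0x1f). E.g. on path 0, abs_vfid 40 lands in group 1, bit 8,
 * i.e. PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR.
 */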
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
	/* Reset the VF in the IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
		return bnx2x_is_pcie_pending(dev);

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");
/* must be called after the number of PF queues and the number of VFs are
 * determined
 */
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
	struct vf_pf_resc_request *resc = &vf->alloc_resc;

	/* will be set only during VF-ACQUIRE */

	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;

static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	vf->malicious = false;

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
	struct bnx2x_virtf *vf;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen, for example, in KVM where an 'all ones' FLR
	 * request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write-to-clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
void bnx2x_vf_handle_flr_event(struct bnx2x *bp)

	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			   "Initiating Final cleanup for VF %d\n",

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold. This threshold represents the number
	 * of doorbells allowed in the main DORQ FIFO for a specific VF.
	 */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
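
/* Illustrative sketch (not part of the driver): with VF_BASE at 0 and the
 * window size set to BNX2X_VF_CID_WND, the DQ can derive (vf, queue) from a
 * doorbell CID alone; a hypothetical decode would be:
 *
 *	u32 qid = cid & ((1 << BNX2X_VF_CID_WND) - 1);
 *	u32 vfid = cid >> BNX2X_VF_CID_WND;
 */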
void bnx2x_iov_init_dmae(struct bnx2x *bp)
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);

static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
	struct pci_dev *dev = bp->pdev;

	return pci_domain_nr(dev->bus);

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));

	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
	return BP_VFDB(bp)->vf_sbs_pool;
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
		BNX2X_ERR("failed to find SRIOV capability in device\n");

	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))

	/* get the number of SRIOV bars */

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))

	/* check if SRIOV support is turned off */

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");

	/* verify ari is enabled */
	if (!pci_ari_enabled(bp->pdev->bus)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
		BNX2X_ERR("failed to allocate vf database\n");
	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);
	/* allocate the vf array */
	bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
				sizeof(struct bnx2x_virtf),
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
		/* enable spoofchk by default */
		bnx2x_vf(bp, i, spoofchk) = 1;

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	if (!bnx2x_get_vf_igu_cam_info(bp)) {
		BNX2X_ERR("No entries in IGU CAM for vfs\n");

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
				 sizeof(struct bnx2x_vf_queue),

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	mutex_init(&bp->vfdb->bulletin_mutex);

	if (SHMEM2_HAS(bp, sriov_switch_mode))
		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);

	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
void bnx2x_iov_remove_one(struct bnx2x *bp)

	/* if SRIOV is not enabled there's nothing to do */

	bnx2x_disable_sriov(bp);

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   bp->vfdb->sriov.first_vf_in_pf +
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
void bnx2x_iov_free_mem(struct bnx2x *bp)

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
int bnx2x_iov_alloc_mem(struct bnx2x *bp)

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);

		tot_size -= cxt->size;

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
	if (!BP_VFDB(bp)->sp_dma.addr)
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
	if (!BP_VF_MBX_DMA(bp)->addr)

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
	if (!BP_VF_BULLETIN_DMA(bp)->addr)

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);

static int bnx2x_max_speed_cap(struct bnx2x *bp)
	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))

	return 10000; /* assume lowest supported speed is 10G */
int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
	struct bnx2x_link_report_data *state = &bp->last_reported_link;
	struct pf_vf_bulletin_content *bulletin;
	struct bnx2x_virtf *vf;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);

	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;

		bulletin->link_speed = state->line_speed;
		bulletin->link_flags = 0;
		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
		if (test_bit(BNX2X_LINK_REPORT_FD,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_speed = bnx2x_max_speed_cap(bp);
		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;

	DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
	   "vf %d mode %u speed %d flags %x\n", idx,
	   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, idx);
		BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);

	mutex_unlock(&bp->vfdb->bulletin_mutex);
int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = BP_VF(bp, idx);

	if (vf->link_cfg == link_state)
		return 0; /* nothing to do */

	vf->link_cfg = link_state;

	return bnx2x_iov_link_update_vf(bp, idx);

void bnx2x_iov_link_update(struct bnx2x *bp)

	for_each_vf(bp, vfid)
		bnx2x_iov_link_update_vf(bp, vfid);
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */
		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
				       vf_vlan_rules_cnt(vf));
		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
				       vf_mac_rules_cnt(vf));

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->domain = bnx2x_vf_domain(bp, vfid);
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)

	/* release all the VFs */
		bnx2x_vf_release(bp, BP_VF(bp, i));

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
	unsigned long ramrod_flags = 0;
	u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
		BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);

		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
	struct bnx2x_mcast_ramrod_params rparam = {NULL};

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",

void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
	smp_mb__before_atomic();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_atomic();
static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		cid = SW_CID(elem->message.data.eth_event.echo);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
			  elem->message.data.malicious_vf_event.err_id);

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",

	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       BNX2X_Q_CMD_CFC_DEL);
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
	case EVENT_RING_OPCODE_VF_FLR:
		/* Do nothing for now */
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		vf->malicious = true;
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
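
/* Illustrative sketch (not part of the driver): combining the two helpers'
 * math, a VF CID decodes into both coordinates as:
 *
 *	int qidx = vf_cid & ((1 << BNX2X_VF_CID_WND) - 1);
 *	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) &
 *		       (BNX2X_MAX_NUM_OF_VFS - 1);
 */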
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
	struct bnx2x_virtf *vf;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	bool is_fcoe = false;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -

	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	       first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d not enabled so no stats for it\n",

		if (vf->malicious) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d malicious so no stats for it\n",

		DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
		       "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;
			/* collect stats for active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "added address %x %x for vf %d queue %d client %d\n",
			       cur_query_entry->address.hi,
			       cur_query_entry->address.lo,
			       cur_query_entry->funcID,
			       j, cur_query_entry->index);

			cur_data_offset += sizeof(struct per_queue_stats);

		/* all stats are coalesced to the leading queue */
		if (vf->cfg_flags & VF_CFG_STATS_COALESCE)

	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
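
/* Illustrative sketch (not part of the driver): each 32-bit entry of the HST
 * zone permission table guards one queue zone; the low bits name the owning
 * VF and bit 6 appears to act as the valid/enable bit. So enabling qid 3 for
 * abs_vfid 5 writes (5 | (1 << 6)) == 0x45 to
 * PXP_REG_HST_ZONE_PERMISSION_TABLE + 3 * 4.
 */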
static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);

int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));

	/* if the state is 'acquired' the VF was not released or FLR'd; in
	 * this case the returned resources match the previously acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= than the previously acquired numbers\n",

	/* Otherwise vf state must be 'free' or 'reset' */
2028 if (vf->state != VF_FREE && vf->state != VF_RESET) {
2029 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2030 vf->abs_vfid, vf->state);
2034 /* static allocation:
2035 * the global maximum number are fixed per VF. Fail the request if
2036 * requested number exceed these globals
2038 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2040 "cannot fulfill vf resource request. Placing maximal available values in response\n");
2041 /* set the max resource in the vf */
	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);

	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

			BNX2X_ERR("q number %d was not allocated\n", i);

		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);

	vf->state = VF_ACQUIRED;
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
	struct bnx2x_func_init_params func_init = {0};

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);

	/* let FLR complete ... */

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
	/* function setup */
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	bnx2x_func_init(bp, &func_init);

	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);
struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;

static void bnx2x_set_vf_state(void *cookie)
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Close all queues */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, i);

	/* disable the interrupts */
	DP(BNX2X_MSG_IOV, "disabling igu\n");
	bnx2x_vf_igu_disable(bp, vf);

	/* disable the VF */
	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
	bnx2x_vf_clr_qtbl(bp, vf);

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
		struct set_vf_state_cookie cookie;

		cookie.state = VF_ACQUIRED;
		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);

	DP(BNX2X_MSG_IOV, "set state to acquired\n");

	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :

	switch (vf->state) {
		rc = bnx2x_vf_close(bp, vf);
		/* Fall through - to release resources */
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);

	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct bnx2x_config_rss_params *rss)
{
	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
	return bnx2x_config_rss(bp, rss);
}
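/* bnx2x_vf_rss_update() above and bnx2x_vf_tpa_update() below both set
 * RAMROD_COMP_WAIT, which makes the operation synchronous: the caller blocks
 * until the ramrod completes instead of taking an asynchronous completion.
 */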
int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct vfpf_tpa_tlv *tlv,
			struct bnx2x_queue_update_tpa_params *params)
{
	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
	struct bnx2x_queue_state_params qstate;
	int qid, rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Set ramrod params */
	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
	memcpy(&qstate.params.update_tpa, params,
	       sizeof(struct bnx2x_queue_update_tpa_params));
	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);

	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.params.update_tpa.sge_map = sge_addr[qid];
		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
		   U64_LO(sge_addr[qid]));
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc) {
			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
				  vf->abs_vfid, qid);
			return rc;
		}
	}
	return rc;
}
/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vf_free(bp, vf);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
		     vf->abs_vfid, rc);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
	return rc;
}
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* record the unlocking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}
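/* Convention: every PF-side flow that touches VF HW state brackets its work
 * with the lock/unlock pair above, passing the same channel TLV to both (see
 * CHANNEL_TLV_PF_SET_MAC / CHANNEL_TLV_PF_SET_VLAN below), so a mismatch
 * flags an unbalanced flow via the WARN above.
 */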
static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
{
	struct bnx2x_queue_state_params q_params;
	u32 prev_flags;
	int i, rc;

	/* Verify changes are needed and record current Tx switching state */
	prev_flags = bp->flags;
	if (enable)
		bp->flags |= TX_SWITCHING;
	else
		bp->flags &= ~TX_SWITCHING;
	if (prev_flags == bp->flags)
		return 0;

	/* Verify state enables the sending of queue ramrods */
	if ((bp->state != BNX2X_STATE_OPEN) ||
	    (bnx2x_get_q_logical_state(bp,
				       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
		return 0;

	/* send q. update ramrod to configure Tx switching */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &q_params.params.update.update_flags);
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			  &q_params.params.update.update_flags);
	else
		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			    &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int tx_idx;

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		for (tx_idx = FIRST_TX_COS_INDEX;
		     tx_idx < fp->max_cos; tx_idx++) {
			q_params.params.update.cid_index = tx_idx;

			/* Update the Queue state */
			rc = bnx2x_queue_state_change(bp, &q_params);
			if (rc) {
				BNX2X_ERR("Failed to configure Tx switching\n");
				return rc;
			}
		}
	}

	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
	return 0;
}
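/* Tx switching (as used here) has the device examine transmitted packets and
 * switch those destined to another function on the same port - e.g. PF-to-VF
 * or VF-to-VF traffic - back in HW, which is why it is toggled together with
 * SR-IOV in the configure path below.
 */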
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		bnx2x_set_pf_tx_switching(bp, false);
		bnx2x_disable_sriov(bp);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}
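/* This is the driver's .sriov_configure PCI callback; it is typically reached
 * from a sysfs write, e.g. (hypothetical BDF):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * where writing 0 tears the VFs back down.
 */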
#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf_sb_count(vf) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space - the value in the
	 * PCI configuration space should be the index of the last entry,
	 * namely one less than the actual size of the table
	 */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues - 1);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues - 1);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	bnx2x_disable_sriov(bp);

	rc = bnx2x_set_pf_tx_switching(bp, true);
	if (rc)
		return rc;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}
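/* Worked example for the MSI-X sizing above: with num_vf_queues == 8, the
 * value 7 is written, matching the PCI encoding in which the MSI-X Table
 * Size field holds the number of entries minus one.
 */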
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
					  htons(ETH_P_8021Q));
	}
}
void bnx2x_disable_sriov(struct bnx2x *bp)
{
	if (pci_vfs_assigned(bp->pdev)) {
		DP(BNX2X_MSG_IOV,
		   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return;
	}
	pci_disable_sriov(bp->pdev);
}
static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
		return -EINVAL;
	}
	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
		return -EINVAL;
	}
	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
		return -EINVAL;
	}
	if (test_queue && !(*vf)->vfqs) {
		BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}
	if (!*bulletin) {
		BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}
int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->min_tx_rate = 0;
	ivi->spoofchk = vf->spoofchk ? 1 : 0;
	ivi->linkstate = vf->link_cfg;
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
		}
	} else {
		mutex_lock(&bp->vfdb->bulletin_mutex);

		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			eth_zero_addr(ivi->mac);

		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);

		mutex_unlock(&bp->vfdb->bulletin_mutex);
	}

	return 0;
}
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending
 * a mac configuration request, the PF will simply fail the request and the VF
 * can try again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from vf unless match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);

	/* release lock before checking return code */
	mutex_unlock(&bp->vfdb->bulletin_mutex);

	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj;

		/* User should be able to see failure reason in system logs */
		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
			return -EINVAL;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return rc;
}
static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
					 struct bnx2x_virtf *vf, bool accept)
{
	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
	unsigned long accept_flags;

	/* need to remove/add the VF's accept_any_vlan bit */
	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
	if (accept)
		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
	else
		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
			      accept_flags);
	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
	bnx2x_config_rx_mode(bp, &rx_ramrod);
}
static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    u16 vlan, bool add)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* configure the new vlan to device */
	memset(&ramrod_param, 0, sizeof(ramrod_param));
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	ramrod_param.ramrod_flags = ramrod_flags;
	ramrod_param.user_req.u.vlan.vlan = vlan;
	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
					: BNX2X_VLAN_MAC_DEL;
	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc) {
		BNX2X_ERR("failed to configure vlan\n");
		return -EINVAL;
	}

	return 0;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
		      __be16 vlan_proto)
{
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_mac_obj *vlan_obj;
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;
	struct bnx2x_virtf *vf = NULL;
	int i, rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, qos);

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can only
	 * configure the vlan later when it does. Treat vlan id 0 as remove the
	 * Host tag.
	 */
	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc)
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
	mutex_unlock(&bp->vfdb->bulletin_mutex);

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* clear accept_any_vlan when HV forces vlan, otherwise
	 * according to VF capabilities
	 */
	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);

	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
	if (rc)
		goto out;
	/* send queue update ramrods to configure default vlan and
	 * silent vlan removal
	 */
	for_each_vfq(vf, i) {
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);

		/* validate the Q is UP */
		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
			continue;

		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);
		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
			update_params->silent_removal_value =
				vlan & VLAN_VID_MASK;
			update_params->silent_removal_mask = VLAN_VID_MASK;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
				  i);
			goto out;
		}
	}
out:
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	if (!rc)
		DP(BNX2X_MSG_IOV,
		   "updated VF[%d] vlan configuration (vlan = %d)\n",
		   vfidx, vlan);

	return rc;
}
int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf;
	int i, rc = 0;

	vf = BP_VF(bp, idx);
	if (!vf)
		return -EINVAL;

	/* nothing to do */
	if (vf->spoofchk == val)
		return 0;

	vf->spoofchk = val ? 1 : 0;

	DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
	   val ? "enabling" : "disabling", idx);

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* send queue update ramrods to configure spoofchk */
	for_each_vfq(vf, i) {
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);

		/* validate the Q is UP */
		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
			continue;

		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
			  &update_params->update_flags);
		if (val)
			__set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
				  &update_params->update_flags);
		else
			__clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
				    &update_params->update_flags);

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
				  val ? "enable" : "disable", idx, i);
			goto out;
		}
	}
out:
	if (!rc)
		DP(BNX2X_MSG_IOV,
		   "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
		   idx);

	return rc;
}
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
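/* Both sides use this helper: the PF computes bulletin->crc with it before
 * posting, and the VF recomputes it when sampling below, so a torn read of an
 * in-flight post is detected and the sample retried.
 */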
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content *bulletin;
	int attempts;

	/* sampling structure in mid post may result in corrupted data;
	 * validate crc to ensure coherency.
	 */
	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
		u32 crc;

		/* sample the bulletin board */
		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
		       sizeof(union pf_vf_bulletin));

		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);

		if (bp->shadow_bulletin.content.crc == crc)
			break;

		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
			  bp->shadow_bulletin.content.crc, crc);
	}

	if (attempts >= BULLETIN_ATTEMPTS) {
		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
			  attempts);
		return PFVF_BULLETIN_CRC_ERR;
	}
	bulletin = &bp->shadow_bulletin.content;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin->version)
		return PFVF_BULLETIN_UNCHANGED;

	/* the mac address in bulletin board is valid and is new */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac))
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);

	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
		   bulletin->link_speed, bulletin->link_flags);

		bp->vf_link_vars.line_speed = bulletin->link_speed;
		bp->vf_link_vars.link_report_flags = 0;
		/* Link is down */
		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &bp->vf_link_vars.link_report_flags);
		/* Full duplex */
		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &bp->vf_link_vars.link_report_flags);
		/* Rx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		/* Tx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		__bnx2x_link_report(bp);
	}

	/* copy new bulletin board to bp */
	memcpy(&bp->old_bulletin, bulletin,
	       sizeof(struct pf_vf_bulletin_content));

	return PFVF_BULLETIN_UPDATED;
}
void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
				       BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}
void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
					 sizeof(struct bnx2x_vf_mbx_msg));
	if (!bp->vf2pf_mbox)
		goto alloc_mem_err;

	/* allocate pf 2 vf bulletin board */
	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
					     sizeof(union pf_vf_bulletin));
	if (!bp->pf2vf_bulletin)
		goto alloc_mem_err;

	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);

	return 0;

alloc_mem_err:
	bnx2x_vf_pci_dealloc(bp);
	return -ENOMEM;
}
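/* Note: BNX2X_PCI_ALLOC hands back DMA-coherent memory, which is what lets
 * the mailbox and bulletin board be shared directly with the device (and,
 * through it, the peer function) without explicit sync operations.
 */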
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}
void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	if (!netif_running(bp->dev))
		return;

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
			       &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
			       &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	/* make sure the flag update is visible before the work is queued */
	smp_mb__before_atomic();
	set_bit(flag, &bp->iov_task_state);
	smp_mb__after_atomic();
	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}