1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
6 /*********************notification routines***********************/
10 * @pf: pointer to the PF structure
11 * @v_opcode: operation code
12 * @v_retval: return value
13 * @msg: pointer to the msg buffer
16 * send a message to all VFs on a given PF
18 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
19 enum virtchnl_ops v_opcode,
20 i40e_status v_retval, u8 *msg,
23 struct i40e_hw *hw = &pf->hw;
24 struct i40e_vf *vf = pf->vf;
27 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
28 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
29 /* Not all VFs are enabled, so skip the ones that are not */
30 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
31 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
34 /* Ignore return value on purpose - a given VF may fail, but
35 * we need to keep going and send to all of them
37 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
43 * i40e_vc_notify_vf_link_state
44 * @vf: pointer to the VF structure
46 * send a link status message to a single VF
48 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
50 struct virtchnl_pf_event pfe;
51 struct i40e_pf *pf = vf->pf;
52 struct i40e_hw *hw = &pf->hw;
53 struct i40e_link_status *ls = &pf->hw.phy.link_info;
54 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
56 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
57 pfe.severity = PF_EVENT_SEVERITY_INFO;
59 /* Always report link is down if the VF queues aren't enabled */
60 if (!vf->queues_enabled) {
61 pfe.event_data.link_event.link_status = false;
62 pfe.event_data.link_event.link_speed = 0;
63 } else if (vf->link_forced) {
64 pfe.event_data.link_event.link_status = vf->link_up;
65 pfe.event_data.link_event.link_speed =
66 (vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
68 pfe.event_data.link_event.link_status =
69 ls->link_info & I40E_AQ_LINK_UP;
70 pfe.event_data.link_event.link_speed =
71 i40e_virtchnl_link_speed(ls->link_speed);
74 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
75 0, (u8 *)&pfe, sizeof(pfe), NULL);
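/* Illustrative summary of the precedence above: queues disabled ->
 * always report link down; link administratively forced (e.g. via the
 * PF's ndo_set_vf_link_state callback) -> report the forced state;
 * otherwise -> mirror the physical link from pf->hw.phy.link_info.
 */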
79 * i40e_vc_notify_link_state
80 * @pf: pointer to the PF structure
82 * send a link status message to all VFs on a given PF
84 void i40e_vc_notify_link_state(struct i40e_pf *pf)
88 for (i = 0; i < pf->num_alloc_vfs; i++)
89 i40e_vc_notify_vf_link_state(&pf->vf[i]);
93 * i40e_vc_notify_reset
94 * @pf: pointer to the PF structure
96 * indicate a pending reset to all VFs on a given PF
98 void i40e_vc_notify_reset(struct i40e_pf *pf)
100 struct virtchnl_pf_event pfe;
102 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
103 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
104 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
105 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
109 * i40e_vc_notify_vf_reset
110 * @vf: pointer to the VF structure
112 * indicate a pending reset to the given VF
114 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
116 struct virtchnl_pf_event pfe;
119 /* validate the request */
120 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
123 /* verify that the VF is in either the init or active state before proceeding */
124 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
125 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
128 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
130 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
131 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
132 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
134 sizeof(struct virtchnl_pf_event), NULL);
136 /***********************misc routines*****************************/
140 * @vf: pointer to the VF info
142 * Disable the VF through a SW reset.
144 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
148 i40e_vc_notify_vf_reset(vf);
150 /* We want to ensure that an actual reset is initiated after this
151 * function is called. However, we do not want to wait forever, so
152 * we'll give a reasonable time and print a message if we failed to initiate the reset. */
155 for (i = 0; i < 20; i++) {
156 if (i40e_reset_vf(vf, false))
158 usleep_range(10000, 20000);
161 dev_warn(&vf->pf->pdev->dev,
162 "Failed to initiate reset for VF %d after 200 milliseconds\n",
167 * i40e_vc_isvalid_vsi_id
168 * @vf: pointer to the VF info
169 * @vsi_id: VF relative VSI id
171 * check whether the given VSI id is valid
173 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
175 struct i40e_pf *pf = vf->pf;
176 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
178 return (vsi && (vsi->vf_id == vf->vf_id));
182 * i40e_vc_isvalid_queue_id
183 * @vf: pointer to the VF info
185 * @qid: VSI relative queue id
187 * check whether the given queue id is valid
189 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
192 struct i40e_pf *pf = vf->pf;
193 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
195 return (vsi && (qid < vsi->alloc_queue_pairs));
199 * i40e_vc_isvalid_vector_id
200 * @vf: pointer to the VF info
201 * @vector_id: VF relative vector id
203 * check whether the given vector id is valid
205 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
207 struct i40e_pf *pf = vf->pf;
209 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
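/* Usage sketch (illustrative): these three helpers are the gatekeepers
 * for ids supplied by a VF; the message handlers below call them before
 * touching any resource, e.g.:
 *
 *	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
 *	    !i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
 *		return I40E_ERR_PARAM;
 *
 * Note that vector id 0 passes the check above; it is the misc/mailbox
 * vector, which i40e_config_irq_link_list() handles via LNKLST0.
 */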
212 /***********************vf resource mgmt routines*****************/
215 * i40e_vc_get_pf_queue_id
216 * @vf: pointer to the VF info
217 * @vsi_id: id of VSI as provided by the FW
218 * @vsi_queue_id: VSI relative queue id
220 * return PF relative queue id
222 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
225 struct i40e_pf *pf = vf->pf;
226 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
227 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
232 if (le16_to_cpu(vsi->info.mapping_flags) &
233 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
235 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
237 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
244 * i40e_get_real_pf_qid
245 * @vf: pointer to the VF info
247 * @queue_id: queue number
249 * wrapper function to get pf_queue_id, handling the ADq case as well
251 static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
255 if (vf->adq_enabled) {
256 /* Although the VF considers all the queues (there can be 1 to 16)
257 * as its own, they may actually belong to different VSIs (up to 4).
258 * We need to find which queue belongs to which VSI.
260 for (i = 0; i < vf->num_tc; i++) {
261 if (queue_id < vf->ch[i].num_qps) {
262 vsi_id = vf->ch[i].vsi_id;
265 /* find the right queue id, which is relative to the given VSI */
268 queue_id -= vf->ch[i].num_qps;
272 return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
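/* Worked example (illustrative): with ADq enabled and two TCs of
 * 4 queue pairs each (ch[0].num_qps == ch[1].num_qps == 4), a
 * VF-relative queue_id of 5 is not within ch[0], so the loop above
 * subtracts 4 and resolves it as queue 1 of ch[1].vsi_id before the
 * PF queue id lookup.
 */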
276 * i40e_config_irq_link_list
277 * @vf: pointer to the VF info
278 * @vsi_id: id of VSI as given by the FW
279 * @vecmap: irq map info
281 * configure irq link list from the map
283 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
284 struct virtchnl_vector_map *vecmap)
286 unsigned long linklistmap = 0, tempmap;
287 struct i40e_pf *pf = vf->pf;
288 struct i40e_hw *hw = &pf->hw;
289 u16 vsi_queue_id, pf_queue_id;
290 enum i40e_queue_type qtype;
291 u16 next_q, vector_id, size;
295 vector_id = vecmap->vector_id;
298 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
300 reg_idx = I40E_VPINT_LNKLSTN(
301 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
304 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
305 /* Special case - No queues mapped on this vector */
306 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
309 tempmap = vecmap->rxq_map;
310 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
311 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
315 tempmap = vecmap->txq_map;
316 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
317 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
321 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
322 next_q = find_first_bit(&linklistmap, size);
323 if (unlikely(next_q == size))
326 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
327 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
328 pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
329 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
331 wr32(hw, reg_idx, reg);
333 while (next_q < size) {
335 case I40E_QUEUE_TYPE_RX:
336 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
337 itr_idx = vecmap->rxitr_idx;
339 case I40E_QUEUE_TYPE_TX:
340 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
341 itr_idx = vecmap->txitr_idx;
347 next_q = find_next_bit(&linklistmap, size, next_q + 1);
349 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
350 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
351 pf_queue_id = i40e_get_real_pf_qid(vf,
355 pf_queue_id = I40E_QUEUE_END_OF_LIST;
359 /* format for the RQCTL & TQCTL regs is the same */
361 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
362 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
363 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
364 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
365 wr32(hw, reg_idx, reg);
368 /* if the VF is running in polling mode and using interrupt zero,
369 * we need to disable auto-masking when enabling interrupt zero for VFs.
371 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
373 reg = rd32(hw, I40E_GLINT_CTL);
374 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
375 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
376 wr32(hw, I40E_GLINT_CTL, reg);
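/* Encoding note (a sketch of the bitmap used above): linklistmap packs
 * I40E_VIRTCHNL_SUPPORTED_QTYPES (2) bits per VSI queue pair, the Rx
 * queue in the even bit and the Tx queue in the odd one. That is why
 * next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES recovers the queue index and
 * next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES the queue type while walking
 * the list.
 */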
385 * i40e_release_iwarp_qvlist
386 * @vf: pointer to the VF.
389 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
391 struct i40e_pf *pf = vf->pf;
392 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
396 if (!vf->qvlist_info)
399 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
400 for (i = 0; i < qvlist_info->num_vectors; i++) {
401 struct virtchnl_iwarp_qv_info *qv_info;
402 u32 next_q_index, next_q_type;
403 struct i40e_hw *hw = &pf->hw;
404 u32 v_idx, reg_idx, reg;
406 qv_info = &qvlist_info->qv_info[i];
409 v_idx = qv_info->v_idx;
410 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
411 /* Figure out the queue after the CEQ and make that the first queue */
414 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
415 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
416 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
417 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
418 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
419 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
421 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
422 reg = (next_q_index &
423 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
425 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
427 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
430 kfree(vf->qvlist_info);
431 vf->qvlist_info = NULL;
435 * i40e_config_iwarp_qvlist
436 * @vf: pointer to the VF info
437 * @qvlist_info: queue and vector list
439 * Return 0 on success or < 0 on error
441 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
442 struct virtchnl_iwarp_qvlist_info *qvlist_info)
444 struct i40e_pf *pf = vf->pf;
445 struct i40e_hw *hw = &pf->hw;
446 struct virtchnl_iwarp_qv_info *qv_info;
447 u32 v_idx, i, reg_idx, reg;
448 u32 next_q_idx, next_q_type;
452 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
454 if (qvlist_info->num_vectors > msix_vf) {
455 dev_warn(&pf->pdev->dev,
456 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
457 qvlist_info->num_vectors,
463 kfree(vf->qvlist_info);
464 vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
465 qvlist_info->num_vectors - 1),
467 if (!vf->qvlist_info) {
471 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
473 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
474 for (i = 0; i < qvlist_info->num_vectors; i++) {
475 qv_info = &qvlist_info->qv_info[i];
479 /* Validate that the vector id belongs to this VF */
480 if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
485 v_idx = qv_info->v_idx;
487 vf->qvlist_info->qv_info[i] = *qv_info;
489 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
490 /* We might be sharing the interrupt, so get the first queue
491 * index and type, push it down the list by adding the new
492 * queue on top. Also link it with the new queue in CEQCTL.
494 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
495 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
496 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
497 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
498 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
500 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
501 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
502 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
503 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
504 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
505 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
506 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
507 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
509 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
510 reg = (qv_info->ceq_idx &
511 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
512 (I40E_QUEUE_TYPE_PE_CEQ <<
513 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
514 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
517 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
518 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
519 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
520 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
522 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
528 kfree(vf->qvlist_info);
529 vf->qvlist_info = NULL;
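/* Worked example (illustrative): if vector v_idx previously had first
 * queue F, configuring CEQ c above yields LNKLSTN[v] -> c and
 * CEQCTL[c].NEXTQ -> F, i.e. the CEQ is pushed onto the head of the
 * per-vector list; this is exactly the splice that
 * i40e_release_iwarp_qvlist() undoes.
 */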
535 * i40e_config_vsi_tx_queue
536 * @vf: pointer to the VF info
537 * @vsi_id: id of VSI as provided by the FW
538 * @vsi_queue_id: VSI relative queue index
539 * @info: configuration info
543 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
545 struct virtchnl_txq_info *info)
547 struct i40e_pf *pf = vf->pf;
548 struct i40e_hw *hw = &pf->hw;
549 struct i40e_hmc_obj_txq tx_ctx;
550 struct i40e_vsi *vsi;
555 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
559 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
560 vsi = i40e_find_vsi_from_id(pf, vsi_id);
566 /* clear the context structure first */
567 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
569 /* only set the required fields */
570 tx_ctx.base = info->dma_ring_addr / 128;
571 tx_ctx.qlen = info->ring_len;
572 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
573 tx_ctx.rdylist_act = 0;
574 tx_ctx.head_wb_ena = info->headwb_enabled;
575 tx_ctx.head_wb_addr = info->dma_headwb_addr;
577 /* clear the context in the HMC */
578 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
580 dev_err(&pf->pdev->dev,
581 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
587 /* set the context in the HMC */
588 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
590 dev_err(&pf->pdev->dev,
591 "Failed to set VF LAN Tx queue context %d error: %d\n",
597 /* associate this queue with the PCI VF function */
598 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
599 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
600 & I40E_QTX_CTL_PF_INDX_MASK);
601 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
602 << I40E_QTX_CTL_VFVM_INDX_SHIFT)
603 & I40E_QTX_CTL_VFVM_INDX_MASK);
604 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
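/* Note (illustrative): HMC queue contexts store the ring base address
 * in 128-byte units, hence the dma_ring_addr / 128 above; the Rx queue
 * context below uses the same convention.
 */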
612 * i40e_config_vsi_rx_queue
613 * @vf: pointer to the VF info
614 * @vsi_id: id of VSI as provided by the FW
615 * @vsi_queue_id: VSI relative queue index
616 * @info: configuration info
620 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
622 struct virtchnl_rxq_info *info)
624 struct i40e_pf *pf = vf->pf;
625 struct i40e_hw *hw = &pf->hw;
626 struct i40e_hmc_obj_rxq rx_ctx;
630 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
632 /* clear the context structure first */
633 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
635 /* only set the required fields */
636 rx_ctx.base = info->dma_ring_addr / 128;
637 rx_ctx.qlen = info->ring_len;
639 if (info->splithdr_enabled) {
640 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
642 I40E_RX_SPLIT_TCP_UDP |
644 /* header length validation */
645 if (info->hdr_size > ((2 * 1024) - 64)) {
649 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
651 /* set split mode 10b */
652 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
655 /* databuffer length validation */
656 if (info->databuffer_size > ((16 * 1024) - 128)) {
660 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
662 /* max pkt. length validation */
663 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
667 rx_ctx.rxmax = info->max_pkt_size;
669 /* always enable 32-byte descriptors */
673 rx_ctx.lrxqthresh = 1;
678 /* clear the context in the HMC */
679 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
681 dev_err(&pf->pdev->dev,
682 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
688 /* set the context in the HMC */
689 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
691 dev_err(&pf->pdev->dev,
692 "Failed to set VF LAN Rx queue context %d error: %d\n",
704 * @vf: pointer to the VF info
705 * @idx: VSI index, applies only for ADq mode, zero otherwise
707 * alloc VF VSI context & resources
709 static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
711 struct i40e_mac_filter *f = NULL;
712 struct i40e_pf *pf = vf->pf;
713 struct i40e_vsi *vsi;
717 vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
721 dev_err(&pf->pdev->dev,
722 "add vsi failed for VF %d, aq_err %d\n",
723 vf->vf_id, pf->hw.aq.asq_last_status);
725 goto error_alloc_vsi_res;
729 u64 hena = i40e_pf_get_default_rss_hena(pf);
730 u8 broadcast[ETH_ALEN];
732 vf->lan_vsi_idx = vsi->idx;
733 vf->lan_vsi_id = vsi->id;
734 /* If the port VLAN was configured and the VF driver was
735 * then removed, the VSI port VLAN configuration was
736 * destroyed. Check if there is a port VLAN and restore
737 * the VSI configuration if needed. */
740 if (vf->port_vlan_id)
741 i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
743 spin_lock_bh(&vsi->mac_filter_hash_lock);
744 if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
745 f = i40e_add_mac_filter(vsi,
746 vf->default_lan_addr.addr);
748 dev_info(&pf->pdev->dev,
749 "Could not add MAC filter %pM for VF %d\n",
750 vf->default_lan_addr.addr, vf->vf_id);
752 eth_broadcast_addr(broadcast);
753 f = i40e_add_mac_filter(vsi, broadcast);
755 dev_info(&pf->pdev->dev,
756 "Could not allocate VF broadcast filter\n");
757 spin_unlock_bh(&vsi->mac_filter_hash_lock);
758 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
759 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
760 /* program MAC filter only for the VF VSI */
761 ret = i40e_sync_vsi_filters(vsi);
763 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
766 /* store the VSI index and id for ADq, and don't apply the MAC filter */
767 if (vf->adq_enabled) {
768 vf->ch[idx].vsi_idx = vsi->idx;
769 vf->ch[idx].vsi_id = vsi->id;
772 /* Set VF bandwidth if specified */
774 max_tx_rate = vf->tx_rate;
775 } else if (vf->ch[idx].max_tx_rate) {
776 max_tx_rate = vf->ch[idx].max_tx_rate;
780 max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
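/* Illustrative, per the upstream definition of I40E_BW_CREDIT_DIVISOR
 * (50): firmware accounts bandwidth in 50 Mbps credits, so a 500 Mbps
 * max_tx_rate becomes 10 credits here.
 */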
781 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
782 max_tx_rate, 0, NULL);
784 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
793 * i40e_map_pf_queues_to_vsi
794 * @vf: pointer to the VF info
796 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
797 * function takes care of the first part, VSILAN_QTABLE, mapping PF queues to the VSI.
799 static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
801 struct i40e_pf *pf = vf->pf;
802 struct i40e_hw *hw = &pf->hw;
803 u32 reg, num_tc = 1; /* VF has at least one traffic class */
810 for (i = 0; i < num_tc; i++) {
811 if (vf->adq_enabled) {
812 qps = vf->ch[i].num_qps;
813 vsi_id = vf->ch[i].vsi_id;
815 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
816 vsi_id = vf->lan_vsi_id;
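/* Note (illustrative): each VSILAN_QTABLE register holds two queue
 * ids, so the loop below visits queue pairs two at a time (j * 2 and
 * j * 2 + 1) and packs both lookups into a single register write.
 */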
819 for (j = 0; j < 7; j++) {
824 u16 qid = i40e_vc_get_pf_queue_id(vf,
828 qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
832 i40e_write_rx_ctl(hw,
833 I40E_VSILAN_QTABLE(j, vsi_id),
840 * i40e_map_pf_to_vf_queues
841 * @vf: pointer to the VF info
843 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
844 * function takes care of the second part, VPLAN_QTABLE, and completes the VF mappings.
846 static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
848 struct i40e_pf *pf = vf->pf;
849 struct i40e_hw *hw = &pf->hw;
850 u32 reg, total_qps = 0;
851 u32 qps, num_tc = 1; /* VF has at least one traffic class */
858 for (i = 0; i < num_tc; i++) {
859 if (vf->adq_enabled) {
860 qps = vf->ch[i].num_qps;
861 vsi_id = vf->ch[i].vsi_id;
863 qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
864 vsi_id = vf->lan_vsi_id;
867 for (j = 0; j < qps; j++) {
868 qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
870 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
871 wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
879 * i40e_enable_vf_mappings
880 * @vf: pointer to the VF info
884 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
886 struct i40e_pf *pf = vf->pf;
887 struct i40e_hw *hw = &pf->hw;
890 /* Tell the hardware we're using noncontiguous mapping. HW requires
891 * that VF queues be mapped using this method, even when they are
892 * contiguous in real life
894 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
895 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
897 /* enable VF vplan_qtable mappings */
898 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
899 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
901 i40e_map_pf_to_vf_queues(vf);
902 i40e_map_pf_queues_to_vsi(vf);
908 * i40e_disable_vf_mappings
909 * @vf: pointer to the VF info
911 * disable VF mappings
913 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
915 struct i40e_pf *pf = vf->pf;
916 struct i40e_hw *hw = &pf->hw;
919 /* disable qp mappings */
920 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
921 for (i = 0; i < I40E_MAX_VSI_QP; i++)
922 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
923 I40E_QUEUE_END_OF_LIST);
929 * @vf: pointer to the VF info
933 static void i40e_free_vf_res(struct i40e_vf *vf)
935 struct i40e_pf *pf = vf->pf;
936 struct i40e_hw *hw = &pf->hw;
940 /* Start by disabling VF's configuration API to prevent the OS from
941 * accessing the VF's VSI after it's freed / invalidated.
943 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
945 /* It's possible the VF had requested more queues than the default, so
946 * do the accounting here when we're about to free them.
948 if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
949 pf->queues_left += vf->num_queue_pairs -
950 I40E_DEFAULT_QUEUES_PER_VF;
953 /* free vsi & disconnect it from the parent uplink */
954 if (vf->lan_vsi_idx) {
955 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
960 /* do the accounting and remove the additional ADq VSIs */
961 if (vf->adq_enabled && vf->ch[0].vsi_idx) {
962 for (j = 0; j < vf->num_tc; j++) {
963 /* At this point VSI0 is already released, so don't
964 * release it again; only clear the corresponding
965 * structure variables */
968 i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
969 vf->ch[j].vsi_idx = 0;
970 vf->ch[j].vsi_id = 0;
973 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
975 /* disable interrupts so the VF starts in a known state */
976 for (i = 0; i < msix_vf; i++) {
977 /* format is the same for both registers */
979 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
981 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
984 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
988 /* clear the irq settings */
989 for (i = 0; i < msix_vf; i++) {
990 /* format is the same for both registers */
992 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
994 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
997 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
998 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
999 wr32(hw, reg_idx, reg);
1002 /* reset some of the state variables that keep track of the resources */
1003 vf->num_queue_pairs = 0;
1004 clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1005 clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1010 * @vf: pointer to the VF info
1012 * allocate VF resources
1014 static int i40e_alloc_vf_res(struct i40e_vf *vf)
1016 struct i40e_pf *pf = vf->pf;
1017 int total_queue_pairs = 0;
1020 if (vf->num_req_queues &&
1021 vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
1022 pf->num_vf_qps = vf->num_req_queues;
1024 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
1026 /* allocate hw vsi context & associated resources */
1027 ret = i40e_alloc_vsi_res(vf, 0);
1030 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
1032 /* allocate additional VSIs based on tc information for ADq */
1033 if (vf->adq_enabled) {
1034 if (pf->queues_left >=
1035 (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
1036 /* TC 0 always belongs to VF VSI */
1037 for (idx = 1; idx < vf->num_tc; idx++) {
1038 ret = i40e_alloc_vsi_res(vf, idx);
1042 /* send the correct number of queues */
1043 total_queue_pairs = I40E_MAX_VF_QUEUES;
1045 dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
1047 vf->adq_enabled = false;
1051 /* We account for each VF to get a default number of queue pairs. If
1052 * the VF has now requested more, we need to account for that to make
1053 * certain we never request more queues than we actually have left available. */
1056 if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
1058 total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;
1061 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1063 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
1065 /* store the total qps number for runtime VF request validation */
1068 vf->num_queue_pairs = total_queue_pairs;
1070 /* VF is now completely initialized */
1071 set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1075 i40e_free_vf_res(vf);
1080 #define VF_DEVICE_STATUS 0xAA
1081 #define VF_TRANS_PENDING_MASK 0x20
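/* Background (illustrative): through the PF_PCI_CIAA/CIAD indirection
 * these constants address the VF's PCIe Device Status register, whose
 * bit 5 (0x20) is Transactions Pending; the poll below waits for the
 * VF to retire all outstanding PCIe transactions.
 */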
1083 * i40e_quiesce_vf_pci
1084 * @vf: pointer to the VF structure
1086 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
1087 * if the transactions never clear.
1089 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
1091 struct i40e_pf *pf = vf->pf;
1092 struct i40e_hw *hw = &pf->hw;
1096 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
1098 wr32(hw, I40E_PF_PCI_CIAA,
1099 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
1100 for (i = 0; i < 100; i++) {
1101 reg = rd32(hw, I40E_PF_PCI_CIAD);
1102 if ((reg & VF_TRANS_PENDING_MASK) == 0)
1110 * i40e_getnum_vf_vsi_vlan_filters
1111 * @vsi: pointer to the VSI
1113 * called to get the number of VLANs offloaded on this VF
1115 static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1117 struct i40e_mac_filter *f;
1118 u16 num_vlans = 0, bkt;
1120 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1121 if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1129 * i40e_get_vlan_list_sync
1130 * @vsi: pointer to the VSI
1131 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
1132 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
1133 * This array is allocated here, but has to be freed by the caller.
1135 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
1137 static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
1140 struct i40e_mac_filter *f;
1144 spin_lock_bh(&vsi->mac_filter_hash_lock);
1145 *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
1146 *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
1150 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1151 if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1153 (*vlan_list)[i++] = f->vlan;
1156 spin_unlock_bh(&vsi->mac_filter_hash_lock);
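/* Caller pattern (sketch): the VLAN array is allocated above with
 * GFP_ATOMIC under the filter lock and ownership passes to the
 * caller, e.g.:
 *
 *	i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
 *	if (vl) {
 *		...apply vl[0..num_vlans - 1]...
 *		kfree(vl);
 *	}
 */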
1160 * i40e_set_vsi_promisc
1161 * @vf: pointer to the VF struct
1163 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
1165 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
1167 * @vl: List of VLANs - apply filter for given VLANs
1168 * @num_vlans: Number of elements in @vl
1171 i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
1172 bool unicast_enable, s16 *vl, u16 num_vlans)
1174 i40e_status aq_ret, aq_tmp = 0;
1175 struct i40e_pf *pf = vf->pf;
1176 struct i40e_hw *hw = &pf->hw;
1179 /* No VLANs to set promisc on; set it on the VSI itself */
1180 if (!num_vlans || !vl) {
1181 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
1185 int aq_err = pf->hw.aq.asq_last_status;
1187 dev_err(&pf->pdev->dev,
1188 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1190 i40e_stat_str(&pf->hw, aq_ret),
1191 i40e_aq_str(&pf->hw, aq_err));
1196 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
1201 int aq_err = pf->hw.aq.asq_last_status;
1203 dev_err(&pf->pdev->dev,
1204 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1206 i40e_stat_str(&pf->hw, aq_ret),
1207 i40e_aq_str(&pf->hw, aq_err));
1213 for (i = 0; i < num_vlans; i++) {
1214 aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
1218 int aq_err = pf->hw.aq.asq_last_status;
1220 dev_err(&pf->pdev->dev,
1221 "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1223 i40e_stat_str(&pf->hw, aq_ret),
1224 i40e_aq_str(&pf->hw, aq_err));
1230 aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
1234 int aq_err = pf->hw.aq.asq_last_status;
1236 dev_err(&pf->pdev->dev,
1237 "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
1239 i40e_stat_str(&pf->hw, aq_ret),
1240 i40e_aq_str(&pf->hw, aq_err));
1254 * i40e_config_vf_promiscuous_mode
1255 * @vf: pointer to the VF info
1257 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
1258 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
1260 * Called from the VF to configure the promiscuous mode of
1261 * VF VSIs and from the VF reset path to reset promiscuous mode.
1263 static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
1268 i40e_status aq_ret = I40E_SUCCESS;
1269 struct i40e_pf *pf = vf->pf;
1270 struct i40e_vsi *vsi;
1274 vsi = i40e_find_vsi_from_id(pf, vsi_id);
1275 if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
1276 return I40E_ERR_PARAM;
1278 if (vf->port_vlan_id) {
1279 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
1280 alluni, &vf->port_vlan_id, 1);
1282 } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1283 i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
1286 return I40E_ERR_NO_MEMORY;
1288 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1294 /* no VLANs to set promisc on; set it on the VSI itself */
1295 aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
1301 * i40e_trigger_vf_reset
1302 * @vf: pointer to the VF structure
1303 * @flr: VFLR was issued or not
1305 * Trigger hardware to start a reset for a particular VF. Expects the caller
1306 * to wait the proper amount of time to allow hardware to reset the VF before
1307 * it cleans up and restores VF functionality.
1309 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1311 struct i40e_pf *pf = vf->pf;
1312 struct i40e_hw *hw = &pf->hw;
1313 u32 reg, reg_idx, bit_idx;
1316 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1318 /* Disable VF's configuration API during reset. The flag is re-enabled
1319 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1320 * It's normally disabled in i40e_free_vf_res(), but it's safer
1321 * to do it earlier to give any VF config functions that may
1322 * still be running at this point time to finish.
1324 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1326 /* In the case of a VFLR, the HW has already reset the VF and we
1327 * just need to clean up, so don't hit the VFRTRIG register.
1330 /* reset VF using VPGEN_VFRTRIG reg */
1331 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1332 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1333 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1336 /* clear the VFLR bit in GLGEN_VFLRSTAT */
1337 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1338 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1339 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1342 if (i40e_quiesce_vf_pci(vf))
1343 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
1348 * i40e_cleanup_reset_vf
1349 * @vf: pointer to the VF structure
1351 * Cleanup a VF after the hardware reset is finished. Expects the caller to
1352 * have verified whether the reset is finished properly, and ensure the
1353 * minimum amount of wait time has passed.
1355 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1357 struct i40e_pf *pf = vf->pf;
1358 struct i40e_hw *hw = &pf->hw;
1361 /* disable promisc modes in case they were enabled */
1362 i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);
1364 /* free VF resources to begin resetting the VSI state */
1365 i40e_free_vf_res(vf);
1367 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1368 * By doing this we allow HW to access VF memory at any point. If we
1369 * did it any sooner, HW could access memory while it was being freed
1370 * in i40e_free_vf_res(), causing an IOMMU fault.
1372 * On the other hand, this needs to be done ASAP, because the VF driver
1373 * is waiting for this to happen and may report a timeout. It's
1374 * harmless, but it gets logged into the Guest OS kernel log, so best avoid it.
1377 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1378 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1379 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1381 /* reallocate VF resources to finish resetting the VSI state */
1382 if (!i40e_alloc_vf_res(vf)) {
1383 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1384 i40e_enable_vf_mappings(vf);
1385 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1386 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1387 /* Do not notify the client during VF init */
1388 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1390 i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1394 /* Tell the VF driver the reset is done. This needs to be done only
1395 * after VF has been fully initialized, because the VF driver may
1396 * request resources immediately after setting this flag.
1398 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1403 * @vf: pointer to the VF structure
1404 * @flr: VFLR was issued or not
1406 * Returns true if the VF is in reset, resets successfully, or resets
1407 * are disabled and false otherwise.
1409 bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
1411 struct i40e_pf *pf = vf->pf;
1412 struct i40e_hw *hw = &pf->hw;
1417 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
1420 /* If the VFs have been disabled, this means something else is
1421 * resetting the VF, so we shouldn't continue.
1423 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1426 i40e_trigger_vf_reset(vf, flr);
1428 /* poll VPGEN_VFRSTAT reg to make sure
1429 * that reset is complete
1431 for (i = 0; i < 10; i++) {
1432 /* VF reset requires driver to first reset the VF and then
1433 * poll the status register to make sure that the reset
1434 * completed successfully. Due to internal HW FIFO flushes,
1435 * we must wait 10ms before the register will be valid.
1437 usleep_range(10000, 20000);
1438 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1439 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1446 usleep_range(10000, 20000);
1449 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1451 usleep_range(10000, 20000);
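/* Timing note (illustrative): the poll above makes at most 10 passes
 * of 10-20 ms each, so a stuck VF is declared after roughly
 * 100-200 ms, in line with the 200 ms bound mentioned in
 * i40e_vc_disable_vf().
 */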
1453 /* On initial reset, we don't have any queues to disable */
1454 if (vf->lan_vsi_idx != 0)
1455 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1457 i40e_cleanup_reset_vf(vf);
1460 clear_bit(__I40E_VF_DISABLE, pf->state);
1466 * i40e_reset_all_vfs
1467 * @pf: pointer to the PF structure
1468 * @flr: VFLR was issued or not
1470 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1471 * VF, then do all the waiting in one chunk, and finally finish restoring each
1472 * VF after the wait. This is useful during PF routines which need to reset
1473 * all VFs, as otherwise they would have to perform these resets serially.
1475 * Returns true if any VFs were reset, and false otherwise.
1477 bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1479 struct i40e_hw *hw = &pf->hw;
1484 /* If we don't have any VFs, then there is nothing to reset */
1485 if (!pf->num_alloc_vfs)
1488 /* If VFs have been disabled, there is no need to reset */
1489 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1492 /* Begin reset on all VFs at once */
1493 for (v = 0; v < pf->num_alloc_vfs; v++)
1494 i40e_trigger_vf_reset(&pf->vf[v], flr);
1496 /* HW requires some time to make sure it can flush the FIFO for a VF
1497 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1498 * sequence to make sure that it has completed. We'll keep track of
1499 * the VFs using a simple iterator that increments once that VF has
1500 * finished resetting.
1502 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1503 usleep_range(10000, 20000);
1505 /* Check each VF in sequence, beginning with the VF that failed
1506 * the previous check.
1508 while (v < pf->num_alloc_vfs) {
1510 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1511 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1514 /* If the current VF has finished resetting, move on
1515 * to the next VF in sequence.
1522 usleep_range(10000, 20000);
1524 /* Display a warning if at least one VF didn't manage to reset in
1525 * time, but continue on with the operation.
1527 if (v < pf->num_alloc_vfs)
1528 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1530 usleep_range(10000, 20000);
1532 /* Begin disabling all the rings associated with VFs, but do not wait for them to finish yet */
1535 for (v = 0; v < pf->num_alloc_vfs; v++) {
1536 /* On initial reset, we don't have any queues to disable */
1537 if (pf->vf[v].lan_vsi_idx == 0)
1540 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1543 /* Now that we've notified HW to disable all of the VF rings, wait
1544 * until they finish.
1546 for (v = 0; v < pf->num_alloc_vfs; v++) {
1547 /* On initial reset, we don't have any queues to disable */
1548 if (pf->vf[v].lan_vsi_idx == 0)
1551 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1554 /* HW may need up to 50ms to finish disabling the RX queues. We
1555 * minimize the wait by delaying only once for all VFs.
1559 /* Finish the reset on each VF */
1560 for (v = 0; v < pf->num_alloc_vfs; v++)
1561 i40e_cleanup_reset_vf(&pf->vf[v]);
1564 clear_bit(__I40E_VF_DISABLE, pf->state);
1571 * @pf: pointer to the PF structure
1575 void i40e_free_vfs(struct i40e_pf *pf)
1577 struct i40e_hw *hw = &pf->hw;
1578 u32 reg_idx, bit_idx;
1583 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1584 usleep_range(1000, 2000);
1586 i40e_notify_client_of_vf_enable(pf, 0);
1588 /* Disable IOV before freeing resources. This lets any VF drivers
1589 * running in the host get themselves cleaned up before we yank
1590 * the carpet out from underneath their feet.
1592 if (!pci_vfs_assigned(pf->pdev))
1593 pci_disable_sriov(pf->pdev);
1595 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1597 /* Amortize wait time by stopping all VFs at the same time */
1598 for (i = 0; i < pf->num_alloc_vfs; i++) {
1599 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1602 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1605 for (i = 0; i < pf->num_alloc_vfs; i++) {
1606 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1609 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1612 /* free up VF resources */
1613 tmp = pf->num_alloc_vfs;
1614 pf->num_alloc_vfs = 0;
1615 for (i = 0; i < tmp; i++) {
1616 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1617 i40e_free_vf_res(&pf->vf[i]);
1618 /* disable qp mappings */
1619 i40e_disable_vf_mappings(&pf->vf[i]);
1625 /* This check is for when the driver is unloaded while VFs are
1626 * assigned. Setting the number of VFs to 0 through sysfs is caught
1627 * before this function ever gets called.
1629 if (!pci_vfs_assigned(pf->pdev)) {
1630 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
1631 * work correctly when SR-IOV gets re-enabled.
1633 for (vf_id = 0; vf_id < tmp; vf_id++) {
1634 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1635 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1636 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1639 clear_bit(__I40E_VF_DISABLE, pf->state);
1642 #ifdef CONFIG_PCI_IOV
1645 * @pf: pointer to the PF structure
1646 * @num_alloc_vfs: number of VFs to allocate
1648 * allocate VF resources
1650 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1652 struct i40e_vf *vfs;
1655 /* Disable interrupt 0 so we don't try to handle the VFLR. */
1656 i40e_irq_dynamic_disable_icr0(pf);
1658 /* Check to see if we're just allocating resources for extant VFs */
1659 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1660 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1662 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1663 pf->num_alloc_vfs = 0;
1667 /* allocate memory */
1668 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1675 /* apply default profile */
1676 for (i = 0; i < num_alloc_vfs; i++) {
1678 vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1681 /* assign default capabilities */
1682 set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1683 vfs[i].spoofchk = true;
1685 set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1688 pf->num_alloc_vfs = num_alloc_vfs;
1690 /* VF resources get allocated during reset */
1691 i40e_reset_all_vfs(pf, false);
1693 i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1699 /* Re-enable interrupt 0. */
1700 i40e_irq_dynamic_enable_icr0(pf);
1706 * i40e_pci_sriov_enable
1707 * @pdev: pointer to a pci_dev structure
1708 * @num_vfs: number of VFs to allocate
1710 * Enable or change the number of VFs
1712 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1714 #ifdef CONFIG_PCI_IOV
1715 struct i40e_pf *pf = pci_get_drvdata(pdev);
1716 int pre_existing_vfs = pci_num_vf(pdev);
1719 if (test_bit(__I40E_TESTING, pf->state)) {
1720 dev_warn(&pdev->dev,
1721 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
1726 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1728 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1731 if (num_vfs > pf->num_req_vfs) {
1732 dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1733 num_vfs, pf->num_req_vfs);
1738 dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1739 err = i40e_alloc_vfs(pf, num_vfs);
1741 dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1755 * i40e_pci_sriov_configure
1756 * @pdev: pointer to a pci_dev structure
1757 * @num_vfs: number of VFs to allocate
1759 * Enable or change the number of VFs. Called when the user updates the number of VFs in sysfs.
1762 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1764 struct i40e_pf *pf = pci_get_drvdata(pdev);
1767 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
1768 dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
1773 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1774 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
1775 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1777 ret = i40e_pci_sriov_enable(pdev, num_vfs);
1778 goto sriov_configure_out;
1781 if (!pci_vfs_assigned(pf->pdev)) {
1783 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1784 i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
1786 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1788 goto sriov_configure_out;
1790 sriov_configure_out:
1791 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
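/* Usage note (illustrative): this is the driver's PCI sriov_configure
 * callback, reached from sysfs, e.g.:
 *
 *	echo 4 > /sys/bus/pci/devices/<bdf>/sriov_numvfs
 *
 * A nonzero count enables or resizes VFs (switching to VEB mode first
 * if needed); zero frees them, unless some VFs are still assigned to
 * VMs.
 */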
1795 /***********************virtual channel routines******************/
1798 * i40e_vc_send_msg_to_vf
1799 * @vf: pointer to the VF info
1800 * @v_opcode: virtual channel opcode
1801 * @v_retval: virtual channel return value
1802 * @msg: pointer to the msg buffer
1803 * @msglen: msg length
1807 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1808 u32 v_retval, u8 *msg, u16 msglen)
1815 /* validate the request */
1816 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1821 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1823 /* single place to detect unsuccessful return values */
1825 vf->num_invalid_msgs++;
1826 dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1827 vf->vf_id, v_opcode, v_retval);
1828 if (vf->num_invalid_msgs >
1829 I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1830 dev_err(&pf->pdev->dev,
1831 "Number of invalid messages exceeded for VF %d\n",
1833 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
1834 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1837 vf->num_valid_msgs++;
1838 /* reset the invalid counter if a valid message is received */
1839 vf->num_invalid_msgs = 0;
1842 aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1845 dev_info(&pf->pdev->dev,
1846 "Unable to send the message to VF %d aq_err %d\n",
1847 vf->vf_id, pf->hw.aq.asq_last_status);
1855 * i40e_vc_send_resp_to_vf
1856 * @vf: pointer to the VF info
1857 * @opcode: operation code
1858 * @retval: return value
1860 * send resp msg to VF
1862 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1863 enum virtchnl_ops opcode,
1866 return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1870 * i40e_vc_get_version_msg
1871 * @vf: pointer to the VF info
1872 * @msg: pointer to the msg buffer
1874 * called from the VF to request the API version used by the PF
1876 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1878 struct virtchnl_version_info info = {
1879 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1882 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1883 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1884 if (VF_IS_V10(&vf->vf_ver))
1885 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1886 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1887 I40E_SUCCESS, (u8 *)&info,
1888 sizeof(struct virtchnl_version_info));
1892 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
1893 * @vf: pointer to VF structure
1895 static void i40e_del_qch(struct i40e_vf *vf)
1897 struct i40e_pf *pf = vf->pf;
1900 /* first element in the array belongs to primary VF VSI and we shouldn't
1901 * delete it. We should, however, delete the rest of the VSIs created
1903 for (i = 1; i < vf->num_tc; i++) {
1904 if (vf->ch[i].vsi_idx) {
1905 i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
1906 vf->ch[i].vsi_idx = 0;
1907 vf->ch[i].vsi_id = 0;
1913 * i40e_vc_get_vf_resources_msg
1914 * @vf: pointer to the VF info
1915 * @msg: pointer to the msg buffer
1917 * called from the VF to request its resources
1919 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1921 struct virtchnl_vf_resource *vfres = NULL;
1922 struct i40e_pf *pf = vf->pf;
1923 i40e_status aq_ret = 0;
1924 struct i40e_vsi *vsi;
1929 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1930 aq_ret = I40E_ERR_PARAM;
1934 len = struct_size(vfres, vsi_res, num_vsis);
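/* struct_size() computes sizeof(*vfres) plus num_vsis trailing
 * vsi_res[] elements with overflow checking, equivalent to the
 * open-coded multiplication but safer.
 */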
1935 vfres = kzalloc(len, GFP_KERNEL);
1937 aq_ret = I40E_ERR_NO_MEMORY;
1941 if (VF_IS_V11(&vf->vf_ver))
1942 vf->driver_caps = *(u32 *)msg;
1944 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1945 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1946 VIRTCHNL_VF_OFFLOAD_VLAN;
1948 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1949 vsi = pf->vsi[vf->lan_vsi_idx];
1950 if (!vsi->info.pvid)
1951 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1953 if (i40e_vf_client_capable(pf, vf->vf_id) &&
1954 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1955 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1956 set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1958 clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1961 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1962 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1964 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1965 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1966 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1968 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1971 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1972 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1973 vfres->vf_cap_flags |=
1974 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1977 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1978 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1980 if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1981 (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1982 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1984 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1985 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1986 dev_err(&pf->pdev->dev,
1987 "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1989 aq_ret = I40E_ERR_PARAM;
1992 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1995 if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1996 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1997 vfres->vf_cap_flags |=
1998 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2001 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2002 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2004 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
2005 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
2007 vfres->num_vsis = num_vsis;
2008 vfres->num_queue_pairs = vf->num_queue_pairs;
2009 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
2010 vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
2011 vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
2013 if (vf->lan_vsi_idx) {
2014 vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
2015 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2016 vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
2017 /* VFs only use TC 0 */
2018 vfres->vsi_res[0].qset_handle
2019 = le16_to_cpu(vsi->info.qs_handle[0]);
2020 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2021 vf->default_lan_addr.addr);
2023 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
2026 /* send the response back to the VF */
2027 ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
2028 aq_ret, (u8 *)vfres, len);
2035 * i40e_vc_reset_vf_msg
2036 * @vf: pointer to the VF info
2038 * called from the VF to reset itself;
2039 * unlike other virtchnl messages, the PF driver
2040 * doesn't send a response back to the VF
2042 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
2044 if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2045 i40e_reset_vf(vf, false);
2049 * i40e_vc_config_promiscuous_mode_msg
2050 * @vf: pointer to the VF info
2051 * @msg: pointer to the msg buffer
2053 * called from the VF to configure the promiscuous mode of VF VSIs
2056 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
2058 struct virtchnl_promisc_info *info =
2059 (struct virtchnl_promisc_info *)msg;
2060 struct i40e_pf *pf = vf->pf;
2061 i40e_status aq_ret = 0;
2062 bool allmulti = false;
2063 bool alluni = false;
2065 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2066 aq_ret = I40E_ERR_PARAM;
2069 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2070 dev_err(&pf->pdev->dev,
2071 "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2074 /* Lie to the VF on purpose, because this is an error we can
2075 * ignore. An unprivileged VF is not a virtual channel error.
2081 if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
2082 aq_ret = I40E_ERR_PARAM;
2086 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2087 aq_ret = I40E_ERR_PARAM;
2091 /* Multicast promiscuous handling */
2092 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2095 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2097 aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
2103 if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
2105 dev_info(&pf->pdev->dev,
2106 "VF %d successfully set multicast promiscuous mode\n",
2108 } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
2110 dev_info(&pf->pdev->dev,
2111 "VF %d successfully unset multicast promiscuous mode\n",
2115 if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
2117 dev_info(&pf->pdev->dev,
2118 "VF %d successfully set unicast promiscuous mode\n",
2120 } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
2122 dev_info(&pf->pdev->dev,
2123 "VF %d successfully unset unicast promiscuous mode\n",
2127 /* send the response to the VF */
2128 return i40e_vc_send_resp_to_vf(vf,
2129 VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2134 * i40e_vc_config_queues_msg
2135 * @vf: pointer to the VF info
2136 * @msg: pointer to the msg buffer
2138 * called from the VF to configure the rx/tx queues
2141 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2143 struct virtchnl_vsi_queue_config_info *qci =
2144 (struct virtchnl_vsi_queue_config_info *)msg;
2145 struct virtchnl_queue_pair_info *qpi;
2146 struct i40e_pf *pf = vf->pf;
2147 u16 vsi_id, vsi_queue_id = 0;
2148 u16 num_qps_all = 0;
2149 i40e_status aq_ret = 0;
2150 int i, j = 0, idx = 0;
2152 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2153 aq_ret = I40E_ERR_PARAM;
2157 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2158 aq_ret = I40E_ERR_PARAM;
2162 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2163 aq_ret = I40E_ERR_PARAM;
2167 if (vf->adq_enabled) {
2168 for (i = 0; i < I40E_MAX_VF_VSI; i++)
2169 num_qps_all += vf->ch[i].num_qps;
2170 if (num_qps_all != qci->num_queue_pairs) {
2171 aq_ret = I40E_ERR_PARAM;
2176 vsi_id = qci->vsi_id;
2178 for (i = 0; i < qci->num_queue_pairs; i++) {
2179 qpi = &qci->qpair[i];
2181 if (!vf->adq_enabled) {
2182 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2183 qpi->txq.queue_id)) {
2184 aq_ret = I40E_ERR_PARAM;
2188 vsi_queue_id = qpi->txq.queue_id;
2190 if (qpi->txq.vsi_id != qci->vsi_id ||
2191 qpi->rxq.vsi_id != qci->vsi_id ||
2192 qpi->rxq.queue_id != vsi_queue_id) {
2193 aq_ret = I40E_ERR_PARAM;
2198 if (vf->adq_enabled) {
2199 if (idx >= ARRAY_SIZE(vf->ch)) {
2200 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2203 vsi_id = vf->ch[idx].vsi_id;
2206 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2208 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2210 aq_ret = I40E_ERR_PARAM;
2214 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2215 * VF does not know about these additional VSIs and all
2216 * it cares about is its own queues. PF configures these queues
2217 * to the appropriate VSIs based on TC mapping
2219 if (vf->adq_enabled) {
2220 if (idx >= ARRAY_SIZE(vf->ch)) {
2221 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2224 if (j == (vf->ch[idx].num_qps - 1)) {
2226 j = 0; /* resetting the queue count */
2234 /* set VSI num_queue_pairs in use to the number configured by the VF */
2235 if (!vf->adq_enabled) {
2236 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2237 qci->num_queue_pairs;
2239 for (i = 0; i < vf->num_tc; i++)
2240 pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
2245 /* send the response to the VF */
2246 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2251 * i40e_validate_queue_map - check queue map is valid
2252 * @vf: the VF structure pointer
2254 * @queuemap: Tx or Rx queue map
2256 * check if the Tx or Rx queue map is valid
2258 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2259 unsigned long queuemap)
2261 u16 vsi_queue_id, queue_id;
2263 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
2264 if (vf->adq_enabled) {
2265 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2266 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2268 queue_id = vsi_queue_id;
2271 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2279 * i40e_vc_config_irq_map_msg
2280 * @vf: pointer to the VF info
2281 * @msg: pointer to the msg buffer
2283 * called from the VF to configure the irq to queue map
2286 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2288 struct virtchnl_irq_map_info *irqmap_info =
2289 (struct virtchnl_irq_map_info *)msg;
2290 struct virtchnl_vector_map *map;
2292 i40e_status aq_ret = 0;
2295 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2296 aq_ret = I40E_ERR_PARAM;
2300 if (irqmap_info->num_vectors >
2301 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2302 aq_ret = I40E_ERR_PARAM;
2306 for (i = 0; i < irqmap_info->num_vectors; i++) {
2307 map = &irqmap_info->vecmap[i];
2308 /* validate msg params */
2309 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2310 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2311 aq_ret = I40E_ERR_PARAM;
2314 vsi_id = map->vsi_id;
2316 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2317 aq_ret = I40E_ERR_PARAM;
2321 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2322 aq_ret = I40E_ERR_PARAM;
2326 i40e_config_irq_link_list(vf, vsi_id, map);
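/* Worked example (illustrative, assuming I40E_MAX_VF_VSI == 4 and
 * I40E_DEFAULT_QUEUES_PER_VF == 4): in i40e_validate_queue_map() with
 * ADq enabled, VF-relative queue 5 is checked against
 * ch[5 / 4].vsi_id as its VSI with 5 % 4 == 1 as the VSI-relative
 * queue id, matching the translation done by i40e_get_real_pf_qid().
 */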
2329 /* send the response to the VF */
2330 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2335 * i40e_ctrl_vf_tx_rings
2336 * @vsi: the SRIOV VSI being configured
2337 * @q_map: bit map of the queues to be enabled
2338 * @enable: start or stop the queue
2340 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2343 struct i40e_pf *pf = vsi->back;
2347 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2348 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2349 vsi->base_queue + q_id,
2350 false /*is xdp*/, enable);
2358 * i40e_ctrl_vf_rx_rings
2359 * @vsi: the SRIOV VSI being configured
2360 * @q_map: bit map of the queues to be enabled
2361 * @enable: start or stop the queue
2363 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2366 struct i40e_pf *pf = vsi->back;
2370 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2371 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2380 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2381 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2383 * Returns true if validation was successful, else false.
2385 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2387 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2388 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2389 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
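/*
 * Standalone sketch of the bitmap check above: a request is rejected when
 * no queue is selected at all, or when any bit at or above the per-VF queue
 * limit is set (x >= BIT(n) is true exactly when some bit >= n is set).
 * EG_MAX_VF_QUEUES mirrors I40E_MAX_VF_QUEUES, assumed to be 16 here.
 */
#include <stdbool.h>
#include <stdint.h>

#define EG_MAX_VF_QUEUES 16

static bool vqs_bitmaps_valid(uint32_t rx_queues, uint32_t tx_queues)
{
	if ((!rx_queues && !tx_queues) ||		/* nothing selected */
	    rx_queues >= (1u << EG_MAX_VF_QUEUES) ||	/* bit beyond range */
	    tx_queues >= (1u << EG_MAX_VF_QUEUES))
		return false;

	return true;
}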
2396 * i40e_vc_enable_queues_msg
2397 * @vf: pointer to the VF info
2398 * @msg: pointer to the msg buffer
2400 * called from the VF to enable all or specific queue(s)
2402 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2404 struct virtchnl_queue_select *vqs =
2405 (struct virtchnl_queue_select *)msg;
2406 struct i40e_pf *pf = vf->pf;
2407 i40e_status aq_ret = 0;
2410 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2411 aq_ret = I40E_ERR_PARAM;
2415 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2416 aq_ret = I40E_ERR_PARAM;
2420 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2421 aq_ret = I40E_ERR_PARAM;
2425 /* Use the queue bit map sent by the VF */
2426 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2428 aq_ret = I40E_ERR_TIMEOUT;
2431 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2433 aq_ret = I40E_ERR_TIMEOUT;
2437 /* need to start the rings for additional ADq VSIs as well */
2438 if (vf->adq_enabled) {
2439 /* index zero belongs to the LAN VSI */
2440 for (i = 1; i < vf->num_tc; i++) {
2441 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2442 aq_ret = I40E_ERR_TIMEOUT;
2446 vf->queues_enabled = true;
2449 /* send the response to the VF */
2450 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2455 * i40e_vc_disable_queues_msg
2456 * @vf: pointer to the VF info
2457 * @msg: pointer to the msg buffer
2459 * called from the VF to disable all or specific queue(s)
2462 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2464 struct virtchnl_queue_select *vqs =
2465 (struct virtchnl_queue_select *)msg;
2466 struct i40e_pf *pf = vf->pf;
2467 i40e_status aq_ret = 0;
2469 /* Immediately mark queues as disabled */
2470 vf->queues_enabled = false;
2472 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2473 aq_ret = I40E_ERR_PARAM;
2477 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2478 aq_ret = I40E_ERR_PARAM;
2482 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2483 aq_ret = I40E_ERR_PARAM;
2487 /* Use the queue bit map sent by the VF */
2488 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2490 aq_ret = I40E_ERR_TIMEOUT;
2493 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2495 aq_ret = I40E_ERR_TIMEOUT;
2499 /* send the response to the VF */
2500 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2505 * i40e_vc_request_queues_msg
2506 * @vf: pointer to the VF info
2507 * @msg: pointer to the msg buffer
2509 * VFs get a default number of queues but can use this message to request a
2510 * different number. If the request is successful, PF will reset the VF and
2511 * return 0. If unsuccessful, PF will send message informing VF of number of
2512 * available queues and return result of sending VF a message.
2514 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2516 struct virtchnl_vf_res_request *vfres =
2517 (struct virtchnl_vf_res_request *)msg;
2518 u16 req_pairs = vfres->num_queue_pairs;
2519 u8 cur_pairs = vf->num_queue_pairs;
2520 struct i40e_pf *pf = vf->pf;
2522 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2525 if (req_pairs > I40E_MAX_VF_QUEUES) {
2526 dev_err(&pf->pdev->dev,
2527 "VF %d tried to request more than %d queues.\n",
2529 I40E_MAX_VF_QUEUES);
2530 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2531 } else if (req_pairs - cur_pairs > pf->queues_left) {
2532 dev_warn(&pf->pdev->dev,
2533 "VF %d requested %d more queues, but only %d left.\n",
2535 req_pairs - cur_pairs,
2537 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2539 /* successful request */
2540 vf->num_req_queues = req_pairs;
2541 i40e_vc_notify_vf_reset(vf);
2542 i40e_reset_vf(vf, false);
2546 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2547 (u8 *)vfres, sizeof(*vfres));
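/*
 * Standalone sketch of the request policy above: a request beyond the
 * hardware maximum, or beyond the queues the PF still has free, is clamped
 * and reported back to the VF; otherwise it is granted and the VF is reset
 * to pick up the new count. All names here are local to the illustration.
 */
static int clamp_queue_request(int req_pairs, int cur_pairs,
			       int queues_left, int max_queues)
{
	if (req_pairs > max_queues)
		return max_queues;		/* cap at the hardware limit */
	if (req_pairs - cur_pairs > queues_left)
		return queues_left + cur_pairs;	/* cap at what is still free */
	return req_pairs;			/* grantable as-is */
}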
2551 * i40e_vc_get_stats_msg
2552 * @vf: pointer to the VF info
2553 * @msg: pointer to the msg buffer
2555 * called from the VF to get vsi stats
2557 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2559 struct virtchnl_queue_select *vqs =
2560 (struct virtchnl_queue_select *)msg;
2561 struct i40e_pf *pf = vf->pf;
2562 struct i40e_eth_stats stats;
2563 i40e_status aq_ret = 0;
2564 struct i40e_vsi *vsi;
2566 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2568 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2569 aq_ret = I40E_ERR_PARAM;
2573 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2574 aq_ret = I40E_ERR_PARAM;
2578 vsi = pf->vsi[vf->lan_vsi_idx];
2580 aq_ret = I40E_ERR_PARAM;
2583 i40e_update_eth_stats(vsi);
2584 stats = vsi->eth_stats;
2587 /* send the response back to the VF */
2588 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2589 (u8 *)&stats, sizeof(stats));
2592 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2593 * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
2595 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2596 #define I40E_VC_MAX_VLAN_PER_VF 16
2599 * i40e_check_vf_permission
2600 * @vf: pointer to the VF info
2601 * @al: MAC address list from virtchnl
2603 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2604 * if any address in the list is not valid. Checks the following conditions:
2606 * 1) broadcast and zero addresses are never valid
2607 * 2) unicast addresses are not allowed if the VMM has administratively set
2608 * the VF MAC address, unless the VF is marked as privileged.
2609 * 3) There is enough space to add all the addresses.
2611 * Note that to guarantee consistency, this function is expected to be called
2612 * while holding the mac_filter_hash_lock, as otherwise the current number of
2613 * addresses might not be accurate.
2615 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2616 struct virtchnl_ether_addr_list *al)
2618 struct i40e_pf *pf = vf->pf;
2619 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2620 int mac2add_cnt = 0;
2623 for (i = 0; i < al->num_elements; i++) {
2624 struct i40e_mac_filter *f;
2625 u8 *addr = al->list[i].addr;
2627 if (is_broadcast_ether_addr(addr) ||
2628 is_zero_ether_addr(addr)) {
2629 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2631 return I40E_ERR_INVALID_MAC_ADDR;
2634 /* If the host VMM administrator has set the VF MAC address
2635 * administratively via the ndo_set_vf_mac command then deny
2636 * permission to the VF to add or delete unicast MAC addresses,
2637 * unless the VF is privileged, in which case it may do so freely.
2638 * The VF may request to set the MAC address filter already
2639 * assigned to it so do not return an error in that case.
2641 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2642 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2643 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2644 dev_err(&pf->pdev->dev,
2645 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2649 /* count the filters that will actually be added */
2650 f = i40e_find_mac(vsi, addr);
2655 /* If this VF is not privileged, then we can't add more than a limited
2656 * number of addresses. Check to make sure that the additions do not
2657 * push us over the limit.
2659 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2660 (i40e_count_filters(vsi) + mac2add_cnt) >
2661 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2662 dev_err(&pf->pdev->dev,
2663 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2670 * i40e_vc_add_mac_addr_msg
2671 * @vf: pointer to the VF info
2672 * @msg: pointer to the msg buffer
2674 * add guest mac address filter
2676 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2678 struct virtchnl_ether_addr_list *al =
2679 (struct virtchnl_ether_addr_list *)msg;
2680 struct i40e_pf *pf = vf->pf;
2681 struct i40e_vsi *vsi = NULL;
2682 i40e_status ret = 0;
2685 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2686 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2687 ret = I40E_ERR_PARAM;
2691 vsi = pf->vsi[vf->lan_vsi_idx];
2693 /* Lock once, because every function inside the for loop accesses the VSI's
2694 * MAC filter list, which must be protected by the same lock.
2696 spin_lock_bh(&vsi->mac_filter_hash_lock);
2698 ret = i40e_check_vf_permission(vf, al);
2700 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2704 /* add new addresses to the list */
2705 for (i = 0; i < al->num_elements; i++) {
2706 struct i40e_mac_filter *f;
2708 f = i40e_find_mac(vsi, al->list[i].addr);
2710 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2713 dev_err(&pf->pdev->dev,
2714 "Unable to add MAC filter %pM for VF %d\n",
2715 al->list[i].addr, vf->vf_id);
2716 ret = I40E_ERR_PARAM;
2717 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2720 if (is_valid_ether_addr(al->list[i].addr) &&
2721 is_zero_ether_addr(vf->default_lan_addr.addr))
2722 ether_addr_copy(vf->default_lan_addr.addr,
2726 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2728 /* program the updated filter list */
2729 ret = i40e_sync_vsi_filters(vsi);
2731 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2735 /* send the response to the VF */
2736 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2741 * i40e_vc_del_mac_addr_msg
2742 * @vf: pointer to the VF info
2743 * @msg: pointer to the msg buffer
2745 * remove guest mac address filter
2747 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2749 struct virtchnl_ether_addr_list *al =
2750 (struct virtchnl_ether_addr_list *)msg;
2751 bool was_unimac_deleted = false;
2752 struct i40e_pf *pf = vf->pf;
2753 struct i40e_vsi *vsi = NULL;
2754 i40e_status ret = 0;
2757 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2758 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2759 ret = I40E_ERR_PARAM;
2763 for (i = 0; i < al->num_elements; i++) {
2764 if (is_broadcast_ether_addr(al->list[i].addr) ||
2765 is_zero_ether_addr(al->list[i].addr)) {
2766 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2767 al->list[i].addr, vf->vf_id);
2768 ret = I40E_ERR_INVALID_MAC_ADDR;
2771 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2772 was_unimac_deleted = true;
2774 vsi = pf->vsi[vf->lan_vsi_idx];
2776 spin_lock_bh(&vsi->mac_filter_hash_lock);
2777 /* delete addresses from the list */
2778 for (i = 0; i < al->num_elements; i++)
2779 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2780 ret = I40E_ERR_INVALID_MAC_ADDR;
2781 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2785 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2787 /* program the updated filter list */
2788 ret = i40e_sync_vsi_filters(vsi);
2790 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2793 if (vf->trusted && was_unimac_deleted) {
2794 struct i40e_mac_filter *f;
2795 struct hlist_node *h;
2799 /* set the last unicast MAC address as the default */
2800 spin_lock_bh(&vsi->mac_filter_hash_lock);
2801 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2802 if (is_valid_ether_addr(f->macaddr))
2803 macaddr = f->macaddr;
2806 ether_addr_copy(vf->default_lan_addr.addr, macaddr);
2807 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2810 /* send the response to the VF */
2811 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
2815 * i40e_vc_add_vlan_msg
2816 * @vf: pointer to the VF info
2817 * @msg: pointer to the msg buffer
2819 * program guest vlan id
2821 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2823 struct virtchnl_vlan_filter_list *vfl =
2824 (struct virtchnl_vlan_filter_list *)msg;
2825 struct i40e_pf *pf = vf->pf;
2826 struct i40e_vsi *vsi = NULL;
2827 i40e_status aq_ret = 0;
2830 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2831 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2832 dev_err(&pf->pdev->dev,
2833 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2836 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2837 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2838 aq_ret = I40E_ERR_PARAM;
2842 for (i = 0; i < vfl->num_elements; i++) {
2843 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2844 aq_ret = I40E_ERR_PARAM;
2845 dev_err(&pf->pdev->dev,
2846 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2850 vsi = pf->vsi[vf->lan_vsi_idx];
2851 if (vsi->info.pvid) {
2852 aq_ret = I40E_ERR_PARAM;
2856 i40e_vlan_stripping_enable(vsi);
2857 for (i = 0; i < vfl->num_elements; i++) {
2858 /* add new VLAN filter */
2859 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2863 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2864 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2868 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2869 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2875 dev_err(&pf->pdev->dev,
2876 "Unable to add VLAN filter %d for VF %d, error %d\n",
2877 vfl->vlan_id[i], vf->vf_id, ret);
2881 /* send the response to the VF */
2882 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2886 * i40e_vc_remove_vlan_msg
2887 * @vf: pointer to the VF info
2888 * @msg: pointer to the msg buffer
2890 * remove programmed guest vlan id
2892 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2894 struct virtchnl_vlan_filter_list *vfl =
2895 (struct virtchnl_vlan_filter_list *)msg;
2896 struct i40e_pf *pf = vf->pf;
2897 struct i40e_vsi *vsi = NULL;
2898 i40e_status aq_ret = 0;
2901 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2902 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2903 aq_ret = I40E_ERR_PARAM;
2907 for (i = 0; i < vfl->num_elements; i++) {
2908 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2909 aq_ret = I40E_ERR_PARAM;
2914 vsi = pf->vsi[vf->lan_vsi_idx];
2915 if (vsi->info.pvid) {
2916 if (vfl->num_elements > 1 || vfl->vlan_id[0])
2917 aq_ret = I40E_ERR_PARAM;
2921 for (i = 0; i < vfl->num_elements; i++) {
2922 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2925 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2926 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2930 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2931 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2938 /* send the response to the VF */
2939 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2943 * i40e_vc_iwarp_msg
2944 * @vf: pointer to the VF info
2945 * @msg: pointer to the msg buffer
2946 * @msglen: msg length
2948 * called from the VF for the iWARP messages
2950 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2952 struct i40e_pf *pf = vf->pf;
2953 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2954 i40e_status aq_ret = 0;
2956 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2957 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2958 aq_ret = I40E_ERR_PARAM;
2962 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2966 /* send the response to the VF */
2967 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2972 * i40e_vc_iwarp_qvmap_msg
2973 * @vf: pointer to the VF info
2974 * @msg: pointer to the msg buffer
2975 * @config: config qvmap or release it
2977 * called from the VF for the iWARP messages
2979 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
2981 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2982 (struct virtchnl_iwarp_qvlist_info *)msg;
2983 i40e_status aq_ret = 0;
2985 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2986 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2987 aq_ret = I40E_ERR_PARAM;
2992 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2993 aq_ret = I40E_ERR_PARAM;
2995 i40e_release_iwarp_qvlist(vf);
2999 /* send the response to the VF */
3000 return i40e_vc_send_resp_to_vf(vf,
3001 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3002 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3007 * i40e_vc_config_rss_key
3008 * @vf: pointer to the VF info
3009 * @msg: pointer to the msg buffer
3011 * Configure the VF's RSS key
3013 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3015 struct virtchnl_rss_key *vrk =
3016 (struct virtchnl_rss_key *)msg;
3017 struct i40e_pf *pf = vf->pf;
3018 struct i40e_vsi *vsi = NULL;
3019 i40e_status aq_ret = 0;
3021 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3022 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3023 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
3024 aq_ret = I40E_ERR_PARAM;
3028 vsi = pf->vsi[vf->lan_vsi_idx];
3029 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3031 /* send the response to the VF */
3032 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3037 * i40e_vc_config_rss_lut
3038 * @vf: pointer to the VF info
3039 * @msg: pointer to the msg buffer
3041 * Configure the VF's RSS LUT
3043 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3045 struct virtchnl_rss_lut *vrl =
3046 (struct virtchnl_rss_lut *)msg;
3047 struct i40e_pf *pf = vf->pf;
3048 struct i40e_vsi *vsi = NULL;
3049 i40e_status aq_ret = 0;
3052 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3053 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3054 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
3055 aq_ret = I40E_ERR_PARAM;
3059 for (i = 0; i < vrl->lut_entries; i++)
3060 if (vrl->lut[i] >= vf->num_queue_pairs) {
3061 aq_ret = I40E_ERR_PARAM;
3065 vsi = pf->vsi[vf->lan_vsi_idx];
3066 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3067 /* send the response to the VF */
3069 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
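/*
 * Standalone sketch of the LUT check above: every lookup-table entry must
 * index one of the VF's own queue pairs, otherwise RSS would steer packets
 * to a queue the VF does not own. Names are local to the illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static bool rss_lut_valid(const uint8_t *lut, int lut_entries,
			  int num_queue_pairs)
{
	int i;

	for (i = 0; i < lut_entries; i++)
		if (lut[i] >= num_queue_pairs)
			return false;	/* entry points past the VF's queues */

	return true;
}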
3074 * i40e_vc_get_rss_hena
3075 * @vf: pointer to the VF info
3076 * @msg: pointer to the msg buffer
3078 * Return the RSS HENA bits allowed by the hardware
3080 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3082 struct virtchnl_rss_hena *vrh = NULL;
3083 struct i40e_pf *pf = vf->pf;
3084 i40e_status aq_ret = 0;
3087 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3088 aq_ret = I40E_ERR_PARAM;
3091 len = sizeof(struct virtchnl_rss_hena);
3093 vrh = kzalloc(len, GFP_KERNEL);
3095 aq_ret = I40E_ERR_NO_MEMORY;
3099 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3101 /* send the response back to the VF */
3102 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3103 aq_ret, (u8 *)vrh, len);
3109 * i40e_vc_set_rss_hena
3110 * @vf: pointer to the VF info
3111 * @msg: pointer to the msg buffer
3113 * Set the RSS HENA bits for the VF
3115 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3117 struct virtchnl_rss_hena *vrh =
3118 (struct virtchnl_rss_hena *)msg;
3119 struct i40e_pf *pf = vf->pf;
3120 struct i40e_hw *hw = &pf->hw;
3121 i40e_status aq_ret = 0;
3123 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3124 aq_ret = I40E_ERR_PARAM;
3127 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3128 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3129 (u32)(vrh->hena >> 32));
3131 /* send the response to the VF */
3133 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
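/*
 * Standalone sketch of the register split above: the 64-bit HENA bitmask is
 * written as two 32-bit halves, the low word to VFQF_HENA1(0, vf) and the
 * high word to VFQF_HENA1(1, vf). Names are local to the illustration.
 */
#include <stdint.h>

static void split_hena(uint64_t hena, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)hena;		/* bits 0-31  */
	*hi = (uint32_t)(hena >> 32);	/* bits 32-63 */
}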
3137 * i40e_vc_enable_vlan_stripping
3138 * @vf: pointer to the VF info
3139 * @msg: pointer to the msg buffer
3141 * Enable vlan header stripping for the VF
3143 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3145 i40e_status aq_ret = 0;
3146 struct i40e_vsi *vsi;
3148 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3149 aq_ret = I40E_ERR_PARAM;
3153 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3154 i40e_vlan_stripping_enable(vsi);
3156 /* send the response to the VF */
3158 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3163 * i40e_vc_disable_vlan_stripping
3164 * @vf: pointer to the VF info
3165 * @msg: pointer to the msg buffer
3167 * Disable vlan header stripping for the VF
3169 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3171 i40e_status aq_ret = 0;
3172 struct i40e_vsi *vsi;
3174 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3175 aq_ret = I40E_ERR_PARAM;
3179 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3180 i40e_vlan_stripping_disable(vsi);
3182 /* send the response to the VF */
3184 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3189 * i40e_validate_cloud_filter
3190 * @vf: pointer to VF structure
3191 * @tc_filter: pointer to filter requested
3193 * This function validates a cloud filter programmed as a TC filter for ADq
3195 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3196 struct virtchnl_filter *tc_filter)
3198 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3199 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3200 struct i40e_pf *pf = vf->pf;
3201 struct i40e_vsi *vsi = NULL;
3202 struct i40e_mac_filter *f;
3203 struct hlist_node *h;
3207 if (!tc_filter->action) {
3208 dev_info(&pf->pdev->dev,
3209 "VF %d: Currently ADq doesn't support Drop Action\n",
3214 /* action_meta is the TC number to which the filter is applied */
3215 if (!tc_filter->action_meta ||
3216 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3217 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3218 vf->vf_id, tc_filter->action_meta);
3222 /* Check whether the filter is programmed for advanced mode or basic mode.
3223 * There are two ADq modes (for VF only):
3224 * 1. Basic mode: intended to allow as many filter options as possible
3225 * to be added to a VF in Non-trusted mode. The main goal is
3226 * to add filters to its own MAC and VLAN id.
3227 * 2. Advanced mode: allows filters to be applied to traffic other than
3228 * its own MAC or VLAN. This mode requires the VF to be
3229 * trusted.
3231 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3232 vsi = pf->vsi[vf->lan_vsi_idx];
3233 f = i40e_find_mac(vsi, data.dst_mac);
3236 dev_info(&pf->pdev->dev,
3237 "Destination MAC %pM doesn't belong to VF %d\n",
3238 data.dst_mac, vf->vf_id);
3243 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3245 if (f->vlan == ntohs(data.vlan_id)) {
3251 dev_info(&pf->pdev->dev,
3252 "VF %d doesn't have any VLAN id %u\n",
3253 vf->vf_id, ntohs(data.vlan_id));
3258 /* Check if VF is trusted */
3259 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3260 dev_err(&pf->pdev->dev,
3261 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3263 return I40E_ERR_CONFIG;
3267 if (mask.dst_mac[0] & data.dst_mac[0]) {
3268 if (is_broadcast_ether_addr(data.dst_mac) ||
3269 is_zero_ether_addr(data.dst_mac)) {
3270 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3271 vf->vf_id, data.dst_mac);
3276 if (mask.src_mac[0] & data.src_mac[0]) {
3277 if (is_broadcast_ether_addr(data.src_mac) ||
3278 is_zero_ether_addr(data.src_mac)) {
3279 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3280 vf->vf_id, data.src_mac);
3285 if (mask.dst_port & data.dst_port) {
3286 if (!data.dst_port) {
3287 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3293 if (mask.src_port & data.src_port) {
3294 if (!data.src_port) {
3295 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3301 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3302 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3303 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3308 if (mask.vlan_id & data.vlan_id) {
3309 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3310 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3316 return I40E_SUCCESS;
3318 return I40E_ERR_CONFIG;
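/*
 * Standalone sketch of the mask/data convention used by the validation
 * above: a field takes part in the match only when (mask & data) is
 * non-zero, and only then is its value sanity-checked. Local names only.
 */
#include <stdbool.h>
#include <stdint.h>

static bool l4_port_ok(uint16_t mask, uint16_t data)
{
	if (!(mask & data))
		return true;	/* field is not part of this filter */

	return data != 0;	/* a matched port must be non-zero */
}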
3322 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3323 * @vf: pointer to the VF info
3324 * @seid: seid of the vsi it is searching for
3326 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3328 struct i40e_pf *pf = vf->pf;
3329 struct i40e_vsi *vsi = NULL;
3332 for (i = 0; i < vf->num_tc ; i++) {
3333 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3334 if (vsi && vsi->seid == seid)
3341 * i40e_del_all_cloud_filters
3342 * @vf: pointer to the VF info
3344 * This function deletes all cloud filters
3346 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3348 struct i40e_cloud_filter *cfilter = NULL;
3349 struct i40e_pf *pf = vf->pf;
3350 struct i40e_vsi *vsi = NULL;
3351 struct hlist_node *node;
3354 hlist_for_each_entry_safe(cfilter, node,
3355 &vf->cloud_filter_list, cloud_node) {
3356 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3359 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3360 vf->vf_id, cfilter->seid);
3364 if (cfilter->dst_port)
3365 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3368 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3370 dev_err(&pf->pdev->dev,
3371 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3372 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3373 i40e_aq_str(&pf->hw,
3374 pf->hw.aq.asq_last_status));
3376 hlist_del(&cfilter->cloud_node);
3378 vf->num_cloud_filters--;
3383 * i40e_vc_del_cloud_filter
3384 * @vf: pointer to the VF info
3385 * @msg: pointer to the msg buffer
3387 * This function deletes a cloud filter programmed as TC filter for ADq
3389 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3391 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3392 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3393 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3394 struct i40e_cloud_filter cfilter, *cf = NULL;
3395 struct i40e_pf *pf = vf->pf;
3396 struct i40e_vsi *vsi = NULL;
3397 struct hlist_node *node;
3398 i40e_status aq_ret = 0;
3401 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3402 aq_ret = I40E_ERR_PARAM;
3406 if (!vf->adq_enabled) {
3407 dev_info(&pf->pdev->dev,
3408 "VF %d: ADq not enabled, can't apply cloud filter\n",
3410 aq_ret = I40E_ERR_PARAM;
3414 if (i40e_validate_cloud_filter(vf, vcf)) {
3415 dev_info(&pf->pdev->dev,
3416 "VF %d: Invalid input, can't apply cloud filter\n",
3418 aq_ret = I40E_ERR_PARAM;
3422 memset(&cfilter, 0, sizeof(cfilter));
3423 /* parse destination mac address */
3424 for (i = 0; i < ETH_ALEN; i++)
3425 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3427 /* parse source mac address */
3428 for (i = 0; i < ETH_ALEN; i++)
3429 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3431 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3432 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3433 cfilter.src_port = mask.src_port & tcf.src_port;
3435 switch (vcf->flow_type) {
3436 case VIRTCHNL_TCP_V4_FLOW:
3437 cfilter.n_proto = ETH_P_IP;
3438 if (mask.dst_ip[0] & tcf.dst_ip[0])
3439 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3440 ARRAY_SIZE(tcf.dst_ip));
3441 else if (mask.src_ip[0] & tcf.dst_ip[0])
3442 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3443 ARRAY_SIZE(tcf.dst_ip));
3445 case VIRTCHNL_TCP_V6_FLOW:
3446 cfilter.n_proto = ETH_P_IPV6;
3447 if (mask.dst_ip[3] & tcf.dst_ip[3])
3448 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3449 sizeof(cfilter.ip.v6.dst_ip6));
3450 if (mask.src_ip[3] & tcf.src_ip[3])
3451 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3452 sizeof(cfilter.ip.v6.src_ip6));
3455 /* TC filter can be configured based on different combinations
3456 * and in this case IP is not a part of filter config
3458 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3462 /* get the VSI to which the TC belongs */
3463 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3464 cfilter.seid = vsi->seid;
3465 cfilter.flags = vcf->field_flags;
3467 /* Deleting TC filter */
3469 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3471 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3473 dev_err(&pf->pdev->dev,
3474 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3475 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3476 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3480 hlist_for_each_entry_safe(cf, node,
3481 &vf->cloud_filter_list, cloud_node) {
3482 if (cf->seid != cfilter.seid)
3485 if (cfilter.dst_port != cf->dst_port)
3487 if (mask.dst_mac[0])
3488 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3490 /* for ipv4 data to be valid, only first byte of mask is set */
3491 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3492 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3493 ARRAY_SIZE(tcf.dst_ip)))
3495 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3496 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3497 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3498 sizeof(cfilter.ip.v6.src_ip6)))
3501 if (cfilter.vlan_id != cf->vlan_id)
3504 hlist_del(&cf->cloud_node);
3506 vf->num_cloud_filters--;
3510 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3515 * i40e_vc_add_cloud_filter
3516 * @vf: pointer to the VF info
3517 * @msg: pointer to the msg buffer
3519 * This function adds a cloud filter programmed as TC filter for ADq
3521 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3523 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3524 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3525 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3526 struct i40e_cloud_filter *cfilter = NULL;
3527 struct i40e_pf *pf = vf->pf;
3528 struct i40e_vsi *vsi = NULL;
3529 i40e_status aq_ret = 0;
3532 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3533 aq_ret = I40E_ERR_PARAM;
3537 if (!vf->adq_enabled) {
3538 dev_info(&pf->pdev->dev,
3539 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3541 aq_ret = I40E_ERR_PARAM;
3545 if (i40e_validate_cloud_filter(vf, vcf)) {
3546 dev_info(&pf->pdev->dev,
3547 "VF %d: Invalid input/s, can't apply cloud filter\n",
3549 aq_ret = I40E_ERR_PARAM;
3553 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3557 /* parse destination mac address */
3558 for (i = 0; i < ETH_ALEN; i++)
3559 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3561 /* parse source mac address */
3562 for (i = 0; i < ETH_ALEN; i++)
3563 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3565 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3566 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3567 cfilter->src_port = mask.src_port & tcf.src_port;
3569 switch (vcf->flow_type) {
3570 case VIRTCHNL_TCP_V4_FLOW:
3571 cfilter->n_proto = ETH_P_IP;
3572 if (mask.dst_ip[0] & tcf.dst_ip[0])
3573 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3574 ARRAY_SIZE(tcf.dst_ip));
3575 else if (mask.src_ip[0] & tcf.dst_ip[0])
3576 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3577 ARRAY_SIZE(tcf.dst_ip));
3579 case VIRTCHNL_TCP_V6_FLOW:
3580 cfilter->n_proto = ETH_P_IPV6;
3581 if (mask.dst_ip[3] & tcf.dst_ip[3])
3582 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3583 sizeof(cfilter->ip.v6.dst_ip6));
3584 if (mask.src_ip[3] & tcf.src_ip[3])
3585 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3586 sizeof(cfilter->ip.v6.src_ip6));
3589 /* TC filter can be configured based on different combinations
3590 * and in this case IP is not a part of filter config
3592 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3596 /* get the VSI to which the TC belongs */
3597 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3598 cfilter->seid = vsi->seid;
3599 cfilter->flags = vcf->field_flags;
3601 /* Adding cloud filter programmed as TC filter */
3603 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3605 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3607 dev_err(&pf->pdev->dev,
3608 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3609 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3610 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3614 INIT_HLIST_NODE(&cfilter->cloud_node);
3615 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3616 /* ownership of the filter passes to the list; drop the local pointer */
3618 vf->num_cloud_filters++;
3622 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3627 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3628 * @vf: pointer to the VF info
3629 * @msg: pointer to the msg buffer
3631 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3633 struct virtchnl_tc_info *tci =
3634 (struct virtchnl_tc_info *)msg;
3635 struct i40e_pf *pf = vf->pf;
3636 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3637 int i, adq_request_qps = 0;
3638 i40e_status aq_ret = 0;
3641 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3642 aq_ret = I40E_ERR_PARAM;
3646 /* ADq cannot be applied if spoof check is ON */
3648 dev_err(&pf->pdev->dev,
3649 "Spoof check is ON, turn it OFF to enable ADq\n");
3650 aq_ret = I40E_ERR_PARAM;
3654 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3655 dev_err(&pf->pdev->dev,
3656 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3658 aq_ret = I40E_ERR_PARAM;
3662 /* max number of traffic classes for VF currently capped at 4 */
3663 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3664 dev_err(&pf->pdev->dev,
3665 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3666 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3667 aq_ret = I40E_ERR_PARAM;
3671 /* validate queues for each TC */
3672 for (i = 0; i < tci->num_tc; i++)
3673 if (!tci->list[i].count ||
3674 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3675 dev_err(&pf->pdev->dev,
3676 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3677 vf->vf_id, i, tci->list[i].count,
3678 I40E_DEFAULT_QUEUES_PER_VF);
3679 aq_ret = I40E_ERR_PARAM;
3683 /* the VF needs max VF queues but already has the default number of queues */
3684 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3686 if (pf->queues_left < adq_request_qps) {
3687 dev_err(&pf->pdev->dev,
3688 "No queues left to allocate to VF %d\n",
3690 aq_ret = I40E_ERR_PARAM;
3693 /* we need to allocate max VF queues to enable ADq so as to
3694 * make sure an ADq-enabled VF always gets back its queues when it
3695 * goes through a reset.
3697 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3700 /* get link speed in Mbps to validate the rate limit */
3701 switch (ls->link_speed) {
3702 case VIRTCHNL_LINK_SPEED_100MB:
3705 case VIRTCHNL_LINK_SPEED_1GB:
3708 case VIRTCHNL_LINK_SPEED_10GB:
3709 speed = SPEED_10000;
3711 case VIRTCHNL_LINK_SPEED_20GB:
3712 speed = SPEED_20000;
3714 case VIRTCHNL_LINK_SPEED_25GB:
3715 speed = SPEED_25000;
3717 case VIRTCHNL_LINK_SPEED_40GB:
3718 speed = SPEED_40000;
3721 dev_err(&pf->pdev->dev,
3722 "Cannot detect link speed\n");
3723 aq_ret = I40E_ERR_PARAM;
3727 /* parse data from the queue channel info */
3728 vf->num_tc = tci->num_tc;
3729 for (i = 0; i < vf->num_tc; i++) {
3730 if (tci->list[i].max_tx_rate) {
3731 if (tci->list[i].max_tx_rate > speed) {
3732 dev_err(&pf->pdev->dev,
3733 "Invalid max tx rate %llu specified for VF %d.",
3734 tci->list[i].max_tx_rate,
3736 aq_ret = I40E_ERR_PARAM;
3739 vf->ch[i].max_tx_rate =
3740 tci->list[i].max_tx_rate;
3743 vf->ch[i].num_qps = tci->list[i].count;
3746 /* set this flag only after making sure all inputs are sane */
3747 vf->adq_enabled = true;
3748 /* num_req_queues is set when the user changes the number of queues via
3749 * ethtool, and this causes issues for the default VSI (which depends on
3750 * this variable) when ADq is enabled, hence reset it.
3752 vf->num_req_queues = 0;
3754 /* reset the VF in order to allocate resources */
3755 i40e_vc_notify_vf_reset(vf);
3756 i40e_reset_vf(vf, false);
3758 return I40E_SUCCESS;
3760 /* send the response to the VF */
3762 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
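/*
 * Standalone sketch of the rate-limit validation above: the negotiated link
 * speed is converted to Mbps and each TC's max_tx_rate must not exceed it.
 * The enum values are local stand-ins, not the virtchnl definitions.
 */
#include <stdbool.h>

enum eg_speed { EG_100MB, EG_1GB, EG_10GB, EG_20GB, EG_25GB, EG_40GB };

static int eg_speed_mbps(enum eg_speed s)
{
	switch (s) {
	case EG_100MB:	return 100;
	case EG_1GB:	return 1000;
	case EG_10GB:	return 10000;
	case EG_20GB:	return 20000;
	case EG_25GB:	return 25000;
	case EG_40GB:	return 40000;
	}
	return -1;	/* unknown speed: the caller must reject it */
}

static bool tx_rate_ok(unsigned long long max_tx_rate, enum eg_speed link)
{
	int mbps = eg_speed_mbps(link);

	return mbps > 0 && max_tx_rate <= (unsigned long long)mbps;
}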
3767 * i40e_vc_del_qch_msg
3768 * @vf: pointer to the VF info
3769 * @msg: pointer to the msg buffer
3771 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3773 struct i40e_pf *pf = vf->pf;
3774 i40e_status aq_ret = 0;
3776 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3777 aq_ret = I40E_ERR_PARAM;
3781 if (vf->adq_enabled) {
3782 i40e_del_all_cloud_filters(vf);
3784 vf->adq_enabled = false;
3786 dev_info(&pf->pdev->dev,
3787 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3790 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3792 aq_ret = I40E_ERR_PARAM;
3795 /* reset the VF in order to allocate resources */
3796 i40e_vc_notify_vf_reset(vf);
3797 i40e_reset_vf(vf, false);
3799 return I40E_SUCCESS;
3802 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3807 * i40e_vc_process_vf_msg
3808 * @pf: pointer to the PF structure
3809 * @vf_id: source VF id
3810 * @v_opcode: operation code
3811 * @v_retval: unused return value code
3812 * @msg: pointer to the msg buffer
3813 * @msglen: msg length
3815 * called from the common aeq/arq handler to
3816 * process a request from a VF
3818 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3819 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3821 struct i40e_hw *hw = &pf->hw;
3822 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3826 pf->vf_aq_requests++;
3827 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3829 vf = &(pf->vf[local_vf_id]);
3831 /* Check if VF is disabled. */
3832 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3833 return I40E_ERR_PARAM;
3835 /* perform basic checks on the msg */
3836 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3839 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3840 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3841 local_vf_id, v_opcode, msglen);
3843 case VIRTCHNL_STATUS_ERR_PARAM:
3851 case VIRTCHNL_OP_VERSION:
3852 ret = i40e_vc_get_version_msg(vf, msg);
3854 case VIRTCHNL_OP_GET_VF_RESOURCES:
3855 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3856 i40e_vc_notify_vf_link_state(vf);
3858 case VIRTCHNL_OP_RESET_VF:
3859 i40e_vc_reset_vf_msg(vf);
3862 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3863 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3865 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3866 ret = i40e_vc_config_queues_msg(vf, msg);
3868 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3869 ret = i40e_vc_config_irq_map_msg(vf, msg);
3871 case VIRTCHNL_OP_ENABLE_QUEUES:
3872 ret = i40e_vc_enable_queues_msg(vf, msg);
3873 i40e_vc_notify_vf_link_state(vf);
3875 case VIRTCHNL_OP_DISABLE_QUEUES:
3876 ret = i40e_vc_disable_queues_msg(vf, msg);
3878 case VIRTCHNL_OP_ADD_ETH_ADDR:
3879 ret = i40e_vc_add_mac_addr_msg(vf, msg);
3881 case VIRTCHNL_OP_DEL_ETH_ADDR:
3882 ret = i40e_vc_del_mac_addr_msg(vf, msg);
3884 case VIRTCHNL_OP_ADD_VLAN:
3885 ret = i40e_vc_add_vlan_msg(vf, msg);
3887 case VIRTCHNL_OP_DEL_VLAN:
3888 ret = i40e_vc_remove_vlan_msg(vf, msg);
3890 case VIRTCHNL_OP_GET_STATS:
3891 ret = i40e_vc_get_stats_msg(vf, msg);
3893 case VIRTCHNL_OP_IWARP:
3894 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3896 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3897 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3899 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3900 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3902 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3903 ret = i40e_vc_config_rss_key(vf, msg);
3905 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3906 ret = i40e_vc_config_rss_lut(vf, msg);
3908 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3909 ret = i40e_vc_get_rss_hena(vf, msg);
3911 case VIRTCHNL_OP_SET_RSS_HENA:
3912 ret = i40e_vc_set_rss_hena(vf, msg);
3914 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3915 ret = i40e_vc_enable_vlan_stripping(vf, msg);
3917 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3918 ret = i40e_vc_disable_vlan_stripping(vf, msg);
3920 case VIRTCHNL_OP_REQUEST_QUEUES:
3921 ret = i40e_vc_request_queues_msg(vf, msg);
3923 case VIRTCHNL_OP_ENABLE_CHANNELS:
3924 ret = i40e_vc_add_qch_msg(vf, msg);
3926 case VIRTCHNL_OP_DISABLE_CHANNELS:
3927 ret = i40e_vc_del_qch_msg(vf, msg);
3929 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3930 ret = i40e_vc_add_cloud_filter(vf, msg);
3932 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3933 ret = i40e_vc_del_cloud_filter(vf, msg);
3935 case VIRTCHNL_OP_UNKNOWN:
3937 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3938 v_opcode, local_vf_id);
3939 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3940 I40E_ERR_NOT_IMPLEMENTED);
3948 * i40e_vc_process_vflr_event
3949 * @pf: pointer to the PF structure
3951 * called from the VFLR IRQ handler to
3952 * free up VF resources and state variables
3954 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3956 struct i40e_hw *hw = &pf->hw;
3957 u32 reg, reg_idx, bit_idx;
3961 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3964 /* Re-enable the VFLR interrupt cause here, before looking for which
3965 * VF got reset. Otherwise, if another VF gets a reset while the
3966 * first one is being processed, that interrupt will be lost, and
3967 * that VF will be stuck in reset forever.
3969 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3970 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3971 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3974 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3975 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3976 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3977 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3978 /* read the GLGEN_VFLRSTAT register to find out which VFs got an FLR */
3979 vf = &pf->vf[vf_id];
3980 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3981 if (reg & BIT(bit_idx))
3982 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3983 i40e_reset_vf(vf, true);
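/*
 * Standalone sketch of the VFLR lookup above: each GLGEN_VFLRSTAT register
 * holds the reset status of 32 VFs, so the absolute VF id is split into a
 * register index and a bit index. Names are local to the illustration.
 */
static void vflr_locate(int vf_base_id, int vf_id,
			unsigned int *reg_idx, unsigned int *bit_idx)
{
	unsigned int abs_id = vf_base_id + vf_id;

	*reg_idx = abs_id / 32;	/* which GLGEN_VFLRSTAT register */
	*bit_idx = abs_id % 32;	/* which bit within that register */
}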
3990 * i40e_validate_vf
3991 * @pf: the physical function
3992 * @vf_id: VF identifier
3994 * Check that the VF is enabled and the VSI exists.
3996 * Returns 0 on success, negative on failure
3998 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4000 struct i40e_vsi *vsi;
4004 if (vf_id >= pf->num_alloc_vfs) {
4005 dev_err(&pf->pdev->dev,
4006 "Invalid VF Identifier %d\n", vf_id);
4010 vf = &pf->vf[vf_id];
4011 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4019 * i40e_ndo_set_vf_mac
4020 * @netdev: network interface device structure
4021 * @vf_id: VF identifier
4022 * @mac: MAC address to set
4024 * program VF mac address
4026 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4028 struct i40e_netdev_priv *np = netdev_priv(netdev);
4029 struct i40e_vsi *vsi = np->vsi;
4030 struct i40e_pf *pf = vsi->back;
4031 struct i40e_mac_filter *f;
4034 struct hlist_node *h;
4038 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4039 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4043 /* validate the request */
4044 ret = i40e_validate_vf(pf, vf_id);
4048 vf = &pf->vf[vf_id];
4049 vsi = pf->vsi[vf->lan_vsi_idx];
4051 /* When the VF is resetting, wait until it is done.
4052 * It can take up to 200 milliseconds,
4053 * but wait for up to 300 milliseconds to be safe.
4054 * If the VF is indeed in reset, the vsi pointer must be
4055 * refreshed to the newly loaded vsi under pf->vsi[id].
4057 for (i = 0; i < 15; i++) {
4058 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4060 vsi = pf->vsi[vf->lan_vsi_idx];
4065 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4066 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4072 if (is_multicast_ether_addr(mac)) {
4073 dev_err(&pf->pdev->dev,
4074 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4079 /* Lock once because the add/del_filter functions invoked below require
4080 * the mac_filter_hash_lock to be held
4082 spin_lock_bh(&vsi->mac_filter_hash_lock);
4084 /* delete the temporary mac address */
4085 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4086 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4088 /* Delete all the filters for this VSI - we're going to kill it
4091 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4092 __i40e_del_filter(vsi, f);
4094 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4096 /* program mac filter */
4097 if (i40e_sync_vsi_filters(vsi)) {
4098 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4102 ether_addr_copy(vf->default_lan_addr.addr, mac);
4104 if (is_zero_ether_addr(mac)) {
4105 vf->pf_set_mac = false;
4106 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4108 vf->pf_set_mac = true;
4109 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4113 /* Force the VF interface down so it has to bring up with new MAC
4116 i40e_vc_disable_vf(vf);
4117 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4120 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4125 * i40e_vsi_has_vlans - True if VSI has configured VLANs
4126 * @vsi: pointer to the vsi
4128 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
4129 * we have no configured VLANs. Do not call while holding the
4130 * mac_filter_hash_lock.
4132 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4136 /* If we have a port VLAN, then the VSI cannot have any VLANs
4137 * configured, as all MAC/VLAN filters will be assigned to the PVID.
4142 /* Since we don't have a PVID, we know that if the device is in VLAN
4143 * mode it must be because of a VLAN filter configured on this VSI.
4145 spin_lock_bh(&vsi->mac_filter_hash_lock);
4146 have_vlans = i40e_is_vsi_in_vlan(vsi);
4147 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4153 * i40e_ndo_set_vf_port_vlan
4154 * @netdev: network interface device structure
4155 * @vf_id: VF identifier
4156 * @vlan_id: VLAN identifier
4157 * @qos: priority setting
4158 * @vlan_proto: vlan protocol
4160 * program VF vlan id and/or qos
4162 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4163 u16 vlan_id, u8 qos, __be16 vlan_proto)
4165 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4166 struct i40e_netdev_priv *np = netdev_priv(netdev);
4167 bool allmulti = false, alluni = false;
4168 struct i40e_pf *pf = np->vsi->back;
4169 struct i40e_vsi *vsi;
4173 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4174 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4178 /* validate the request */
4179 ret = i40e_validate_vf(pf, vf_id);
4183 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4184 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4189 if (vlan_proto != htons(ETH_P_8021Q)) {
4190 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4191 ret = -EPROTONOSUPPORT;
4195 vf = &pf->vf[vf_id];
4196 vsi = pf->vsi[vf->lan_vsi_idx];
4197 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4198 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4204 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4205 /* duplicate request, so just return success */
4208 if (i40e_vsi_has_vlans(vsi)) {
4209 dev_err(&pf->pdev->dev,
4210 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4212 /* Administrator error - knock the VF offline until the
4213 * network is reconfigured correctly and the VF driver is
4214 * reloaded.
4216 i40e_vc_disable_vf(vf);
4217 /* During reset the VF got a new VSI, so refresh the pointer. */
4218 vsi = pf->vsi[vf->lan_vsi_idx];
4221 /* Locked once because multiple functions below iterate the list */
4222 spin_lock_bh(&vsi->mac_filter_hash_lock);
4224 /* Check for condition where there was already a port VLAN ID
4225 * filter set and now it is being deleted by setting it to zero.
4226 * Additionally check for the condition where there was a port
4227 * VLAN but now there is a new and different port VLAN being set.
4228 * Before deleting all the old VLAN filters we must add new ones
4229 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4230 * MAC addresses deleted.
4232 if ((!(vlan_id || qos) ||
4233 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4235 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4237 dev_info(&vsi->back->pdev->dev,
4238 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4239 vsi->back->hw.aq.asq_last_status);
4240 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4245 if (vsi->info.pvid) {
4246 /* remove all filters on the old VLAN */
4247 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4251 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4253 /* disable promisc modes in case they were enabled */
4254 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4257 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4262 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4264 i40e_vsi_remove_pvid(vsi);
4265 spin_lock_bh(&vsi->mac_filter_hash_lock);
4268 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4269 vlan_id, qos, vf_id);
4271 /* add new VLAN filter for each MAC */
4272 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4274 dev_info(&vsi->back->pdev->dev,
4275 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4276 vsi->back->hw.aq.asq_last_status);
4277 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4281 /* remove the previously added non-VLAN MAC filters */
4282 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4285 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4287 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4290 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4293 /* Schedule the worker thread to take care of applying changes */
4294 i40e_service_event_schedule(vsi->back);
4297 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4301 /* The Port VLAN needs to be saved across resets the same as the
4302 * default LAN MAC address.
4304 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4306 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4308 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4315 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
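/*
 * Standalone sketch of the port-VLAN encoding used above: the VLAN id and
 * the QoS priority are packed into one 16-bit PVID, as in the vlanprio
 * computation at the top of this handler and the decode in
 * i40e_ndo_get_vf_config(). The shift/mask values below follow the standard
 * 802.1Q tag layout and are assumptions of this sketch; the driver's own
 * I40E_VLAN_PRIORITY_SHIFT, I40E_VLAN_MASK and I40E_PRIORITY_MASK macros
 * are authoritative.
 */
#include <stdint.h>

#define EG_VLAN_VID_MASK	0x0fff	/* VID, bits 0-11 (802.1Q) */
#define EG_VLAN_PRIO_SHIFT	13	/* PCP, bits 13-15 (802.1Q) */
#define EG_VLAN_PRIO_MASK	0xe000

static uint16_t eg_encode_pvid(uint16_t vlan_id, uint8_t qos)
{
	return (vlan_id & EG_VLAN_VID_MASK) |
	       ((uint16_t)(qos & 0x7) << EG_VLAN_PRIO_SHIFT);
}

static void eg_decode_pvid(uint16_t pvid, uint16_t *vlan_id, uint8_t *qos)
{
	*vlan_id = pvid & EG_VLAN_VID_MASK;
	*qos = (pvid & EG_VLAN_PRIO_MASK) >> EG_VLAN_PRIO_SHIFT;
}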
4320 * i40e_ndo_set_vf_bw
4321 * @netdev: network interface device structure
4322 * @vf_id: VF identifier
4323 * @min_tx_rate: Minimum Tx rate
4324 * @max_tx_rate: Maximum Tx rate
4326 * configure VF Tx rate
4328 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4331 struct i40e_netdev_priv *np = netdev_priv(netdev);
4332 struct i40e_pf *pf = np->vsi->back;
4333 struct i40e_vsi *vsi;
4337 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4338 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4342 /* validate the request */
4343 ret = i40e_validate_vf(pf, vf_id);
4348 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4349 min_tx_rate, vf_id);
4354 vf = &pf->vf[vf_id];
4355 vsi = pf->vsi[vf->lan_vsi_idx];
4356 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4357 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4363 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4367 vf->tx_rate = max_tx_rate;
4369 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4374 * i40e_ndo_get_vf_config
4375 * @netdev: network interface device structure
4376 * @vf_id: VF identifier
4377 * @ivi: VF configuration structure
4379 * return VF configuration
4381 int i40e_ndo_get_vf_config(struct net_device *netdev,
4382 int vf_id, struct ifla_vf_info *ivi)
4384 struct i40e_netdev_priv *np = netdev_priv(netdev);
4385 struct i40e_vsi *vsi = np->vsi;
4386 struct i40e_pf *pf = vsi->back;
4390 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4391 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4395 /* validate the request */
4396 ret = i40e_validate_vf(pf, vf_id);
4400 vf = &pf->vf[vf_id];
4401 /* the first VSI is always the LAN VSI */
4402 vsi = pf->vsi[vf->lan_vsi_idx];
4410 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4412 ivi->max_tx_rate = vf->tx_rate;
4413 ivi->min_tx_rate = 0;
4414 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4415 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4416 I40E_VLAN_PRIORITY_SHIFT;
4417 if (!vf->link_forced)
4418 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4419 else if (vf->link_up)
4420 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4422 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4423 ivi->spoofchk = vf->spoofchk;
4424 ivi->trusted = vf->trusted;
4428 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4433 * i40e_ndo_set_vf_link_state
4434 * @netdev: network interface device structure
4435 * @vf_id: VF identifier
4436 * @link: required link state
4438 * Set the link state of a specified VF, regardless of physical link state
4440 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4442 struct i40e_netdev_priv *np = netdev_priv(netdev);
4443 struct i40e_pf *pf = np->vsi->back;
4444 struct i40e_link_status *ls = &pf->hw.phy.link_info;
4445 struct virtchnl_pf_event pfe;
4446 struct i40e_hw *hw = &pf->hw;
4451 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4452 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4456 /* validate the request */
4457 if (vf_id >= pf->num_alloc_vfs) {
4458 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4463 vf = &pf->vf[vf_id];
4464 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4466 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4467 pfe.severity = PF_EVENT_SEVERITY_INFO;
4470 case IFLA_VF_LINK_STATE_AUTO:
4471 vf->link_forced = false;
4472 pfe.event_data.link_event.link_status =
4473 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4474 pfe.event_data.link_event.link_speed =
4475 (enum virtchnl_link_speed)
4476 pf->hw.phy.link_info.link_speed;
4478 case IFLA_VF_LINK_STATE_ENABLE:
4479 vf->link_forced = true;
4481 pfe.event_data.link_event.link_status = true;
4482 pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed);
4484 case IFLA_VF_LINK_STATE_DISABLE:
4485 vf->link_forced = true;
4486 vf->link_up = false;
4487 pfe.event_data.link_event.link_status = false;
4488 pfe.event_data.link_event.link_speed = 0;
4494 /* Notify the VF of its new link state */
4495 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4496 0, (u8 *)&pfe, sizeof(pfe), NULL);
4499 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4504 * i40e_ndo_set_vf_spoofchk
4505 * @netdev: network interface device structure
4506 * @vf_id: VF identifier
4507 * @enable: flag to enable or disable feature
4509 * Enable or disable VF spoof checking
4511 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4513 struct i40e_netdev_priv *np = netdev_priv(netdev);
4514 struct i40e_vsi *vsi = np->vsi;
4515 struct i40e_pf *pf = vsi->back;
4516 struct i40e_vsi_context ctxt;
4517 struct i40e_hw *hw = &pf->hw;
4521 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4522 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4526 /* validate the request */
4527 if (vf_id >= pf->num_alloc_vfs) {
4528 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4533 vf = &(pf->vf[vf_id]);
4534 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4535 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4541 if (enable == vf->spoofchk)
4544 vf->spoofchk = enable;
4545 memset(&ctxt, 0, sizeof(ctxt));
4546 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4547 ctxt.pf_num = pf->hw.pf_id;
4548 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
4550 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4551 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4552 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4554 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4559 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4564 * i40e_ndo_set_vf_trust
4565 * @netdev: network interface device structure of the PF
4566 * @vf_id: VF identifier
4567 * @setting: trust setting
4569 * Enable or disable VF trust setting
4571 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4573 struct i40e_netdev_priv *np = netdev_priv(netdev);
4574 struct i40e_pf *pf = np->vsi->back;
4578 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4579 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4583 /* validate the request */
4584 if (vf_id >= pf->num_alloc_vfs) {
4585 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4590 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4591 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4596 vf = &pf->vf[vf_id];
4598 if (setting == vf->trusted)
4601 vf->trusted = setting;
4602 i40e_vc_disable_vf(vf);
4603 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4604 vf_id, setting ? "" : "un");
4606 if (vf->adq_enabled) {
4608 dev_info(&pf->pdev->dev,
4609 "VF %u no longer Trusted, deleting all cloud filters\n",
4611 i40e_del_all_cloud_filters(vf);
4616 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4621 * i40e_get_vf_stats - populate some stats for the VF
4622 * @netdev: the netdev of the PF
4623 * @vf_id: the host OS identifier (0-127)
4624 * @vf_stats: pointer to the OS memory to be initialized
4626 int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
4627 struct ifla_vf_stats *vf_stats)
4629 struct i40e_netdev_priv *np = netdev_priv(netdev);
4630 struct i40e_pf *pf = np->vsi->back;
4631 struct i40e_eth_stats *stats;
4632 struct i40e_vsi *vsi;
4635 /* validate the request */
4636 if (i40e_validate_vf(pf, vf_id))
4639 vf = &pf->vf[vf_id];
4640 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4641 dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
4645 vsi = pf->vsi[vf->lan_vsi_idx];
4649 i40e_update_eth_stats(vsi);
4650 stats = &vsi->eth_stats;
4652 memset(vf_stats, 0, sizeof(*vf_stats));
4654 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4655 stats->rx_multicast;
4656 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4657 stats->tx_multicast;
4658 vf_stats->rx_bytes = stats->rx_bytes;
4659 vf_stats->tx_bytes = stats->tx_bytes;
4660 vf_stats->broadcast = stats->rx_broadcast;
4661 vf_stats->multicast = stats->rx_multicast;
4662 vf_stats->rx_dropped = stats->rx_discards;
4663 vf_stats->tx_dropped = stats->tx_discards;
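/*
 * Standalone sketch of the aggregation above: the per-class hardware
 * counters are combined into the generic ifla_vf_stats view. The struct
 * below is a pared-down stand-in for the driver's i40e_eth_stats.
 */
#include <stdint.h>

struct eg_eth_stats {
	uint64_t rx_unicast, rx_multicast, rx_broadcast;
	uint64_t tx_unicast, tx_multicast, tx_broadcast;
};

static uint64_t eg_rx_packets(const struct eg_eth_stats *s)
{
	return s->rx_unicast + s->rx_multicast + s->rx_broadcast;
}

static uint64_t eg_tx_packets(const struct eg_eth_stats *s)
{
	return s->tx_unicast + s->tx_multicast + s->tx_broadcast;
}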