// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u16 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}

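/* Worked example (illustrative numbers, not from the datasheet): with the
 * contiguous layout, queue_mapping[0] holds the first PF queue of the VSI,
 * so a VSI whose queues start at PF queue 64 maps vsi_queue_id 3 to
 * 64 + 3 = 67. With the noncontiguous (scatter) layout, queue_mapping[] is
 * a per-queue lookup table and vsi_queue_id 3 is simply queue_mapping[3].
 */
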
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (which can be 1 to
		 * 16) as its own, they may actually belong to different VSIs
		 * (up to 4). We need to find which queue belongs to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}

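/* ADq example (hypothetical numbers): a VF with num_tc = 2 and
 * ch[0].num_qps = 4, ch[1].num_qps = 4 sees queues 0-7 as one flat range.
 * A request for queue_id 5 fails the first check (5 >= 4), is reduced to
 * 5 - 4 = 1, and resolves to queue 1 of the TC 1 VSI.
 */
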
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}

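/* Register indexing sketch (assuming num_msix_vectors_vf = 5, the usual
 * value for this hardware: one misc vector plus four queue vectors): the
 * per-VF LNKLSTN block is (5 - 1) = 4 registers wide, so VF 2, vector 3
 * lands at I40E_VPINT_LNKLSTN(4 * 2 + (3 - 1)) = I40E_VPINT_LNKLSTN(10).
 * Vector 0 always uses the dedicated I40E_VPINT_LNKLST0(vf_id) register.
 */
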
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i = 0;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}

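/* Allocation note: struct virtchnl_iwarp_qvlist_info ends in a one-element
 * qv_info[] array, which is why struct_size() above is called with
 * num_vectors - 1 extra entries; e.g. num_vectors = 3 yields room for
 * qv_info[0..2] without overflowing the trailing array.
 */
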
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

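/* Rate example (illustrative): max_tx_rate arrives in Mbps and is converted
 * to 50 Mbps scheduler credits by I40E_BW_CREDIT_DIVISOR, so an
 * "ip link set ... vf 0 max_tx_rate 300" request becomes 300 / 50 = 6
 * credits in the admin queue command above.
 */
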
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

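/* The two mapping stages together: VPLAN_QTABLE translates a VF-relative
 * queue number to an absolute PF queue (one entry per queue), while
 * VSILAN_QTABLE packs two PF queue ids per 32-bit register (low and high
 * halves, 0x7FF meaning end-of-list, hence the 0x07FF07FF terminator above).
 */
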
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * available pool.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}

static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);

/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}

			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			if (aq_ret) {
				int aq_err = pf->hw.aq.asq_last_status;

				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
			}
		}
		return aq_ret;
	}
	aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
						       NULL);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
		return aq_ret;
	}

	aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
						     NULL, true);
	if (aq_ret) {
		int aq_err = pf->hw.aq.asq_last_status;

		dev_err(&pf->pdev->dev,
			"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
			vf->vf_id,
			i40e_stat_str(&pf->hw, aq_ret),
			i40e_aq_str(&pf->hw, aq_err));
	}

	return aq_ret;
}

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
		i40e_flush(hw);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

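/* VFLRSTAT bookkeeping example (hypothetical ids): GLGEN_VFLRSTAT is an
 * array of 32-bit registers with one bit per absolute VF. With
 * vf_base_id = 64 and vf_id = 5, the absolute id 69 maps to register
 * 69 / 32 = 2, bit 69 % 32 = 5, i.e. BIT(5) in I40E_GLGEN_VFLRSTAT(2).
 */
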
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is reset, false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);

	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}

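/* Typical trigger path (standard PCI sysfs interface): writing to
 * sriov_numvfs invokes this callback, e.g.
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs   (enable 4 VFs)
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs   (free them)
 * The 0000:01:00.0 address is illustrative only.
 */
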
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	size_t len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

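/* Negotiation sketch: vf_cap_flags is effectively the intersection of what
 * the VF driver asked for (vf->driver_caps, taken from the request payload
 * on v1.1+) and what this PF/firmware combination supports, so a VF never
 * sees a capability bit the PF is unwilling to service.
 */
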
/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	bool alluni = false;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (!aq_ret) {
		if (allmulti) {
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		} else {
			dev_info(&pf->pdev->dev,
				 "VF %d successfully unset multicast promiscuous mode\n",
				 vf->vf_id);
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		}
		if (alluni) {
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		} else {
			dev_info(&pf->pdev->dev,
				 "VF %d successfully unset unicast promiscuous mode\n",
				 vf->vf_id);
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		}
	}
err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id = 0;
	u16 num_qps_all = 0;
	i40e_status aq_ret = 0;
	int i, j = 0, idx = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (vf->adq_enabled) {
		for (i = 0; i < I40E_MAX_VF_VSI; i++)
			num_qps_all += vf->ch[i].num_qps;
		if (num_qps_all != qci->num_queue_pairs) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi_id = qci->vsi_id;

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
						      qpi->txq.queue_id)) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}

			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
				goto error_param;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 */
		if (vf->adq_enabled) {
			if (idx >= ARRAY_SIZE(vf->ch)) {
				aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
				goto error_param;
			}
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++)
			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
				vf->ch[i].num_qps;
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_validate_queue_map
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (irqmap_info->num_vectors >
	    vf->pf->hw.func_caps.num_msix_vectors_vf) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
		vsi_id = map->vsi_id;

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}

2317 * i40e_vc_enable_queues_msg
2318 * @vf: pointer to the VF info
2319 * @msg: pointer to the msg buffer
2321 * called from the VF to enable all or specific queue(s)
2323 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2325 struct virtchnl_queue_select *vqs =
2326 (struct virtchnl_queue_select *)msg;
2327 struct i40e_pf *pf = vf->pf;
2328 i40e_status aq_ret = 0;
2331 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2332 aq_ret = I40E_ERR_PARAM;
2336 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2337 aq_ret = I40E_ERR_PARAM;
2341 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
2342 aq_ret = I40E_ERR_PARAM;
2346 /* Use the queue bit map sent by the VF */
2347 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2349 aq_ret = I40E_ERR_TIMEOUT;
2352 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2354 aq_ret = I40E_ERR_TIMEOUT;
2358 /* need to start the rings for the additional ADq VSIs as well */
2359 if (vf->adq_enabled) {
2360 /* zero belongs to LAN VSI */
2361 for (i = 1; i < vf->num_tc; i++) {
2362 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2363 aq_ret = I40E_ERR_TIMEOUT;
2368 /* send the response to the VF */
2369 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2374 * i40e_vc_disable_queues_msg
2375 * @vf: pointer to the VF info
2376 * @msg: pointer to the msg buffer
2378 * called from the VF to disable all or specific queue(s)
2381 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2383 struct virtchnl_queue_select *vqs =
2384 (struct virtchnl_queue_select *)msg;
2385 struct i40e_pf *pf = vf->pf;
2386 i40e_status aq_ret = 0;
2388 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2389 aq_ret = I40E_ERR_PARAM;
2393 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2394 aq_ret = I40E_ERR_PARAM;
2398 if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
2399 vqs->rx_queues > I40E_MAX_VF_QUEUES ||
2400 vqs->tx_queues > I40E_MAX_VF_QUEUES) {
2401 aq_ret = I40E_ERR_PARAM;
2405 /* Use the queue bit map sent by the VF */
2406 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2408 aq_ret = I40E_ERR_TIMEOUT;
2411 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2413 aq_ret = I40E_ERR_TIMEOUT;
2417 /* send the response to the VF */
2418 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2423 * i40e_vc_request_queues_msg
2424 * @vf: pointer to the VF info
2425 * @msg: pointer to the msg buffer
2427 * VFs get a default number of queues but can use this message to request a
2428 * different number. If the request is successful, the PF will reset the VF and
2429 * return 0. If unsuccessful, the PF will send a message informing the VF of the
2430 * number of available queues and return the result of sending that message.
2432 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2434 struct virtchnl_vf_res_request *vfres =
2435 (struct virtchnl_vf_res_request *)msg;
2436 u16 req_pairs = vfres->num_queue_pairs;
2437 u8 cur_pairs = vf->num_queue_pairs;
2438 struct i40e_pf *pf = vf->pf;
2440 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
2443 if (req_pairs > I40E_MAX_VF_QUEUES) {
2444 dev_err(&pf->pdev->dev,
2445 "VF %d tried to request more than %d queues.\n",
2447 I40E_MAX_VF_QUEUES);
2448 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2449 } else if (req_pairs - cur_pairs > pf->queues_left) {
2450 dev_warn(&pf->pdev->dev,
2451 "VF %d requested %d more queues, but only %d left.\n",
2453 req_pairs - cur_pairs,
2455 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2457 /* successful request */
2458 vf->num_req_queues = req_pairs;
2459 i40e_vc_notify_vf_reset(vf);
2460 i40e_reset_vf(vf, false);
2464 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2465 (u8 *)vfres, sizeof(*vfres));
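/* A sketch of the arithmetic above with hypothetical numbers (not real
 * driver state): a VF that currently owns cur_pairs = 4 asks for
 * req_pairs = 8 while pf->queues_left = 2. Since 8 - 4 > 2, the PF cannot
 * honor the request and instead reports what it could grant:
 *
 *	u16 reply = pf->queues_left + cur_pairs;	// 2 + 4 = 6 pairs
 *
 * Only a request that passes both checks resets the VF and returns 0.
 */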
2469 * i40e_vc_get_stats_msg
2470 * @vf: pointer to the VF info
2471 * @msg: pointer to the msg buffer
2473 * called from the VF to get vsi stats
2475 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2477 struct virtchnl_queue_select *vqs =
2478 (struct virtchnl_queue_select *)msg;
2479 struct i40e_pf *pf = vf->pf;
2480 struct i40e_eth_stats stats;
2481 i40e_status aq_ret = 0;
2482 struct i40e_vsi *vsi;
2484 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2486 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2487 aq_ret = I40E_ERR_PARAM;
2491 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2492 aq_ret = I40E_ERR_PARAM;
2496 vsi = pf->vsi[vf->lan_vsi_idx];
2498 aq_ret = I40E_ERR_PARAM;
2501 i40e_update_eth_stats(vsi);
2502 stats = vsi->eth_stats;
2505 /* send the response back to the VF */
2506 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2507 (u8 *)&stats, sizeof(stats));
2510 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2511 * program: 16 for multicast, 1 for its own MAC, 1 for broadcast
2513 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2514 #define I40E_VC_MAX_VLAN_PER_VF 16
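/* Worked example of the untrusted-VF allowance (hypothetical counts): the
 * budget above is 16 + 1 + 1 = 18 MAC filters. An untrusted VF with
 * vf->num_mac = 16 that sends a 3-element address list is rejected by
 * i40e_check_vf_permission() below, because 16 + 3 > 18.
 */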
2517 * i40e_check_vf_permission
2518 * @vf: pointer to the VF info
2519 * @al: MAC address list from virtchnl
2521 * Check that the given list of MAC addresses is allowed. Returns an error
2522 * if any address in the list is not valid. Checks the following conditions:
2524 * 1) broadcast and zero addresses are never valid
2525 * 2) unicast addresses are not allowed if the VMM has administratively set
2526 * the VF MAC address, unless the VF is marked as privileged.
2527 * 3) There is enough space to add all the addresses.
2529 * Note that to guarantee consistency, it is expected this function be called
2530 * while holding the mac_filter_hash_lock, as otherwise the current number of
2531 * addresses might not be accurate.
2533 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2534 struct virtchnl_ether_addr_list *al)
2536 struct i40e_pf *pf = vf->pf;
2539 /* If this VF is not privileged, then we can't add more than a limited
2540 * number of addresses. Check to make sure that the additions do not
2541 * push us over the limit.
2543 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2544 (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
2545 dev_err(&pf->pdev->dev,
2546 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2550 for (i = 0; i < al->num_elements; i++) {
2551 u8 *addr = al->list[i].addr;
2553 if (is_broadcast_ether_addr(addr) ||
2554 is_zero_ether_addr(addr)) {
2555 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
addr);
2557 return I40E_ERR_INVALID_MAC_ADDR;
2560 /* If the host VMM administrator has set the VF MAC address
2561 * administratively via the ndo_set_vf_mac command then deny
2562 * permission to the VF to add or delete unicast MAC addresses.
2563 * Unless the VF is privileged and then it can do whatever.
2564 * The VF may request to set the MAC address filter already
2565 * assigned to it so do not return an error in that case.
2567 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2568 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2569 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2570 dev_err(&pf->pdev->dev,
2571 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2580 * i40e_vc_add_mac_addr_msg
2581 * @vf: pointer to the VF info
2582 * @msg: pointer to the msg buffer
2584 * add guest mac address filter
2586 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2588 struct virtchnl_ether_addr_list *al =
2589 (struct virtchnl_ether_addr_list *)msg;
2590 struct i40e_pf *pf = vf->pf;
2591 struct i40e_vsi *vsi = NULL;
2592 i40e_status ret = 0;
2595 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2596 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2597 ret = I40E_ERR_PARAM;
2601 vsi = pf->vsi[vf->lan_vsi_idx];
2603 /* Lock once, because all function inside for loop accesses VSI's
2604 * MAC filter list which needs to be protected using same lock.
2606 spin_lock_bh(&vsi->mac_filter_hash_lock);
2608 ret = i40e_check_vf_permission(vf, al);
2610 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2614 /* add new addresses to the list */
2615 for (i = 0; i < al->num_elements; i++) {
2616 struct i40e_mac_filter *f;
2618 f = i40e_find_mac(vsi, al->list[i].addr);
2620 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2623 dev_err(&pf->pdev->dev,
2624 "Unable to add MAC filter %pM for VF %d\n",
2625 al->list[i].addr, vf->vf_id);
2626 ret = I40E_ERR_PARAM;
2627 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2634 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2636 /* program the updated filter list */
2637 ret = i40e_sync_vsi_filters(vsi);
2639 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
2643 /* send the response to the VF */
2644 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2649 * i40e_vc_del_mac_addr_msg
2650 * @vf: pointer to the VF info
2651 * @msg: pointer to the msg buffer
2653 * remove guest mac address filter
2655 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2657 struct virtchnl_ether_addr_list *al =
2658 (struct virtchnl_ether_addr_list *)msg;
2659 struct i40e_pf *pf = vf->pf;
2660 struct i40e_vsi *vsi = NULL;
2661 i40e_status ret = 0;
2664 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2665 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2666 ret = I40E_ERR_PARAM;
2670 for (i = 0; i < al->num_elements; i++) {
2671 if (is_broadcast_ether_addr(al->list[i].addr) ||
2672 is_zero_ether_addr(al->list[i].addr)) {
2673 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2674 al->list[i].addr, vf->vf_id);
2675 ret = I40E_ERR_INVALID_MAC_ADDR;
2679 if (vf->pf_set_mac &&
2680 ether_addr_equal(al->list[i].addr,
2681 vf->default_lan_addr.addr)) {
2682 dev_err(&pf->pdev->dev,
2683 "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
2684 vf->default_lan_addr.addr, vf->vf_id);
2685 ret = I40E_ERR_PARAM;
2689 vsi = pf->vsi[vf->lan_vsi_idx];
2691 spin_lock_bh(&vsi->mac_filter_hash_lock);
2692 /* delete addresses from the list */
2693 for (i = 0; i < al->num_elements; i++)
2694 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2695 ret = I40E_ERR_INVALID_MAC_ADDR;
2696 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2702 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2704 /* program the updated filter list */
2705 ret = i40e_sync_vsi_filters(vsi);
2707 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
vf->vf_id, ret);
2711 /* send the response to the VF */
2712 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2717 * i40e_vc_add_vlan_msg
2718 * @vf: pointer to the VF info
2719 * @msg: pointer to the msg buffer
2721 * program guest vlan id
2723 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
2725 struct virtchnl_vlan_filter_list *vfl =
2726 (struct virtchnl_vlan_filter_list *)msg;
2727 struct i40e_pf *pf = vf->pf;
2728 struct i40e_vsi *vsi = NULL;
2729 i40e_status aq_ret = 0;
2732 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2733 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2734 dev_err(&pf->pdev->dev,
2735 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2738 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2739 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2740 aq_ret = I40E_ERR_PARAM;
2744 for (i = 0; i < vfl->num_elements; i++) {
2745 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2746 aq_ret = I40E_ERR_PARAM;
2747 dev_err(&pf->pdev->dev,
2748 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2752 vsi = pf->vsi[vf->lan_vsi_idx];
2753 if (vsi->info.pvid) {
2754 aq_ret = I40E_ERR_PARAM;
2758 i40e_vlan_stripping_enable(vsi);
2759 for (i = 0; i < vfl->num_elements; i++) {
2760 /* add new VLAN filter */
2761 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
2765 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2766 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2770 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2771 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2777 dev_err(&pf->pdev->dev,
2778 "Unable to add VLAN filter %d for VF %d, error %d\n",
2779 vfl->vlan_id[i], vf->vf_id, ret);
2783 /* send the response to the VF */
2784 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2788 * i40e_vc_remove_vlan_msg
2789 * @vf: pointer to the VF info
2790 * @msg: pointer to the msg buffer
2792 * remove programmed guest vlan id
2794 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
2796 struct virtchnl_vlan_filter_list *vfl =
2797 (struct virtchnl_vlan_filter_list *)msg;
2798 struct i40e_pf *pf = vf->pf;
2799 struct i40e_vsi *vsi = NULL;
2800 i40e_status aq_ret = 0;
2803 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2804 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2805 aq_ret = I40E_ERR_PARAM;
2809 for (i = 0; i < vfl->num_elements; i++) {
2810 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2811 aq_ret = I40E_ERR_PARAM;
2816 vsi = pf->vsi[vf->lan_vsi_idx];
2817 if (vsi->info.pvid) {
2818 if (vfl->num_elements > 1 || vfl->vlan_id[0])
2819 aq_ret = I40E_ERR_PARAM;
2823 for (i = 0; i < vfl->num_elements; i++) {
2824 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
2827 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2828 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2832 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2833 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2840 /* send the response to the VF */
2841 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
* i40e_vc_iwarp_msg
2846 * @vf: pointer to the VF info
2847 * @msg: pointer to the msg buffer
2848 * @msglen: msg length
2850 * called from the VF for the iwarp msgs
2852 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2854 struct i40e_pf *pf = vf->pf;
2855 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2856 i40e_status aq_ret = 0;
2858 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2859 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2860 aq_ret = I40E_ERR_PARAM;
2864 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2868 /* send the response to the VF */
2869 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2874 * i40e_vc_iwarp_qvmap_msg
2875 * @vf: pointer to the VF info
2876 * @msg: pointer to the msg buffer
2877 * @config: config qvmap or release it
2879 * called from the VF for the iwarp msgs
2881 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
2883 struct virtchnl_iwarp_qvlist_info *qvlist_info =
2884 (struct virtchnl_iwarp_qvlist_info *)msg;
2885 i40e_status aq_ret = 0;
2887 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2888 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2889 aq_ret = I40E_ERR_PARAM;
2894 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2895 aq_ret = I40E_ERR_PARAM;
2897 i40e_release_iwarp_qvlist(vf);
2901 /* send the response to the VF */
2902 return i40e_vc_send_resp_to_vf(vf,
2903 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2904 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2909 * i40e_vc_config_rss_key
2910 * @vf: pointer to the VF info
2911 * @msg: pointer to the msg buffer
2913 * Configure the VF's RSS key
2915 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
2917 struct virtchnl_rss_key *vrk =
2918 (struct virtchnl_rss_key *)msg;
2919 struct i40e_pf *pf = vf->pf;
2920 struct i40e_vsi *vsi = NULL;
2921 i40e_status aq_ret = 0;
2923 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2924 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
2925 (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2926 aq_ret = I40E_ERR_PARAM;
2930 vsi = pf->vsi[vf->lan_vsi_idx];
2931 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2933 /* send the response to the VF */
2934 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2939 * i40e_vc_config_rss_lut
2940 * @vf: pointer to the VF info
2941 * @msg: pointer to the msg buffer
2943 * Configure the VF's RSS LUT
2945 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
2947 struct virtchnl_rss_lut *vrl =
2948 (struct virtchnl_rss_lut *)msg;
2949 struct i40e_pf *pf = vf->pf;
2950 struct i40e_vsi *vsi = NULL;
2951 i40e_status aq_ret = 0;
2954 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2955 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
2956 (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2957 aq_ret = I40E_ERR_PARAM;
2961 for (i = 0; i < vrl->lut_entries; i++)
2962 if (vrl->lut[i] >= vf->num_queue_pairs) {
2963 aq_ret = I40E_ERR_PARAM;
2967 vsi = pf->vsi[vf->lan_vsi_idx];
2968 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2969 /* send the response to the VF */
2971 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
aq_ret);
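/* Illustrative LUT as a VF might build it (hypothetical queue count): the
 * loop above enforces that every entry indexes a valid queue pair, so a
 * VF with 4 queue pairs typically spreads the hash buckets like this:
 *
 *	u8 lut[I40E_VF_HLUT_ARRAY_SIZE];
 *	int i;
 *
 *	for (i = 0; i < I40E_VF_HLUT_ARRAY_SIZE; i++)
 *		lut[i] = i % 4;		// only queues 0-3 exist
 */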
2976 * i40e_vc_get_rss_hena
2977 * @vf: pointer to the VF info
2978 * @msg: pointer to the msg buffer
2980 * Return the RSS HENA bits allowed by the hardware
2982 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
2984 struct virtchnl_rss_hena *vrh = NULL;
2985 struct i40e_pf *pf = vf->pf;
2986 i40e_status aq_ret = 0;
2989 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2990 aq_ret = I40E_ERR_PARAM;
2993 len = sizeof(struct virtchnl_rss_hena);
2995 vrh = kzalloc(len, GFP_KERNEL);
if (!vrh) {
2997 aq_ret = I40E_ERR_NO_MEMORY;
3001 vrh->hena = i40e_pf_get_default_rss_hena(pf);
3003 /* send the response back to the VF */
3004 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3005 aq_ret, (u8 *)vrh, len);
3011 * i40e_vc_set_rss_hena
3012 * @vf: pointer to the VF info
3013 * @msg: pointer to the msg buffer
3015 * Set the RSS HENA bits for the VF
3017 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3019 struct virtchnl_rss_hena *vrh =
3020 (struct virtchnl_rss_hena *)msg;
3021 struct i40e_pf *pf = vf->pf;
3022 struct i40e_hw *hw = &pf->hw;
3023 i40e_status aq_ret = 0;
3025 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3026 aq_ret = I40E_ERR_PARAM;
3029 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3030 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3031 (u32)(vrh->hena >> 32));
3033 /* send the response to the VF */
3035 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
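/* The HENA value is a 64-bit bitmap of hash-enabled flow types while the
 * VFQF_HENA1 registers are 32 bits wide, hence the two writes above. With
 * a hypothetical value:
 *
 *	u64 hena = 0x0000000500000003ULL;
 *	u32 lo = (u32)hena;		// 0x00000003 -> VFQF_HENA1(0, vf_id)
 *	u32 hi = (u32)(hena >> 32);	// 0x00000005 -> VFQF_HENA1(1, vf_id)
 */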
3039 * i40e_vc_enable_vlan_stripping
3040 * @vf: pointer to the VF info
3041 * @msg: pointer to the msg buffer
3043 * Enable vlan header stripping for the VF
3045 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3047 i40e_status aq_ret = 0;
3048 struct i40e_vsi *vsi;
3050 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3051 aq_ret = I40E_ERR_PARAM;
3055 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3056 i40e_vlan_stripping_enable(vsi);
3058 /* send the response to the VF */
3060 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3065 * i40e_vc_disable_vlan_stripping
3066 * @vf: pointer to the VF info
3067 * @msg: pointer to the msg buffer
3069 * Disable vlan header stripping for the VF
3071 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3073 i40e_status aq_ret = 0;
3074 struct i40e_vsi *vsi;
3076 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3077 aq_ret = I40E_ERR_PARAM;
3081 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3082 i40e_vlan_stripping_disable(vsi);
3084 /* send the response to the VF */
3086 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3091 * i40e_validate_cloud_filter
3092 * @vf: pointer to the VF info
3093 * @tc_filter: TC filter to validate (carries the filter's mask and data)
3095 * This function validates a cloud filter programmed as a TC filter for ADq
3097 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3098 struct virtchnl_filter *tc_filter)
3100 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3101 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3102 struct i40e_pf *pf = vf->pf;
3103 struct i40e_vsi *vsi = NULL;
3104 struct i40e_mac_filter *f;
3105 struct hlist_node *h;
3109 if (!tc_filter->action) {
3110 dev_info(&pf->pdev->dev,
3111 "VF %d: Currently ADq doesn't support Drop Action\n",
3116 /* action_meta is TC number here to which the filter is applied */
3117 if (!tc_filter->action_meta ||
3118 tc_filter->action_meta > I40E_MAX_VF_VSI) {
3119 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3120 vf->vf_id, tc_filter->action_meta);
3124 /* Check whether the filter is programmed for advanced or basic mode.
3125 * There are two ADq modes (for VF only):
3126 * 1. Basic mode: intended to allow as many filter options as possible
3127 * to be added to a VF in non-trusted mode. The main goal is
3128 * to add filters to its own MAC and VLAN id.
3129 * 2. Advanced mode: allows filters to be applied to MACs or VLANs other
3130 * than its own. This mode requires the VF to be trusted.
 */
3133 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3134 vsi = pf->vsi[vf->lan_vsi_idx];
3135 f = i40e_find_mac(vsi, data.dst_mac);
3138 dev_info(&pf->pdev->dev,
3139 "Destination MAC %pM doesn't belong to VF %d\n",
3140 data.dst_mac, vf->vf_id);
3145 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
hlist) {
3147 if (f->vlan == ntohs(data.vlan_id)) {
3153 dev_info(&pf->pdev->dev,
3154 "VF %d doesn't have any VLAN id %u\n",
3155 vf->vf_id, ntohs(data.vlan_id));
3160 /* Check if VF is trusted */
3161 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3162 dev_err(&pf->pdev->dev,
3163 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3165 return I40E_ERR_CONFIG;
3169 if (mask.dst_mac[0] & data.dst_mac[0]) {
3170 if (is_broadcast_ether_addr(data.dst_mac) ||
3171 is_zero_ether_addr(data.dst_mac)) {
3172 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3173 vf->vf_id, data.dst_mac);
3178 if (mask.src_mac[0] & data.src_mac[0]) {
3179 if (is_broadcast_ether_addr(data.src_mac) ||
3180 is_zero_ether_addr(data.src_mac)) {
3181 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3182 vf->vf_id, data.src_mac);
3187 if (mask.dst_port & data.dst_port) {
3188 if (!data.dst_port) {
3189 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3195 if (mask.src_port & data.src_port) {
3196 if (!data.src_port) {
3197 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3203 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3204 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3205 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3210 if (mask.vlan_id & data.vlan_id) {
3211 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3212 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3218 return I40E_SUCCESS;
3220 return I40E_ERR_CONFIG;
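/* The mask/data convention used by the checks above, shown with
 * hypothetical field values: a field participates in validation only when
 * both its mask and its data are non-zero, e.g.
 *
 *	mask.dst_port = htons(0xffff);  data.dst_port = htons(80);	// checked
 *	mask.src_port = 0;              data.src_port = htons(1024);	// ignored
 *
 * which is why each branch is gated on (mask.field & data.field).
 */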
3224 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3225 * @vf: pointer to the VF info
3226 * @seid: seid of the VSI being searched for
3228 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3230 struct i40e_pf *pf = vf->pf;
3231 struct i40e_vsi *vsi = NULL;
3234 for (i = 0; i < vf->num_tc; i++) {
3235 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3236 if (vsi && vsi->seid == seid)
3243 * i40e_del_all_cloud_filters
3244 * @vf: pointer to the VF info
3246 * This function deletes all cloud filters
3248 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3250 struct i40e_cloud_filter *cfilter = NULL;
3251 struct i40e_pf *pf = vf->pf;
3252 struct i40e_vsi *vsi = NULL;
3253 struct hlist_node *node;
3256 hlist_for_each_entry_safe(cfilter, node,
3257 &vf->cloud_filter_list, cloud_node) {
3258 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3261 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3262 vf->vf_id, cfilter->seid);
3266 if (cfilter->dst_port)
3267 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3270 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3272 dev_err(&pf->pdev->dev,
3273 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3274 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3275 i40e_aq_str(&pf->hw,
3276 pf->hw.aq.asq_last_status));
3278 hlist_del(&cfilter->cloud_node);
3280 vf->num_cloud_filters--;
3285 * i40e_vc_del_cloud_filter
3286 * @vf: pointer to the VF info
3287 * @msg: pointer to the msg buffer
3289 * This function deletes a cloud filter programmed as TC filter for ADq
3291 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3293 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3294 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3295 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3296 struct i40e_cloud_filter cfilter, *cf = NULL;
3297 struct i40e_pf *pf = vf->pf;
3298 struct i40e_vsi *vsi = NULL;
3299 struct hlist_node *node;
3300 i40e_status aq_ret = 0;
3303 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3304 aq_ret = I40E_ERR_PARAM;
3308 if (!vf->adq_enabled) {
3309 dev_info(&pf->pdev->dev,
3310 "VF %d: ADq not enabled, can't apply cloud filter\n",
3312 aq_ret = I40E_ERR_PARAM;
3316 if (i40e_validate_cloud_filter(vf, vcf)) {
3317 dev_info(&pf->pdev->dev,
3318 "VF %d: Invalid input, can't apply cloud filter\n",
3320 aq_ret = I40E_ERR_PARAM;
3324 memset(&cfilter, 0, sizeof(cfilter));
3325 /* parse destination mac address */
3326 for (i = 0; i < ETH_ALEN; i++)
3327 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3329 /* parse source mac address */
3330 for (i = 0; i < ETH_ALEN; i++)
3331 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3333 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3334 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3335 cfilter.src_port = mask.src_port & tcf.src_port;
3337 switch (vcf->flow_type) {
3338 case VIRTCHNL_TCP_V4_FLOW:
3339 cfilter.n_proto = ETH_P_IP;
3340 if (mask.dst_ip[0] & tcf.dst_ip[0])
3341 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3342 ARRAY_SIZE(tcf.dst_ip));
3343 else if (mask.src_ip[0] & tcf.src_ip[0])
3344 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3345 ARRAY_SIZE(tcf.src_ip));
3347 case VIRTCHNL_TCP_V6_FLOW:
3348 cfilter.n_proto = ETH_P_IPV6;
3349 if (mask.dst_ip[3] & tcf.dst_ip[3])
3350 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3351 sizeof(cfilter.ip.v6.dst_ip6));
3352 if (mask.src_ip[3] & tcf.src_ip[3])
3353 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3354 sizeof(cfilter.ip.v6.src_ip6));
3357 /* TC filter can be configured based on different combinations
3358 * and in this case IP is not a part of filter config
3360 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3364 /* get the vsi to which the tc belongs to */
3365 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3366 cfilter.seid = vsi->seid;
3367 cfilter.flags = vcf->field_flags;
3369 /* Deleting TC filter */
3371 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3373 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3375 dev_err(&pf->pdev->dev,
3376 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3377 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3378 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3382 hlist_for_each_entry_safe(cf, node,
3383 &vf->cloud_filter_list, cloud_node) {
3384 if (cf->seid != cfilter.seid)
3387 if (cfilter.dst_port != cf->dst_port)
3389 if (mask.dst_mac[0])
3390 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3392 /* for ipv4 data to be valid, only the first word of the mask is set */
3393 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3394 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3395 ARRAY_SIZE(tcf.dst_ip)))
3397 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3398 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3399 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3400 sizeof(cfilter.ip.v6.dst_ip6)))
3403 if (cfilter.vlan_id != cf->vlan_id)
3406 hlist_del(&cf->cloud_node);
3408 vf->num_cloud_filters--;
3412 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3417 * i40e_vc_add_cloud_filter
3418 * @vf: pointer to the VF info
3419 * @msg: pointer to the msg buffer
3421 * This function adds a cloud filter programmed as TC filter for ADq
3423 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3425 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3426 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3427 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3428 struct i40e_cloud_filter *cfilter = NULL;
3429 struct i40e_pf *pf = vf->pf;
3430 struct i40e_vsi *vsi = NULL;
3431 i40e_status aq_ret = 0;
3434 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3435 aq_ret = I40E_ERR_PARAM;
3439 if (!vf->adq_enabled) {
3440 dev_info(&pf->pdev->dev,
3441 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3443 aq_ret = I40E_ERR_PARAM;
3447 if (i40e_validate_cloud_filter(vf, vcf)) {
3448 dev_info(&pf->pdev->dev,
3449 "VF %d: Invalid input/s, can't apply cloud filter\n",
3451 aq_ret = I40E_ERR_PARAM;
3455 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter)
return -ENOMEM;
3459 /* parse destination mac address */
3460 for (i = 0; i < ETH_ALEN; i++)
3461 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3463 /* parse source mac address */
3464 for (i = 0; i < ETH_ALEN; i++)
3465 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3467 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3468 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3469 cfilter->src_port = mask.src_port & tcf.src_port;
3471 switch (vcf->flow_type) {
3472 case VIRTCHNL_TCP_V4_FLOW:
3473 cfilter->n_proto = ETH_P_IP;
3474 if (mask.dst_ip[0] & tcf.dst_ip[0])
3475 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3476 ARRAY_SIZE(tcf.dst_ip));
3477 else if (mask.src_ip[0] & tcf.src_ip[0])
3478 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3479 ARRAY_SIZE(tcf.src_ip));
3481 case VIRTCHNL_TCP_V6_FLOW:
3482 cfilter->n_proto = ETH_P_IPV6;
3483 if (mask.dst_ip[3] & tcf.dst_ip[3])
3484 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3485 sizeof(cfilter->ip.v6.dst_ip6));
3486 if (mask.src_ip[3] & tcf.src_ip[3])
3487 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3488 sizeof(cfilter->ip.v6.src_ip6));
3491 /* TC filter can be configured based on different combinations
3492 * and in this case IP is not a part of filter config
3494 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3498 /* get the VSI to which the TC belongs to */
3499 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3500 cfilter->seid = vsi->seid;
3501 cfilter->flags = vcf->field_flags;
3503 /* Adding cloud filter programmed as TC filter */
3505 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3507 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3509 dev_err(&pf->pdev->dev,
3510 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3511 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3512 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3516 INIT_HLIST_NODE(&cfilter->cloud_node);
3517 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3518 /* ownership of cfilter passes to the list; clear the local pointer */
cfilter = NULL;
3520 vf->num_cloud_filters++;
3524 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
aq_ret);
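/* For reference, a minimal sketch (hypothetical values, not a verbatim
 * VF implementation) of the virtchnl_filter a VF could send to reach this
 * handler: steer TCP/IPv4 traffic with destination port 80 to TC 1.
 *
 *	struct virtchnl_filter f = {};
 *
 *	f.flow_type = VIRTCHNL_TCP_V4_FLOW;
 *	f.action = VIRTCHNL_ACTION_TC_REDIRECT;
 *	f.action_meta = 1;			// target traffic class
 *	f.mask.tcp_spec.dst_port = htons(0xffff);
 *	f.data.tcp_spec.dst_port = htons(80);
 */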
3529 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3530 * @vf: pointer to the VF info
3531 * @msg: pointer to the msg buffer
3533 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3535 struct virtchnl_tc_info *tci =
3536 (struct virtchnl_tc_info *)msg;
3537 struct i40e_pf *pf = vf->pf;
3538 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3539 int i, adq_request_qps = 0;
3540 i40e_status aq_ret = 0;
3543 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3544 aq_ret = I40E_ERR_PARAM;
3548 /* ADq cannot be applied if spoof check is ON */
if (vf->spoofchk) {
3550 dev_err(&pf->pdev->dev,
3551 "Spoof check is ON, turn it OFF to enable ADq\n");
3552 aq_ret = I40E_ERR_PARAM;
3556 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3557 dev_err(&pf->pdev->dev,
3558 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3560 aq_ret = I40E_ERR_PARAM;
3564 /* max number of traffic classes for VF currently capped at 4 */
3565 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3566 dev_err(&pf->pdev->dev,
3567 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3568 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3569 aq_ret = I40E_ERR_PARAM;
3573 /* validate queues for each TC */
3574 for (i = 0; i < tci->num_tc; i++)
3575 if (!tci->list[i].count ||
3576 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3577 dev_err(&pf->pdev->dev,
3578 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3579 vf->vf_id, i, tci->list[i].count,
3580 I40E_DEFAULT_QUEUES_PER_VF);
3581 aq_ret = I40E_ERR_PARAM;
3585 /* need Max VF queues but already have default number of queues */
3586 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
3588 if (pf->queues_left < adq_request_qps) {
3589 dev_err(&pf->pdev->dev,
3590 "No queues left to allocate to VF %d\n",
3592 aq_ret = I40E_ERR_PARAM;
3595 /* we need to allocate max VF queues to enable ADq so as to
3596 * make sure ADq enabled VF always gets back queues when it
3597 * goes through a reset.
3599 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3602 /* get link speed in MB to validate rate limit */
3603 switch (ls->link_speed) {
3604 case VIRTCHNL_LINK_SPEED_100MB:
speed = SPEED_100;
3607 case VIRTCHNL_LINK_SPEED_1GB:
speed = SPEED_1000;
3610 case VIRTCHNL_LINK_SPEED_10GB:
3611 speed = SPEED_10000;
3613 case VIRTCHNL_LINK_SPEED_20GB:
3614 speed = SPEED_20000;
3616 case VIRTCHNL_LINK_SPEED_25GB:
3617 speed = SPEED_25000;
3619 case VIRTCHNL_LINK_SPEED_40GB:
3620 speed = SPEED_40000;
3623 dev_err(&pf->pdev->dev,
3624 "Cannot detect link speed\n");
3625 aq_ret = I40E_ERR_PARAM;
3629 /* parse data from the queue channel info */
3630 vf->num_tc = tci->num_tc;
3631 for (i = 0; i < vf->num_tc; i++) {
3632 if (tci->list[i].max_tx_rate) {
3633 if (tci->list[i].max_tx_rate > speed) {
3634 dev_err(&pf->pdev->dev,
3635 "Invalid max tx rate %llu specified for VF %d.",
3636 tci->list[i].max_tx_rate,
3638 aq_ret = I40E_ERR_PARAM;
3641 vf->ch[i].max_tx_rate =
3642 tci->list[i].max_tx_rate;
3645 vf->ch[i].num_qps = tci->list[i].count;
3648 /* set this flag only after making sure all inputs are sane */
3649 vf->adq_enabled = true;
3650 /* num_req_queues is set when the user changes the number of queues via
3651 * ethtool, and this causes issues for the default VSI (which depends on
3652 * this variable) when ADq is enabled, hence reset it.
3654 vf->num_req_queues = 0;
3656 /* reset the VF in order to allocate resources */
3657 i40e_vc_notify_vf_reset(vf);
3658 i40e_reset_vf(vf, false);
3660 return I40E_SUCCESS;
3662 /* send the response to the VF */
3664 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
aq_ret);
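/* Worked budget example for the check above (hypothetical PF state, using
 * the I40E_MAX_VF_QUEUES = 16 and I40E_DEFAULT_QUEUES_PER_VF = 4 values
 * this driver defines):
 *
 *	adq_request_qps = 16 - 4;	// 12 additional pairs required
 *
 * With pf->queues_left = 10 the request is rejected; with 12 or more it
 * proceeds and vf->num_queue_pairs becomes I40E_MAX_VF_QUEUES.
 */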
3669 * i40e_vc_del_qch_msg
3670 * @vf: pointer to the VF info
3671 * @msg: pointer to the msg buffer
3673 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3675 struct i40e_pf *pf = vf->pf;
3676 i40e_status aq_ret = 0;
3678 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
3679 aq_ret = I40E_ERR_PARAM;
3683 if (vf->adq_enabled) {
3684 i40e_del_all_cloud_filters(vf);
3686 vf->adq_enabled = false;
3688 dev_info(&pf->pdev->dev,
3689 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3692 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3694 aq_ret = I40E_ERR_PARAM;
3697 /* reset the VF in order to allocate resources */
3698 i40e_vc_notify_vf_reset(vf);
3699 i40e_reset_vf(vf, false);
3701 return I40E_SUCCESS;
3704 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3709 * i40e_vc_process_vf_msg
3710 * @pf: pointer to the PF structure
3711 * @vf_id: source VF id
3712 * @v_opcode: operation code
3713 * @v_retval: unused return value code
3714 * @msg: pointer to the msg buffer
3715 * @msglen: msg length
3717 * called from the common aeq/arq handler to
3718 * process request from VF
3720 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
3721 u32 __always_unused v_retval, u8 *msg, u16 msglen)
3723 struct i40e_hw *hw = &pf->hw;
3724 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
3728 pf->vf_aq_requests++;
3729 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
3731 vf = &(pf->vf[local_vf_id]);
3733 /* Check if VF is disabled. */
3734 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
3735 return I40E_ERR_PARAM;
3737 /* perform basic checks on the msg */
3738 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3741 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
3742 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
3743 local_vf_id, v_opcode, msglen);
3745 case VIRTCHNL_STATUS_ERR_PARAM:
3753 case VIRTCHNL_OP_VERSION:
3754 ret = i40e_vc_get_version_msg(vf, msg);
3756 case VIRTCHNL_OP_GET_VF_RESOURCES:
3757 ret = i40e_vc_get_vf_resources_msg(vf, msg);
3758 i40e_vc_notify_vf_link_state(vf);
3760 case VIRTCHNL_OP_RESET_VF:
3761 i40e_vc_reset_vf_msg(vf);
3764 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3765 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
3767 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3768 ret = i40e_vc_config_queues_msg(vf, msg);
3770 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3771 ret = i40e_vc_config_irq_map_msg(vf, msg);
3773 case VIRTCHNL_OP_ENABLE_QUEUES:
3774 ret = i40e_vc_enable_queues_msg(vf, msg);
3775 i40e_vc_notify_vf_link_state(vf);
3777 case VIRTCHNL_OP_DISABLE_QUEUES:
3778 ret = i40e_vc_disable_queues_msg(vf, msg);
3780 case VIRTCHNL_OP_ADD_ETH_ADDR:
3781 ret = i40e_vc_add_mac_addr_msg(vf, msg);
3783 case VIRTCHNL_OP_DEL_ETH_ADDR:
3784 ret = i40e_vc_del_mac_addr_msg(vf, msg);
3786 case VIRTCHNL_OP_ADD_VLAN:
3787 ret = i40e_vc_add_vlan_msg(vf, msg);
3789 case VIRTCHNL_OP_DEL_VLAN:
3790 ret = i40e_vc_remove_vlan_msg(vf, msg);
3792 case VIRTCHNL_OP_GET_STATS:
3793 ret = i40e_vc_get_stats_msg(vf, msg);
3795 case VIRTCHNL_OP_IWARP:
3796 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
3798 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
3799 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
3801 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
3802 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
3804 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3805 ret = i40e_vc_config_rss_key(vf, msg);
3807 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3808 ret = i40e_vc_config_rss_lut(vf, msg);
3810 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
3811 ret = i40e_vc_get_rss_hena(vf, msg);
3813 case VIRTCHNL_OP_SET_RSS_HENA:
3814 ret = i40e_vc_set_rss_hena(vf, msg);
3816 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3817 ret = i40e_vc_enable_vlan_stripping(vf, msg);
3819 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3820 ret = i40e_vc_disable_vlan_stripping(vf, msg);
3822 case VIRTCHNL_OP_REQUEST_QUEUES:
3823 ret = i40e_vc_request_queues_msg(vf, msg);
3825 case VIRTCHNL_OP_ENABLE_CHANNELS:
3826 ret = i40e_vc_add_qch_msg(vf, msg);
3828 case VIRTCHNL_OP_DISABLE_CHANNELS:
3829 ret = i40e_vc_del_qch_msg(vf, msg);
3831 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
3832 ret = i40e_vc_add_cloud_filter(vf, msg);
3834 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
3835 ret = i40e_vc_del_cloud_filter(vf, msg);
3837 case VIRTCHNL_OP_UNKNOWN:
3839 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
3840 v_opcode, local_vf_id);
3841 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
3842 I40E_ERR_NOT_IMPLEMENTED);
3850 * i40e_vc_process_vflr_event
3851 * @pf: pointer to the PF structure
3853 * called from the VFLR irq handler to
3854 * free up VF resources and state variables
3856 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
3858 struct i40e_hw *hw = &pf->hw;
3859 u32 reg, reg_idx, bit_idx;
3863 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
3866 /* Re-enable the VFLR interrupt cause here, before looking for which
3867 * VF got reset. Otherwise, if another VF gets a reset while the
3868 * first one is being processed, that interrupt will be lost, and
3869 * that VF will be stuck in reset forever.
3871 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3872 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
3873 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3876 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3877 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
3878 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
3879 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
3880 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
3881 vf = &pf->vf[vf_id];
3882 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
3883 if (reg & BIT(bit_idx))
3884 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
3885 i40e_reset_vf(vf, true);
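/* Index arithmetic used above, with a hypothetical absolute VF number:
 * GLGEN_VFLRSTAT is an array of 32-bit registers holding one bit per VF.
 *
 *	u32 abs_vf_id = 37;		// vf_base_id + vf_id
 *	u32 reg_idx = abs_vf_id / 32;	// register 1
 *	u32 bit_idx = abs_vf_id % 32;	// bit 5 of that register
 */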
* i40e_validate_vf
3893 * @pf: the physical function
3894 * @vf_id: VF identifier
3896 * Check that the VF is enabled and the VSI exists.
3898 * Returns 0 on success, negative on failure
3900 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
3902 struct i40e_vsi *vsi;
3906 if (vf_id >= pf->num_alloc_vfs) {
3907 dev_err(&pf->pdev->dev,
3908 "Invalid VF Identifier %d\n", vf_id);
3912 vf = &pf->vf[vf_id];
3913 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
3921 * i40e_ndo_set_vf_mac
3922 * @netdev: network interface device structure
3923 * @vf_id: VF identifier
* @mac: MAC address to set on the VF
3926 * program VF mac address
3928 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3930 struct i40e_netdev_priv *np = netdev_priv(netdev);
3931 struct i40e_vsi *vsi = np->vsi;
3932 struct i40e_pf *pf = vsi->back;
3933 struct i40e_mac_filter *f;
3936 struct hlist_node *h;
3940 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
3941 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
3945 /* validate the request */
3946 ret = i40e_validate_vf(pf, vf_id);
3950 vf = &pf->vf[vf_id];
3951 vsi = pf->vsi[vf->lan_vsi_idx];
3953 /* When the VF is resetting, wait until it is done.
3954 * It can take up to 200 milliseconds,
3955 * but wait for up to 300 milliseconds to be safe.
3957 for (i = 0; i < 15; i++) {
3958 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
3962 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3963 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3969 if (is_multicast_ether_addr(mac)) {
3970 dev_err(&pf->pdev->dev,
3971 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
3976 /* Lock once because below invoked function add/del_filter requires
3977 * mac_filter_hash_lock to be held
3979 spin_lock_bh(&vsi->mac_filter_hash_lock);
3981 /* delete the temporary mac address */
3982 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
3983 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
3985 /* Delete all the filters for this VSI - we're going to kill it
3988 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
3989 __i40e_del_filter(vsi, f);
3991 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3993 /* program mac filter */
3994 if (i40e_sync_vsi_filters(vsi)) {
3995 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
3999 ether_addr_copy(vf->default_lan_addr.addr, mac);
4001 if (is_zero_ether_addr(mac)) {
4002 vf->pf_set_mac = false;
4003 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4005 vf->pf_set_mac = true;
4006 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4010 /* Force the VF interface down so it has to bring up with new MAC
4013 i40e_vc_disable_vf(vf);
4014 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4017 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4022 * i40e_vsi_has_vlans - True if VSI has configured VLANs
4023 * @vsi: pointer to the vsi
4025 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
4026 * we have no configured VLANs. Do not call while holding the
4027 * mac_filter_hash_lock.
4029 static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
4033 /* If we have a port VLAN, then the VSI cannot have any VLANs
4034 * configured, as all MAC/VLAN filters will be assigned to the PVID.
4039 /* Since we don't have a PVID, we know that if the device is in VLAN
4040 * mode it must be because of a VLAN filter configured on this VSI.
4042 spin_lock_bh(&vsi->mac_filter_hash_lock);
4043 have_vlans = i40e_is_vsi_in_vlan(vsi);
4044 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4050 * i40e_ndo_set_vf_port_vlan
4051 * @netdev: network interface device structure
4052 * @vf_id: VF identifier
4053 * @vlan_id: VLAN ID to set
4054 * @qos: priority setting
4055 * @vlan_proto: vlan protocol
4057 * program VF vlan id and/or qos
4059 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4060 u16 vlan_id, u8 qos, __be16 vlan_proto)
4062 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
4063 struct i40e_netdev_priv *np = netdev_priv(netdev);
4064 bool allmulti = false, alluni = false;
4065 struct i40e_pf *pf = np->vsi->back;
4066 struct i40e_vsi *vsi;
4070 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4071 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4075 /* validate the request */
4076 ret = i40e_validate_vf(pf, vf_id);
4080 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4081 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4086 if (vlan_proto != htons(ETH_P_8021Q)) {
4087 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4088 ret = -EPROTONOSUPPORT;
4092 vf = &pf->vf[vf_id];
4093 vsi = pf->vsi[vf->lan_vsi_idx];
4094 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4095 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4101 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4102 /* duplicate request, so just return success */
4105 if (i40e_vsi_has_vlans(vsi)) {
4106 dev_err(&pf->pdev->dev,
4107 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
4109 /* Administrator error - knock the VF offline until the
4110 * administrator reconfigures the network correctly
4111 * and then reloads the VF driver.
4113 i40e_vc_disable_vf(vf);
4114 /* During reset the VF got a new VSI, so refresh the pointer. */
4115 vsi = pf->vsi[vf->lan_vsi_idx];
4118 /* Locked once because multiple functions below iterate list */
4119 spin_lock_bh(&vsi->mac_filter_hash_lock);
4121 /* Check for condition where there was already a port VLAN ID
4122 * filter set and now it is being deleted by setting it to zero.
4123 * Additionally check for the condition where there was a port
4124 * VLAN but now there is a new and different port VLAN being set.
4125 * Before deleting all the old VLAN filters we must add new ones
4126 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4127 * MAC addresses deleted.
4129 if ((!(vlan_id || qos) ||
4130 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4132 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4134 dev_info(&vsi->back->pdev->dev,
4135 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4136 vsi->back->hw.aq.asq_last_status);
4137 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4142 if (vsi->info.pvid) {
4143 /* remove all filters on the old VLAN */
4144 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4148 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4150 /* disable promisc modes in case they were enabled */
4151 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4154 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4159 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4161 i40e_vsi_remove_pvid(vsi);
4162 spin_lock_bh(&vsi->mac_filter_hash_lock);
4165 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4166 vlan_id, qos, vf_id);
4168 /* add new VLAN filter for each MAC */
4169 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4171 dev_info(&vsi->back->pdev->dev,
4172 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4173 vsi->back->hw.aq.asq_last_status);
4174 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4178 /* remove the previously added non-VLAN MAC filters */
4179 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4182 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4184 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4187 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4190 /* Schedule the worker thread to take care of applying changes */
4191 i40e_service_event_schedule(vsi->back);
4194 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4198 /* The Port VLAN needs to be saved across resets the same as the
4199 * default LAN MAC address.
4201 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4203 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4205 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4212 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
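/* Encoding used by vlanprio above, with hypothetical values and assuming
 * the 802.1Q TCI layout that I40E_VLAN_PRIORITY_SHIFT reflects (the 3-bit
 * priority field sits above the 12-bit VLAN ID):
 *
 *	u16 vlan_id = 100, qos = 5;
 *	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
 *
 * Comparing vlanprio against le16_to_cpu(vsi->info.pvid) is how a
 * duplicate port-VLAN request is detected and returned early as success.
 */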
4217 * i40e_ndo_set_vf_bw
4218 * @netdev: network interface device structure
4219 * @vf_id: VF identifier
4220 * @min_tx_rate: Minimum Tx rate
4221 * @max_tx_rate: Maximum Tx rate
4223 * configure VF Tx rate
4225 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4228 struct i40e_netdev_priv *np = netdev_priv(netdev);
4229 struct i40e_pf *pf = np->vsi->back;
4230 struct i40e_vsi *vsi;
4234 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4235 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4239 /* validate the request */
4240 ret = i40e_validate_vf(pf, vf_id);
4245 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
4246 min_tx_rate, vf_id);
4250 vf = &pf->vf[vf_id];
4251 vsi = pf->vsi[vf->lan_vsi_idx];
4252 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4253 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4259 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4263 vf->tx_rate = max_tx_rate;
4265 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4270 * i40e_ndo_get_vf_config
4271 * @netdev: network interface device structure
4272 * @vf_id: VF identifier
4273 * @ivi: VF configuration structure
4275 * return VF configuration
4277 int i40e_ndo_get_vf_config(struct net_device *netdev,
4278 int vf_id, struct ifla_vf_info *ivi)
4280 struct i40e_netdev_priv *np = netdev_priv(netdev);
4281 struct i40e_vsi *vsi = np->vsi;
4282 struct i40e_pf *pf = vsi->back;
4286 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4287 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4291 /* validate the request */
4292 ret = i40e_validate_vf(pf, vf_id);
4296 vf = &pf->vf[vf_id];
4297 /* first vsi is always the LAN vsi */
4298 vsi = pf->vsi[vf->lan_vsi_idx];
4306 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4308 ivi->max_tx_rate = vf->tx_rate;
4309 ivi->min_tx_rate = 0;
4310 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4311 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4312 I40E_VLAN_PRIORITY_SHIFT;
4313 if (!vf->link_forced)
4314 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4315 else if (vf->link_up)
4316 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4318 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4319 ivi->spoofchk = vf->spoofchk;
4320 ivi->trusted = vf->trusted;
4324 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4329 * i40e_ndo_set_vf_link_state
4330 * @netdev: network interface device structure
4331 * @vf_id: VF identifier
4332 * @link: required link state
4334 * Set the link state of a specified VF, regardless of physical link state
4336 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
4338 struct i40e_netdev_priv *np = netdev_priv(netdev);
4339 struct i40e_pf *pf = np->vsi->back;
4340 struct virtchnl_pf_event pfe;
4341 struct i40e_hw *hw = &pf->hw;
4346 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4347 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4351 /* validate the request */
4352 if (vf_id >= pf->num_alloc_vfs) {
4353 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4358 vf = &pf->vf[vf_id];
4359 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
4361 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
4362 pfe.severity = PF_EVENT_SEVERITY_INFO;
4365 case IFLA_VF_LINK_STATE_AUTO:
4366 vf->link_forced = false;
4367 pfe.event_data.link_event.link_status =
4368 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
4369 pfe.event_data.link_event.link_speed =
4370 (enum virtchnl_link_speed)
4371 pf->hw.phy.link_info.link_speed;
4373 case IFLA_VF_LINK_STATE_ENABLE:
4374 vf->link_forced = true;
4376 pfe.event_data.link_event.link_status = true;
4377 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
4379 case IFLA_VF_LINK_STATE_DISABLE:
4380 vf->link_forced = true;
4381 vf->link_up = false;
4382 pfe.event_data.link_event.link_status = false;
4383 pfe.event_data.link_event.link_speed = 0;
4389 /* Notify the VF of its new link state */
4390 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
4391 0, (u8 *)&pfe, sizeof(pfe), NULL);
4394 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4399 * i40e_ndo_set_vf_spoofchk
4400 * @netdev: network interface device structure
4401 * @vf_id: VF identifier
4402 * @enable: flag to enable or disable feature
4404 * Enable or disable VF spoof checking
4406 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
4408 struct i40e_netdev_priv *np = netdev_priv(netdev);
4409 struct i40e_vsi *vsi = np->vsi;
4410 struct i40e_pf *pf = vsi->back;
4411 struct i40e_vsi_context ctxt;
4412 struct i40e_hw *hw = &pf->hw;
4416 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4417 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4421 /* validate the request */
4422 if (vf_id >= pf->num_alloc_vfs) {
4423 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4428 vf = &(pf->vf[vf_id]);
4429 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4430 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4436 if (enable == vf->spoofchk)
4439 vf->spoofchk = enable;
4440 memset(&ctxt, 0, sizeof(ctxt));
4441 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
4442 ctxt.pf_num = pf->hw.pf_id;
4443 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
if (enable)
4445 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
4446 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
4447 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
4449 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
4454 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4459 * i40e_ndo_set_vf_trust
4460 * @netdev: network interface device structure of the pf
4461 * @vf_id: VF identifier
4462 * @setting: trust setting
4464 * Enable or disable VF trust setting
4466 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
4468 struct i40e_netdev_priv *np = netdev_priv(netdev);
4469 struct i40e_pf *pf = np->vsi->back;
4473 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4474 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4478 /* validate the request */
4479 if (vf_id >= pf->num_alloc_vfs) {
4480 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
4485 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4486 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
4491 vf = &pf->vf[vf_id];
4493 if (setting == vf->trusted)
4496 vf->trusted = setting;
4497 i40e_vc_disable_vf(vf);
4498 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
4499 vf_id, setting ? "" : "un");
4501 if (vf->adq_enabled) {
if (!vf->trusted) {
4503 dev_info(&pf->pdev->dev,
4504 "VF %u no longer Trusted, deleting all cloud filters\n",
4506 i40e_del_all_cloud_filters(vf);
4511 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);