// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */

#include "ice_virtchnl.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"
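/* FIELD_SELECTOR() keeps only the per-header field index of a
 * VIRTCHNL_PROTO_HDR_*_* enum (PROTO_HDR_FIELD_MASK) and turns it into the
 * corresponding bit of a virtchnl_proto_hdr field_selector bitmap, which is
 * how a VF tells the PF which packet fields it wants hashed.
 */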
16 #define FIELD_SELECTOR(proto_hdr_field) \
17 BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
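/* The two lookup tables below translate the protocol headers and hash fields
 * a VF requests over virtchnl into the ice flow engine's ICE_FLOW_SEG_HDR_*
 * and ICE_FLOW_FIELD_IDX_* representation; they are consumed by
 * ice_vc_parse_rss_cfg() when servicing RSS configuration requests.
 */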
19 struct ice_vc_hdr_match_type {
20 u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};
24 static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
25 {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
26 {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
27 {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
28 {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
29 {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
30 ICE_FLOW_SEG_HDR_IPV_OTHER},
31 {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
32 ICE_FLOW_SEG_HDR_IPV_OTHER},
33 {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
34 {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
35 {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
36 {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
37 {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
38 {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
39 {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
40 ICE_FLOW_SEG_HDR_GTPU_DWN},
41 {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
42 ICE_FLOW_SEG_HDR_GTPU_UP},
43 {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
44 {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
45 {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};
struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
63 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
64 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
65 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
66 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
67 {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
	 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
	ICE_FLOW_HASH_ETH},
70 {VIRTCHNL_PROTO_HDR_ETH,
71 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
72 BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
73 {VIRTCHNL_PROTO_HDR_S_VLAN,
74 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
75 BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
76 {VIRTCHNL_PROTO_HDR_C_VLAN,
77 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
78 BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
79 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
80 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
81 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
82 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
83 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
	 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
	ICE_FLOW_HASH_IPV4},
86 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
87 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
88 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
89 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
90 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
91 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
92 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
93 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
94 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
95 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
96 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
97 ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
98 {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
99 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
100 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
101 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
102 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
103 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
104 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
	 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
	ICE_FLOW_HASH_IPV6},
107 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
108 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
109 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
110 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
111 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
112 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
113 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
114 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
115 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
116 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
117 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
118 ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
119 {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
120 BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
121 {VIRTCHNL_PROTO_HDR_TCP,
122 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
123 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
124 {VIRTCHNL_PROTO_HDR_TCP,
125 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
126 BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
127 {VIRTCHNL_PROTO_HDR_TCP,
128 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
129 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
130 ICE_FLOW_HASH_TCP_PORT},
131 {VIRTCHNL_PROTO_HDR_UDP,
132 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
133 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
134 {VIRTCHNL_PROTO_HDR_UDP,
135 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
136 BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
137 {VIRTCHNL_PROTO_HDR_UDP,
138 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
139 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
140 ICE_FLOW_HASH_UDP_PORT},
141 {VIRTCHNL_PROTO_HDR_SCTP,
142 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
143 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
144 {VIRTCHNL_PROTO_HDR_SCTP,
145 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
146 BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
147 {VIRTCHNL_PROTO_HDR_SCTP,
148 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
149 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
150 ICE_FLOW_HASH_SCTP_PORT},
151 {VIRTCHNL_PROTO_HDR_PPPOE,
152 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
153 BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
154 {VIRTCHNL_PROTO_HDR_GTPU_IP,
155 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
156 BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
157 {VIRTCHNL_PROTO_HDR_L2TPV3,
158 FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
159 BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
160 {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
161 BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
162 {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
163 BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
164 {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
	 BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};
/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
184 mutex_lock(&pf->vfs.table_lock);
185 ice_for_each_vf(pf, bkt, vf) {
186 /* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
201 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
202 * @vf: pointer to the VF structure
203 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
204 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
211 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
212 pfe->event_data.link_event_adv.link_status = link_up;
214 pfe->event_data.link_event_adv.link_speed =
215 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
218 /* Legacy method for virtchnl link speeds */
219 pfe->event_data.link_event.link_speed =
220 (enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
226 * ice_vc_notify_vf_link_state - Inform a VF of link status
227 * @vf: pointer to the VF structure
 * send a link status message to a single VF
 */
void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;
236 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
237 pfe.severity = PF_EVENT_SEVERITY_INFO;
239 if (ice_is_vf_link_up(vf))
240 ice_set_pfe_link(vf, &pfe,
241 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
245 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
251 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
 * @pf: pointer to the PF structure
 */
void ice_vc_notify_link_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	mutex_lock(&pf->vfs.table_lock);
260 ice_for_each_vf(pf, bkt, vf)
261 ice_vc_notify_vf_link_state(vf);
	mutex_unlock(&pf->vfs.table_lock);
}

/**
266 * ice_vc_notify_reset - Send pending reset message to all VFs
267 * @pf: pointer to the PF structure
 * indicate a pending reset to all VFs on a given PF
 */
void ice_vc_notify_reset(struct ice_pf *pf)
{
	struct virtchnl_pf_event pfe;

	if (!ice_has_vfs(pf))
		return;
278 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
279 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
280 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
			    (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
285 * ice_vc_send_msg_to_vf - Send message to VF
286 * @vf: pointer to the VF info
287 * @v_opcode: virtual channel opcode
288 * @v_retval: virtual channel return value
289 * @msg: pointer to the msg buffer
290 * @msglen: msg length
295 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
296 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
303 dev = ice_pf_to_dev(pf);
305 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
307 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
308 dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
310 ice_aq_str(pf->hw.mailboxq.sq_last_status));
319 * @vf: pointer to the VF info
320 * @msg: pointer to the msg buffer
322 * called from the VF to request the API version used by the PF
324 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
326 struct virtchnl_version_info info = {
327 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
330 vf->vf_ver = *(struct virtchnl_version_info *)msg;
331 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
332 if (VF_IS_V10(&vf->vf_ver))
333 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
335 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
336 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
337 sizeof(struct virtchnl_version_info));
341 * ice_vc_get_max_frame_size - get max frame size allowed for VF
342 * @vf: VF used to determine max frame size
344 * Max frame size is determined based on the current port's max frame size and
345 * whether a port VLAN is configured on this VF. The VF is not aware whether
346 * it's in a port VLAN so the PF needs to account for this in max frame size
347 * checks and sending the max frame size to the VF.
349 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
351 struct ice_port_info *pi = ice_vf_get_port_info(vf);
354 max_frame_size = pi->phy.link_info.max_frame_size;
356 if (ice_vf_is_port_vlan_ena(vf))
357 max_frame_size -= VLAN_HLEN;
359 return max_frame_size;
363 * ice_vc_get_vf_res_msg
364 * @vf: pointer to the VF info
365 * @msg: pointer to the msg buffer
367 * called from the VF to request its resources
369 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
371 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
372 struct virtchnl_vf_resource *vfres = NULL;
373 struct ice_hw *hw = &vf->pf->hw;
378 if (ice_check_vf_init(vf)) {
379 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
383 len = sizeof(struct virtchnl_vf_resource);
385 vfres = kzalloc(len, GFP_KERNEL);
387 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
391 if (VF_IS_V11(&vf->vf_ver))
392 vf->driver_caps = *(u32 *)msg;
394 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
395 VIRTCHNL_VF_OFFLOAD_RSS_REG |
396 VIRTCHNL_VF_OFFLOAD_VLAN;
398 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
399 vsi = ice_get_vf_vsi(vf);
401 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
405 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
406 /* VLAN offloads based on current device configuration */
407 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN_V2;
408 } else if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
		/* allow VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN for
		 * these two conditions, which amounts to guest VLAN filtering
		 * and offloads being based on the inner VLAN or the
		 * inner/single VLAN respectively and don't allow VF to
		 * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other cases
		 */
415 if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
416 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
417 } else if (!ice_is_dvm_ena(hw) &&
418 !ice_vf_is_port_vlan_ena(vf)) {
419 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
420 /* configure backward compatible support for VFs that
421 * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
422 * configured in SVM, and no port VLAN is configured
424 ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
425 } else if (ice_is_dvm_ena(hw)) {
426 /* configure software offloaded VLAN support when DVM
427 * is enabled, but no port VLAN is enabled
429 ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
433 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
434 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
436 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
437 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
439 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
442 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
443 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
445 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
446 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
448 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
449 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
451 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
452 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
454 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
455 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
457 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
458 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
460 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
461 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
463 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
464 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
466 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
467 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
469 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
470 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
473 /* Tx and Rx queue are equal for VF */
474 vfres->num_queue_pairs = vsi->num_txq;
475 vfres->max_vectors = vf->pf->vfs.num_msix_per;
476 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
477 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
478 vfres->max_mtu = ice_vc_get_max_frame_size(vf);
480 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
481 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
482 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
483 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
484 vf->hw_lan_addr.addr);
486 /* match guest capabilities */
487 vf->driver_caps = vfres->vf_cap_flags;
489 ice_vc_set_caps_allowlist(vf);
490 ice_vc_set_working_allowlist(vf);
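	/* the VF has negotiated its capabilities; mark it active so the
	 * remaining virtchnl opcodes (most handlers test ICE_VF_STATE_ACTIVE)
	 * will be serviced
	 */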
492 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
495 /* send the response back to the VF */
496 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
504 * ice_vc_reset_vf_msg
505 * @vf: pointer to the VF info
507 * called from the VF to reset itself,
508 * unlike other virtchnl messages, PF driver
509 * doesn't send the response back to the VF
511 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
513 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
518 * ice_find_vsi_from_id
519 * @pf: the PF structure to search for the VSI
520 * @id: ID of the VSI it is searching for
522 * searches for the VSI with the given ID
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
	int i;

	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
			return pf->vsi[i];

	return NULL;
}

/**
536 * ice_vc_isvalid_vsi_id
537 * @vf: pointer to the VF info
538 * @vsi_id: VF relative VSI ID
540 * check for the valid VSI ID
542 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
544 struct ice_pf *pf = vf->pf;
547 vsi = ice_find_vsi_from_id(pf, vsi_id);
549 return (vsi && (vsi->vf == vf));
553 * ice_vc_isvalid_q_id
554 * @vf: pointer to the VF info
556 * @qid: VSI relative queue ID
558 * check for the valid queue ID
560 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
562 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
	/* allocated Tx and Rx queues should always be equal for VF VSI */
	return (vsi && (qid < vsi->alloc_txq));
}

/**
568 * ice_vc_isvalid_ring_len
569 * @ring_len: length of ring
571 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
static bool ice_vc_isvalid_ring_len(u16 ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= ICE_MIN_NUM_DESC &&
		ring_len <= ICE_MAX_NUM_DESC &&
		!(ring_len % ICE_REQ_DESC_MULTIPLE));
}

/**
583 * ice_vc_validate_pattern
584 * @vf: pointer to the VF info
585 * @proto: virtchnl protocol headers
587 * validate the pattern is supported or not.
589 * Return: true on success, false on error.
592 ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
594 bool is_ipv4 = false;
595 bool is_ipv6 = false;
600 while (i < proto->count &&
601 proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
602 switch (proto->proto_hdr[i].type) {
603 case VIRTCHNL_PROTO_HDR_ETH:
604 ptype = ICE_PTYPE_MAC_PAY;
606 case VIRTCHNL_PROTO_HDR_IPV4:
607 ptype = ICE_PTYPE_IPV4_PAY;
610 case VIRTCHNL_PROTO_HDR_IPV6:
611 ptype = ICE_PTYPE_IPV6_PAY;
614 case VIRTCHNL_PROTO_HDR_UDP:
616 ptype = ICE_PTYPE_IPV4_UDP_PAY;
618 ptype = ICE_PTYPE_IPV6_UDP_PAY;
621 case VIRTCHNL_PROTO_HDR_TCP:
623 ptype = ICE_PTYPE_IPV4_TCP_PAY;
625 ptype = ICE_PTYPE_IPV6_TCP_PAY;
627 case VIRTCHNL_PROTO_HDR_SCTP:
629 ptype = ICE_PTYPE_IPV4_SCTP_PAY;
631 ptype = ICE_PTYPE_IPV6_SCTP_PAY;
633 case VIRTCHNL_PROTO_HDR_GTPU_IP:
634 case VIRTCHNL_PROTO_HDR_GTPU_EH:
636 ptype = ICE_MAC_IPV4_GTPU;
638 ptype = ICE_MAC_IPV6_GTPU;
640 case VIRTCHNL_PROTO_HDR_L2TPV3:
642 ptype = ICE_MAC_IPV4_L2TPV3;
644 ptype = ICE_MAC_IPV6_L2TPV3;
646 case VIRTCHNL_PROTO_HDR_ESP:
648 ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
651 ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
654 case VIRTCHNL_PROTO_HDR_AH:
656 ptype = ICE_MAC_IPV4_AH;
658 ptype = ICE_MAC_IPV6_AH;
660 case VIRTCHNL_PROTO_HDR_PFCP:
662 ptype = ICE_MAC_IPV4_PFCP_SESSION;
664 ptype = ICE_MAC_IPV6_PFCP_SESSION;
673 return ice_hw_ptype_ena(&vf->pf->hw, ptype);
677 * ice_vc_parse_rss_cfg - parses hash fields and headers from
678 * a specific virtchnl RSS cfg
679 * @hw: pointer to the hardware
680 * @rss_cfg: pointer to the virtchnl RSS cfg
681 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
683 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
685 * Return true if all the protocol header and hash fields in the RSS cfg could
686 * be parsed, else return false
688 * This function parses the virtchnl RSS cfg to be the intended
689 * hash fields and the intended header for RSS configuration
692 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
693 u32 *addl_hdrs, u64 *hash_flds)
695 const struct ice_vc_hash_field_match_type *hf_list;
696 const struct ice_vc_hdr_match_type *hdr_list;
697 int i, hf_list_len, hdr_list_len;
699 hf_list = ice_vc_hash_field_list;
700 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
701 hdr_list = ice_vc_hdr_list;
702 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
704 for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
705 struct virtchnl_proto_hdr *proto_hdr =
706 &rss_cfg->proto_hdrs.proto_hdr[i];
707 bool hdr_found = false;
710 /* Find matched ice headers according to virtchnl headers. */
711 for (j = 0; j < hdr_list_len; j++) {
712 struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
714 if (proto_hdr->type == hdr_map.vc_hdr) {
715 *addl_hdrs |= hdr_map.ice_hdr;
723 /* Find matched ice hash fields according to
724 * virtchnl hash fields.
726 for (j = 0; j < hf_list_len; j++) {
727 struct ice_vc_hash_field_match_type hf_map = hf_list[j];
729 if (proto_hdr->type == hf_map.vc_hdr &&
730 proto_hdr->field_selector == hf_map.vc_hash_field) {
731 *hash_flds |= hf_map.ice_hash_field;
741 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
743 * @caps: VF driver negotiated capabilities
745 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
748 static bool ice_vf_adv_rss_offload_ena(u32 caps)
750 return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
754 * ice_vc_handle_rss_cfg
755 * @vf: pointer to the VF info
756 * @msg: pointer to the message buffer
757 * @add: add a RSS config if true, otherwise delete a RSS config
759 * This function adds/deletes a RSS config
761 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
763 u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
764 struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
765 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
766 struct device *dev = ice_pf_to_dev(vf->pf);
767 struct ice_hw *hw = &vf->pf->hw;
770 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
771 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
773 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
777 if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
778 dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
780 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
784 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
785 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
789 if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
790 rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
791 rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
792 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
794 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
798 vsi = ice_get_vf_vsi(vf);
800 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
804 if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
805 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
809 if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
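		/* For the simple XOR algorithm no flow profile is added or
		 * removed; the VSI's RSS hash scheme is instead switched
		 * between XOR (add) and Toeplitz (delete) through the VSI
		 * context update below.
		 */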
810 struct ice_vsi_ctx *ctx;
811 u8 lut_type, hash_type;
814 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
815 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
816 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
818 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
820 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
824 ctx->info.q_opt_rss = ((lut_type <<
825 ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
826 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
828 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
830 /* Preserve existing queueing option setting */
831 ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
832 ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
833 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
834 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
836 ctx->info.valid_sections =
837 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
839 status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
841 dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
842 status, ice_aq_str(hw->adminq.sq_last_status));
843 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
845 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
850 u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
851 u64 hash_flds = ICE_HASH_INVALID;
853 if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
855 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
860 if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
862 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
863 dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
864 vsi->vsi_num, v_ret);
869 status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
		/* We just ignore -ENOENT, because if two configurations
		 * share the same profile, removing one of them actually
		 * removes both, since the profile is deleted.
		 */
875 if (status && status != -ENOENT) {
876 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
877 dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
884 return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
888 * ice_vc_config_rss_key
889 * @vf: pointer to the VF info
890 * @msg: pointer to the msg buffer
892 * Configure the VF's RSS key
894 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
896 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
897 struct virtchnl_rss_key *vrk =
898 (struct virtchnl_rss_key *)msg;
901 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
902 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
906 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
907 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
911 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
912 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
916 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
917 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
921 vsi = ice_get_vf_vsi(vf);
923 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
927 if (ice_set_rss_key(vsi, vrk->key))
928 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
930 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
935 * ice_vc_config_rss_lut
936 * @vf: pointer to the VF info
937 * @msg: pointer to the msg buffer
939 * Configure the VF's RSS LUT
941 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
943 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
944 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
947 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
948 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
952 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
953 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
957 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
958 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
962 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
963 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
967 vsi = ice_get_vf_vsi(vf);
969 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
973 if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
974 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
976 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
981 * ice_vc_cfg_promiscuous_mode_msg
982 * @vf: pointer to the VF info
983 * @msg: pointer to the msg buffer
985 * called from the VF to configure VF VSIs promiscuous mode
987 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
989 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
990 bool rm_promisc, alluni = false, allmulti = false;
991 struct virtchnl_promisc_info *info =
992 (struct virtchnl_promisc_info *)msg;
993 struct ice_vsi_vlan_ops *vlan_ops;
994 int mcast_err = 0, ucast_err = 0;
995 struct ice_pf *pf = vf->pf;
1000 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1001 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1005 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
1006 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1010 vsi = ice_get_vf_vsi(vf);
1012 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1016 dev = ice_pf_to_dev(pf);
1017 if (!ice_is_vf_trusted(vf)) {
1018 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
1020 /* Leave v_ret alone, lie to the VF on purpose. */
1024 if (info->flags & FLAG_VF_UNICAST_PROMISC)
1027 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1030 rm_promisc = !allmulti && !alluni;
1032 vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
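	/* VLAN Rx filtering (pruning) is only kept enabled while the VF is
	 * not in promiscuous mode; it is restored once both unicast and
	 * multicast promiscuous are cleared (rm_promisc).
	 */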
	if (rm_promisc)
		ret = vlan_ops->ena_rx_filtering(vsi);
	else
		ret = vlan_ops->dis_rx_filtering(vsi);
	if (ret) {
		dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto error_param;
	}

1043 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
1044 bool set_dflt_vsi = alluni || allmulti;
1046 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
1047 /* only attempt to set the default forwarding VSI if
1048 * it's not currently set
1050 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
1051 else if (!set_dflt_vsi &&
1052 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
1053 /* only attempt to free the default forwarding VSI if we
1056 ret = ice_clear_dflt_vsi(pf->first_sw);
1059 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
1060 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
1061 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1065 u8 mcast_m, ucast_m;
1067 if (ice_vf_is_port_vlan_ena(vf) ||
1068 ice_vsi_has_non_zero_vlans(vsi)) {
1069 mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
1070 ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
1072 mcast_m = ICE_MCAST_PROMISC_BITS;
1073 ucast_m = ICE_UCAST_PROMISC_BITS;
1077 ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
1079 ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
1082 mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
1084 mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
1086 if (ucast_err || mcast_err)
1087 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1092 !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
1093 dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
1095 else if (!allmulti &&
1096 test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
1098 dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
1104 !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
1105 dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
1108 test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
1110 dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
1115 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1120 * ice_vc_get_stats_msg
1121 * @vf: pointer to the VF info
1122 * @msg: pointer to the msg buffer
1124 * called from the VF to get VSI stats
1126 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1128 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1129 struct virtchnl_queue_select *vqs =
1130 (struct virtchnl_queue_select *)msg;
1131 struct ice_eth_stats stats = { 0 };
1132 struct ice_vsi *vsi;
1134 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1135 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1139 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1140 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1144 vsi = ice_get_vf_vsi(vf);
1146 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1150 ice_update_eth_stats(vsi);
1152 stats = vsi->eth_stats;
1155 /* send the response to the VF */
1156 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1157 (u8 *)&stats, sizeof(stats));
1161 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
1162 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
1164 * Return true on successful validation, else false
1166 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
1168 if ((!vqs->rx_queues && !vqs->tx_queues) ||
1169 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
1170 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
1177 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
1178 * @vsi: VSI of the VF to configure
1179 * @q_idx: VF queue index used to determine the queue in the PF's space
1181 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
1183 struct ice_hw *hw = &vsi->back->hw;
1184 u32 pfq = vsi->txq_map[q_idx];
1187 reg = rd32(hw, QINT_TQCTL(pfq));
1189 /* MSI-X index 0 in the VF's space is always for the OICR, which means
1190 * this is most likely a poll mode VF driver, so don't enable an
1191 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
1193 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
1196 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
1201 * @vsi: VSI of the VF to configure
1202 * @q_idx: VF queue index used to determine the queue in the PF's space
1204 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
1206 struct ice_hw *hw = &vsi->back->hw;
1207 u32 pfq = vsi->rxq_map[q_idx];
1210 reg = rd32(hw, QINT_RQCTL(pfq));
1212 /* MSI-X index 0 in the VF's space is always for the OICR, which means
1213 * this is most likely a poll mode VF driver, so don't enable an
1214 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
1216 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
1219 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
1224 * @vf: pointer to the VF info
1225 * @msg: pointer to the msg buffer
1227 * called from the VF to enable all or specific queue(s)
1229 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
1231 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1232 struct virtchnl_queue_select *vqs =
1233 (struct virtchnl_queue_select *)msg;
1234 struct ice_vsi *vsi;
1235 unsigned long q_map;
1238 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1239 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1243 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1244 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1248 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
1249 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1253 vsi = ice_get_vf_vsi(vf);
1255 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1259 /* Enable only Rx rings, Tx rings were enabled by the FW when the
1260 * Tx queue group list was configured and the context bits were
1261 * programmed using ice_vsi_cfg_txqs
1263 q_map = vqs->rx_queues;
1264 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1265 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1266 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1270 /* Skip queue if enabled */
1271 if (test_bit(vf_q_id, vf->rxq_ena))
1274 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
1275 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
1276 vf_q_id, vsi->vsi_num);
1277 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1281 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
1282 set_bit(vf_q_id, vf->rxq_ena);
1285 q_map = vqs->tx_queues;
1286 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1287 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1288 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1292 /* Skip queue if enabled */
1293 if (test_bit(vf_q_id, vf->txq_ena))
1296 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
1297 set_bit(vf_q_id, vf->txq_ena);
1300 /* Set flag to indicate that queues are enabled */
1301 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
1302 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1305 /* send the response to the VF */
1306 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
1312 * @vf: pointer to the VF info
1313 * @msg: pointer to the msg buffer
1315 * called from the VF to disable all or specific
1318 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
1320 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1321 struct virtchnl_queue_select *vqs =
1322 (struct virtchnl_queue_select *)msg;
1323 struct ice_vsi *vsi;
1324 unsigned long q_map;
1327 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
1328 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
1329 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1333 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1334 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1338 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
1339 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1343 vsi = ice_get_vf_vsi(vf);
1345 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1349 if (vqs->tx_queues) {
1350 q_map = vqs->tx_queues;
1352 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1353 struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
1354 struct ice_txq_meta txq_meta = { 0 };
1356 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1357 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1361 if (!test_bit(vf_q_id, vf->txq_ena))
1362 dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
1363 vf_q_id, vsi->vsi_num);
1365 ice_fill_txq_meta(vsi, ring, &txq_meta);
1367 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
1369 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
1370 vf_q_id, vsi->vsi_num);
1371 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1375 /* Clear enabled queues flag */
1376 clear_bit(vf_q_id, vf->txq_ena);
1380 q_map = vqs->rx_queues;
1381 /* speed up Rx queue disable by batching them if possible */
	if (q_map &&
	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
1384 if (ice_vsi_stop_all_rx_rings(vsi)) {
1385 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
1387 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1391 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
1393 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
1394 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
1395 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1399 /* Skip queue if not enabled */
1400 if (!test_bit(vf_q_id, vf->rxq_ena))
1403 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
1405 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
1406 vf_q_id, vsi->vsi_num);
1407 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1411 /* Clear enabled queues flag */
1412 clear_bit(vf_q_id, vf->rxq_ena);
1416 /* Clear enabled queues flag */
1417 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
1418 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
1421 /* send the response to the VF */
1422 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
1428 * @vf: pointer to the VF info
1429 * @vsi: the VSI being configured
1430 * @vector_id: vector ID
1431 * @map: vector map for mapping vectors to queues
1432 * @q_vector: structure for interrupt vector
1433 * configure the IRQ to queue map
1436 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
1437 struct virtchnl_vector_map *map,
1438 struct ice_q_vector *q_vector)
1440 u16 vsi_q_id, vsi_q_id_idx;
1443 q_vector->num_ring_rx = 0;
1444 q_vector->num_ring_tx = 0;
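	/* walk the Rx and Tx queue bitmaps the VF supplied for this vector
	 * and attach each selected ring to the vector with the requested ITR
	 * index
	 */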
1446 qmap = map->rxq_map;
1447 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
1448 vsi_q_id = vsi_q_id_idx;
1450 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
1451 return VIRTCHNL_STATUS_ERR_PARAM;
1453 q_vector->num_ring_rx++;
1454 q_vector->rx.itr_idx = map->rxitr_idx;
1455 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
1456 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
1457 q_vector->rx.itr_idx);
1460 qmap = map->txq_map;
1461 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
1462 vsi_q_id = vsi_q_id_idx;
1464 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
1465 return VIRTCHNL_STATUS_ERR_PARAM;
1467 q_vector->num_ring_tx++;
1468 q_vector->tx.itr_idx = map->txitr_idx;
1469 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
1470 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
1471 q_vector->tx.itr_idx);
1474 return VIRTCHNL_STATUS_SUCCESS;
1478 * ice_vc_cfg_irq_map_msg
1479 * @vf: pointer to the VF info
1480 * @msg: pointer to the msg buffer
1482 * called from the VF to configure the IRQ to queue map
1484 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
1486 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1487 u16 num_q_vectors_mapped, vsi_id, vector_id;
1488 struct virtchnl_irq_map_info *irqmap_info;
1489 struct virtchnl_vector_map *map;
1490 struct ice_pf *pf = vf->pf;
1491 struct ice_vsi *vsi;
1494 irqmap_info = (struct virtchnl_irq_map_info *)msg;
1495 num_q_vectors_mapped = irqmap_info->num_vectors;
1497 /* Check to make sure number of VF vectors mapped is not greater than
1498 * number of VF vectors originally allocated, and check that
1499 * there is actually at least a single VF queue vector mapped
1501 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1502 pf->vfs.num_msix_per < num_q_vectors_mapped ||
1503 !num_q_vectors_mapped) {
1504 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1508 vsi = ice_get_vf_vsi(vf);
1510 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1514 for (i = 0; i < num_q_vectors_mapped; i++) {
1515 struct ice_q_vector *q_vector;
1517 map = &irqmap_info->vecmap[i];
1519 vector_id = map->vector_id;
1520 vsi_id = map->vsi_id;
1521 /* vector_id is always 0-based for each VF, and can never be
1522 * larger than or equal to the max allowed interrupts per VF
1524 if (!(vector_id < pf->vfs.num_msix_per) ||
1525 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
1526 (!vector_id && (map->rxq_map || map->txq_map))) {
1527 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		/* No need to map VF miscellaneous or rogue vector */
		if (!vector_id)
			continue;

1535 /* Subtract non queue vector from vector_id passed by VF
1536 * to get actual number of VSI queue vector array index
1538 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
		if (!q_vector) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			goto error_param;
		}

1544 /* lookout for the invalid queue index */
1545 v_ret = (enum virtchnl_status_code)
1546 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
1552 /* send the response to the VF */
1553 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
1559 * @vf: pointer to the VF info
1560 * @msg: pointer to the msg buffer
1562 * called from the VF to configure the Rx/Tx queues
1564 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
1566 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1567 struct virtchnl_vsi_queue_config_info *qci =
1568 (struct virtchnl_vsi_queue_config_info *)msg;
1569 struct virtchnl_queue_pair_info *qpi;
1570 struct ice_pf *pf = vf->pf;
1571 struct ice_vsi *vsi;
1574 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1575 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1579 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
1580 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1584 vsi = ice_get_vf_vsi(vf);
1586 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1590 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
1591 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
1592 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
1593 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
1594 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1598 for (i = 0; i < qci->num_queue_pairs; i++) {
1599 qpi = &qci->qpair[i];
1600 if (qpi->txq.vsi_id != qci->vsi_id ||
1601 qpi->rxq.vsi_id != qci->vsi_id ||
1602 qpi->rxq.queue_id != qpi->txq.queue_id ||
1603 qpi->txq.headwb_enabled ||
1604 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
1605 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
1606 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
1607 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1611 q_idx = qpi->rxq.queue_id;
1613 /* make sure selected "q_idx" is in valid range of queues
1614 * for selected "vsi"
1616 if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
1617 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1621 /* copy Tx queue info from VF into VSI */
1622 if (qpi->txq.ring_len > 0) {
1623 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
1624 vsi->tx_rings[i]->count = qpi->txq.ring_len;
1625 if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
1626 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1631 /* copy Rx queue info from VF into VSI */
1632 if (qpi->rxq.ring_len > 0) {
1633 u16 max_frame_size = ice_vc_get_max_frame_size(vf);
1635 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
1636 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
1638 if (qpi->rxq.databuffer_size != 0 &&
1639 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
1640 qpi->rxq.databuffer_size < 1024)) {
1641 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1644 vsi->rx_buf_len = qpi->rxq.databuffer_size;
1645 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
1646 if (qpi->rxq.max_pkt_size > max_frame_size ||
1647 qpi->rxq.max_pkt_size < 64) {
1648 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1652 vsi->max_frame = qpi->rxq.max_pkt_size;
1653 /* add space for the port VLAN since the VF driver is
1654 * not expected to account for it in the MTU
1657 if (ice_vf_is_port_vlan_ena(vf))
1658 vsi->max_frame += VLAN_HLEN;
1660 if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
1661 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1668 /* send the response to the VF */
1669 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
1674 * ice_can_vf_change_mac
1675 * @vf: pointer to the VF info
1677 * Return true if the VF is allowed to change its MAC filters, false otherwise
1679 static bool ice_can_vf_change_mac(struct ice_vf *vf)
1681 /* If the VF MAC address has been set administratively (via the
1682 * ndo_set_vf_mac command), then deny permission to the VF to
	 * add/delete unicast MAC addresses, unless the VF is trusted
	 */
	if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
		return false;

	return true;
}

/**
1692 * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
1693 * @vc_ether_addr: used to extract the type
1696 ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
1698 return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
1702 * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
1703 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1706 ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
1708 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1710 return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
1714 * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
1715 * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
1717 * This function should only be called when the MAC address in
1718 * virtchnl_ether_addr is a valid unicast MAC
1721 ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
1723 u8 type = ice_vc_ether_addr_type(vc_ether_addr);
1725 return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
1729 * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
1731 * @vc_ether_addr: structure from VIRTCHNL with MAC to add
1734 ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1736 u8 *mac_addr = vc_ether_addr->addr;
1738 if (!is_valid_ether_addr(mac_addr))
1741 /* only allow legacy VF drivers to set the device and hardware MAC if it
1742 * is zero and allow new VF drivers to set the hardware MAC if the type
1743 * was correctly specified over VIRTCHNL
1745 if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
1746 is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
1747 ice_is_vc_addr_primary(vc_ether_addr)) {
1748 ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
1749 ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
1752 /* hardware and device MACs are already set, but its possible that the
1753 * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
1754 * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
1755 * away for the legacy VF driver case as it will be updated in the
1756 * delete flow for this case
1758 if (ice_is_vc_addr_legacy(vc_ether_addr)) {
1759 ether_addr_copy(vf->legacy_last_added_umac.addr,
1761 vf->legacy_last_added_umac.time_modified = jiffies;
1766 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
1767 * @vf: pointer to the VF info
1768 * @vsi: pointer to the VF's VSI
1769 * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
1772 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1773 struct virtchnl_ether_addr *vc_ether_addr)
1775 struct device *dev = ice_pf_to_dev(vf->pf);
1776 u8 *mac_addr = vc_ether_addr->addr;
1779 /* device MAC already added */
1780 if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
1783 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
1784 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
1788 ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1789 if (ret == -EEXIST) {
1790 dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
1792 /* don't return since we might need to update
		 * the primary MAC in ice_vfhw_mac_add() below
		 */
	} else if (ret) {
		dev_err(dev, "Failed to add MAC %pM for VF %d, error %d\n",
			mac_addr, vf->vf_id, ret);
		return ret;
	}

	ice_vfhw_mac_add(vf, vc_ether_addr);

	return 0;
}

/**
1809 * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
1810 * @last_added_umac: structure used to check expiration
 */
static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
{
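	/* a legacy VF driver reports a MAC change as an add followed by a
	 * delete; the MAC cached during the add is only honored in the
	 * delete flow if it was recorded within the last 3 seconds
	 */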
1814 #define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
1815 return time_is_before_jiffies(last_added_umac->time_modified +
				      ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
}

/**
1820 * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
1822 * @vc_ether_addr: structure from VIRTCHNL with MAC to check
1824 * only update cached hardware MAC for legacy VF drivers on delete
1825 * because we cannot guarantee order/type of MAC from the VF driver
1828 ice_update_legacy_cached_mac(struct ice_vf *vf,
1829 struct virtchnl_ether_addr *vc_ether_addr)
1831 if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
1832 ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
1835 ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
1836 ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
1840 * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
1842 * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
1845 ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
1847 u8 *mac_addr = vc_ether_addr->addr;
1849 if (!is_valid_ether_addr(mac_addr) ||
1850 !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
1853 /* allow the device MAC to be repopulated in the add flow and don't
1854 * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
1855 * to be persistent on VM reboot and across driver unload/load, which
1856 * won't work if we clear the hardware MAC here
1858 eth_zero_addr(vf->dev_lan_addr.addr);
1860 ice_update_legacy_cached_mac(vf, vc_ether_addr);
1864 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
1865 * @vf: pointer to the VF info
1866 * @vsi: pointer to the VF's VSI
1867 * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
1870 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
1871 struct virtchnl_ether_addr *vc_ether_addr)
1873 struct device *dev = ice_pf_to_dev(vf->pf);
1874 u8 *mac_addr = vc_ether_addr->addr;
1877 if (!ice_can_vf_change_mac(vf) &&
1878 ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
1881 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
1882 if (status == -ENOENT) {
1883 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
1886 } else if (status) {
1887 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
1888 mac_addr, vf->vf_id, status);
	ice_vfhw_mac_del(vf, vc_ether_addr);

	return 0;
}

/**
1900 * ice_vc_handle_mac_addr_msg
1901 * @vf: pointer to the VF info
1902 * @msg: pointer to the msg buffer
1903 * @set: true if MAC filters are being set, false otherwise
 * add or remove guest MAC address filters
 */
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
1910 int (*ice_vc_cfg_mac)
1911 (struct ice_vf *vf, struct ice_vsi *vsi,
1912 struct virtchnl_ether_addr *virtchnl_ether_addr);
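	/* one handler services both the add and the delete opcode; 'set'
	 * selects the per-address callback and the opcode echoed back to the
	 * VF in the reply
	 */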
1913 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1914 struct virtchnl_ether_addr_list *al =
1915 (struct virtchnl_ether_addr_list *)msg;
1916 struct ice_pf *pf = vf->pf;
1917 enum virtchnl_ops vc_op;
1918 struct ice_vsi *vsi;
	if (set) {
		vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_add_mac_addr;
	} else {
		vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
		ice_vc_cfg_mac = ice_vc_del_mac_addr;
	}

1929 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
1930 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
1931 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1932 goto handle_mac_exit;
1935 /* If this VF is not privileged, then we can't add more than a
1936 * limited number of addresses. Check to make sure that the
1937 * additions do not push us over the limit.
1939 if (set && !ice_is_vf_trusted(vf) &&
1940 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
1941 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
1943 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1944 goto handle_mac_exit;
1947 vsi = ice_get_vf_vsi(vf);
1949 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1950 goto handle_mac_exit;
1953 for (i = 0; i < al->num_elements; i++) {
1954 u8 *mac_addr = al->list[i].addr;
1957 if (is_broadcast_ether_addr(mac_addr) ||
1958 is_zero_ether_addr(mac_addr))
1961 result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
1962 if (result == -EEXIST || result == -ENOENT) {
1964 } else if (result) {
1965 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1966 goto handle_mac_exit;
1971 /* send the response to the VF */
1972 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
1976 * ice_vc_add_mac_addr_msg
1977 * @vf: pointer to the VF info
1978 * @msg: pointer to the msg buffer
1980 * add guest MAC address filter
1982 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1984 return ice_vc_handle_mac_addr_msg(vf, msg, true);
1988 * ice_vc_del_mac_addr_msg
1989 * @vf: pointer to the VF info
1990 * @msg: pointer to the msg buffer
1992 * remove guest MAC address filter
1994 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
1996 return ice_vc_handle_mac_addr_msg(vf, msg, false);
2000 * ice_vc_request_qs_msg
2001 * @vf: pointer to the VF info
2002 * @msg: pointer to the msg buffer
2004 * VFs get a default number of queues but can use this message to request a
2005 * different number. If the request is successful, PF will reset the VF and
2006 * return 0. If unsuccessful, PF will send message informing VF of number of
2007 * available queue pairs via virtchnl message response to VF.
2009 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2011 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2012 struct virtchnl_vf_res_request *vfres =
2013 (struct virtchnl_vf_res_request *)msg;
2014 u16 req_queues = vfres->num_queue_pairs;
2015 struct ice_pf *pf = vf->pf;
2016 u16 max_allowed_vf_queues;
2017 u16 tx_rx_queue_left;
2021 dev = ice_pf_to_dev(pf);
2022 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2023 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2027 cur_queues = vf->num_vf_qs;
2028 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2029 ice_get_avail_rxq_count(pf));
2030 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	if (!req_queues) {
		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
			vf->vf_id);
2034 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
2035 dev_err(dev, "VF %d tried to request more than %d queues.\n",
2036 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
2037 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
2038 } else if (req_queues > cur_queues &&
2039 req_queues - cur_queues > tx_rx_queue_left) {
2040 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
2041 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2042 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2043 ICE_MAX_RSS_QS_PER_VF);
2045 /* request is successful, then reset VF */
2046 vf->num_req_qs = req_queues;
2047 ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
2048 dev_info(dev, "VF %d granted request of %u queues.\n",
2049 vf->vf_id, req_queues);
2054 /* send the response to the VF */
2055 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2056 v_ret, (u8 *)vfres, sizeof(*vfres));
2060 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2061 * @caps: VF driver negotiated capabilities
2063 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
2065 static bool ice_vf_vlan_offload_ena(u32 caps)
2067 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
2071 * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
2072 * @vf: VF used to determine if VLAN promiscuous config is allowed
2074 static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
2076 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2077 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
	    test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
		return true;

	return false;
}

/**
2085 * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
2086 * @vsi: VF's VSI used to enable VLAN promiscuous mode
2087 * @vlan: VLAN used to enable VLAN promiscuous
2089 * This function should only be called if VLAN promiscuous mode is allowed,
2090 * which can be determined via ice_is_vlan_promisc_allowed().
2092 static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2094 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2097 status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2099 if (status && status != -EEXIST)
2106 * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
2107 * @vsi: VF's VSI used to disable VLAN promiscuous mode for
2108 * @vlan: VLAN used to disable VLAN promiscuous
2110 * This function should only be called if VLAN promiscuous mode is allowed,
2111 * which can be determined via ice_is_vlan_promisc_allowed().
2113 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
2115 u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
2118 status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
2120 if (status && status != -ENOENT)
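/* Editor's sketch (not driver code): the error-tolerance pattern used by the
 * two promiscuous helpers above - enabling treats -EEXIST as success and
 * disabling treats -ENOENT as success, so repeated requests stay idempotent.
 * example_set_rule() is a hypothetical stand-in for the switch filter call.
 */
#include <errno.h>
#include <stdio.h>

static int example_set_rule(int already_present)
{
	return already_present ? -EEXIST : 0;	/* pretend hardware reply */
}

static int example_ena_promisc(int already_present)
{
	int status = example_set_rule(already_present);

	/* a rule that already exists is not an error for "enable" */
	if (status && status != -EEXIST)
		return status;
	return 0;
}

int main(void)
{
	printf("first enable:  %d\n", example_ena_promisc(0));
	printf("second enable: %d\n", example_ena_promisc(1));	/* still 0 */
	return 0;
}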
2127 * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
2128 * @vf: VF to check against
2131 * If the VF is trusted then the VF is allowed to add as many VLANs as it
2132 * wants to, so return false.
2134 * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
2135 * allowed VLANs for an untrusted VF. Return the result of this comparison.
2137 static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
2139 if (ice_is_vf_trusted(vf))
2142 #define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
2143 return ((ice_vsi_num_non_zero_vlans(vsi) +
2144 ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
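/* Editor's sketch (not driver code): the trusted/untrusted VLAN limit check
 * above, with placeholder numbers instead of ICE_MAX_VLAN_PER_VF and the
 * VSI's real non-zero VLAN count. VLAN 0 always occupies one filter.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_VLAN_PER_VF	8
#define EXAMPLE_VLAN_ZERO_FLTRS	1	/* VLAN 0 is always programmed */

static bool example_has_max_vlans(bool trusted, unsigned int non_zero_vlans)
{
	if (trusted)
		return false;	/* trusted VFs may add as many as they like */

	return non_zero_vlans + EXAMPLE_VLAN_ZERO_FLTRS >=
	       EXAMPLE_MAX_VLAN_PER_VF;
}

int main(void)
{
	printf("untrusted, 7 VLANs: %d\n", example_has_max_vlans(false, 7)); /* 1 */
	printf("trusted,   7 VLANs: %d\n", example_has_max_vlans(true, 7));  /* 0 */
	return 0;
}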
2148 * ice_vc_process_vlan_msg
2149 * @vf: pointer to the VF info
2150 * @msg: pointer to the msg buffer
2151 * @add_v: Add VLAN if true, otherwise delete VLAN
2153 * Process virtchnl op to add or remove programmed guest VLAN ID
2155 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2157 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2158 struct virtchnl_vlan_filter_list *vfl =
2159 (struct virtchnl_vlan_filter_list *)msg;
2160 struct ice_pf *pf = vf->pf;
2161 bool vlan_promisc = false;
2162 struct ice_vsi *vsi;
2167 dev = ice_pf_to_dev(pf);
2168 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2169 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2173 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2174 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2178 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2179 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2183 for (i = 0; i < vfl->num_elements; i++) {
2184 if (vfl->vlan_id[i] >= VLAN_N_VID) {
2185 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2186 dev_err(dev, "invalid VF VLAN id %d\n",
2192 vsi = ice_get_vf_vsi(vf);
2194 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2198 if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
2199 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2201 /* There is no need to let the VF know that it is not trusted,
2202 * so we can just return a success message here
2207 /* in DVM a VF can add/delete inner VLAN filters when
2208 * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
2210 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
2211 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2215 /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
2216 * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
2217 * allow vlan_promisc = true in SVM and if no port VLAN is configured
2219 vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
2220 !ice_is_dvm_ena(&pf->hw) &&
2221 !ice_vf_is_port_vlan_ena(vf);
2224 for (i = 0; i < vfl->num_elements; i++) {
2225 u16 vid = vfl->vlan_id[i];
2226 struct ice_vlan vlan;
2228 if (ice_vf_has_max_vlans(vf, vsi)) {
2229 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2231 /* There is no need to let the VF know that it is
2232 * not trusted, so we can just return a success
2233 * message here as well.
2238 /* we add VLAN 0 by default for each VF so we can enable
2239 * Tx VLAN anti-spoof without triggering MDD events, so
2240 * we don't need to add it again here
2245 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2246 status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
2248 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2252 /* Enable VLAN filtering on first non-zero VLAN */
2253 if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
2254 if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
2255 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2256 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
2260 } else if (vlan_promisc) {
2261 status = ice_vf_ena_vlan_promisc(vsi, &vlan);
2263 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2264 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
2270 /* In case of a non-trusted VF, the number of VLAN elements passed
2271 * to the PF for removal might be greater than the number of VLAN
2272 * filters programmed for that VF, so use the actual number of
2273 * VLANs added earlier with the add VLAN opcode. This avoids
2274 * removing a VLAN that doesn't exist, which would result in sending
2275 * an erroneous failure message back to the VF
2279 num_vf_vlan = vsi->num_vlan;
2280 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2281 u16 vid = vfl->vlan_id[i];
2282 struct ice_vlan vlan;
2284 /* we add VLAN 0 by default for each VF so we can enable
2285 * Tx VLAN anti-spoof without triggering MDD events, so
2286 * we don't want a VIRTCHNL request to remove it
2291 vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
2292 status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
2294 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2298 /* Disable VLAN filtering when only VLAN 0 is left */
2299 if (!ice_vsi_has_non_zero_vlans(vsi))
2300 vsi->inner_vlan_ops.dis_rx_filtering(vsi);
2303 ice_vf_dis_vlan_promisc(vsi, &vlan);
2308 /* send the response to the VF */
2310 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2313 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2318 * ice_vc_add_vlan_msg
2319 * @vf: pointer to the VF info
2320 * @msg: pointer to the msg buffer
2322 * Add and program guest VLAN ID
2324 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2326 return ice_vc_process_vlan_msg(vf, msg, true);
2330 * ice_vc_remove_vlan_msg
2331 * @vf: pointer to the VF info
2332 * @msg: pointer to the msg buffer
2334 * remove programmed guest VLAN ID
2336 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2338 return ice_vc_process_vlan_msg(vf, msg, false);
2342 * ice_vc_ena_vlan_stripping
2343 * @vf: pointer to the VF info
2345 * Enable VLAN header stripping for a given VF
2347 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2349 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2350 struct ice_vsi *vsi;
2352 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2353 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2357 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2358 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2362 vsi = ice_get_vf_vsi(vf);
2363 if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
2364 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2367 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2372 * ice_vc_dis_vlan_stripping
2373 * @vf: pointer to the VF info
2375 * Disable VLAN header stripping for a given VF
2377 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
2379 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2380 struct ice_vsi *vsi;
2382 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2383 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2387 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2388 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2392 vsi = ice_get_vf_vsi(vf);
2394 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2398 if (vsi->inner_vlan_ops.dis_stripping(vsi))
2399 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2402 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2407 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
2408 * @vf: VF to enable/disable VLAN stripping for on initialization
2410 * Set the default for VLAN stripping based on whether a port VLAN is configured
2411 * and the current VLAN mode of the device.
2413 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
2415 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
2420 /* don't modify stripping if port VLAN is configured in SVM since the
2421 * port VLAN is based on the inner/single VLAN in SVM
2423 if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
2426 if (ice_vf_vlan_offload_ena(vf->driver_caps))
2427 return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
2429 return vsi->inner_vlan_ops.dis_stripping(vsi);
2432 static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
2437 return ICE_MAX_VLAN_PER_VF;
2441 * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
2442 * @vf: VF being checked
2444 * When the device is in double VLAN mode, check whether or not the outer VLAN is allowed.
2447 static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
2449 if (ice_vf_is_port_vlan_ena(vf))
2456 * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
2457 * @vf: VF that capabilities are being set for
2458 * @caps: VLAN capabilities to populate
2460 * Determine VLAN capabilities support based on whether a port VLAN is
2461 * configured. If a port VLAN is configured then the VF should use the inner
2462 * filtering/offload capabilities since the port VLAN is using the outer VLAN capabilities.
2466 ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2468 struct virtchnl_vlan_supported_caps *supported_caps;
2470 if (ice_vf_outer_vlan_not_allowed(vf)) {
2471 /* until support for inner VLAN filtering is added when a port
2472 * VLAN is configured, only support software offloaded inner
2473 * VLANs when a port VLAN is configured in DVM
2475 supported_caps = &caps->filtering.filtering_support;
2476 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2478 supported_caps = &caps->offloads.stripping_support;
2479 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2480 VIRTCHNL_VLAN_TOGGLE |
2481 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2482 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2484 supported_caps = &caps->offloads.insertion_support;
2485 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2486 VIRTCHNL_VLAN_TOGGLE |
2487 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2488 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2490 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2491 caps->offloads.ethertype_match =
2492 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2494 supported_caps = &caps->filtering.filtering_support;
2495 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2496 supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2497 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2498 VIRTCHNL_VLAN_ETHERTYPE_9100 |
2499 VIRTCHNL_VLAN_ETHERTYPE_AND;
2500 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2501 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2502 VIRTCHNL_VLAN_ETHERTYPE_9100;
2504 supported_caps = &caps->offloads.stripping_support;
2505 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2506 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2507 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2508 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2509 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2510 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2511 VIRTCHNL_VLAN_ETHERTYPE_9100 |
2512 VIRTCHNL_VLAN_ETHERTYPE_XOR |
2513 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
2515 supported_caps = &caps->offloads.insertion_support;
2516 supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
2517 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2518 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2519 supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
2520 VIRTCHNL_VLAN_ETHERTYPE_8100 |
2521 VIRTCHNL_VLAN_ETHERTYPE_88A8 |
2522 VIRTCHNL_VLAN_ETHERTYPE_9100 |
2523 VIRTCHNL_VLAN_ETHERTYPE_XOR |
2524 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
2526 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2528 caps->offloads.ethertype_match =
2529 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2532 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2536 * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
2537 * @vf: VF that capabilities are being set for
2538 * @caps: VLAN capabilities to populate
2540 * Determine VLAN capabilities support based on whether a port VLAN is
2541 * configured. If a port VLAN is configured then the VF does not have any VLAN
2542 * filtering or offload capabilities since the port VLAN is using the inner VLAN
2543 * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
2544 * VLAN filtering and offload capabilities.
2547 ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
2549 struct virtchnl_vlan_supported_caps *supported_caps;
2551 if (ice_vf_is_port_vlan_ena(vf)) {
2552 supported_caps = &caps->filtering.filtering_support;
2553 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2554 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2556 supported_caps = &caps->offloads.stripping_support;
2557 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2558 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2560 supported_caps = &caps->offloads.insertion_support;
2561 supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
2562 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2564 caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
2565 caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
2566 caps->filtering.max_filters = 0;
2568 supported_caps = &caps->filtering.filtering_support;
2569 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
2570 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2571 caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2573 supported_caps = &caps->offloads.stripping_support;
2574 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2575 VIRTCHNL_VLAN_TOGGLE |
2576 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2577 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2579 supported_caps = &caps->offloads.insertion_support;
2580 supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
2581 VIRTCHNL_VLAN_TOGGLE |
2582 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
2583 supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
2585 caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
2586 caps->offloads.ethertype_match =
2587 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
2588 caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
2593 * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
2594 * @vf: VF to determine VLAN capabilities for
2596 * This will only be called if the VF and PF successfully negotiated
2597 * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2599 * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
2600 * is configured or not.
2602 static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
2604 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2605 struct virtchnl_vlan_caps *caps = NULL;
2608 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2609 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2613 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
2615 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2618 len = sizeof(*caps);
2620 if (ice_is_dvm_ena(&vf->pf->hw))
2621 ice_vc_set_dvm_caps(vf, caps);
2623 ice_vc_set_svm_caps(vf, caps);
2625 /* store negotiated caps to prevent invalid VF messages */
2626 memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
2629 err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
2630 v_ret, (u8 *)caps, len);
2636 * ice_vc_validate_vlan_tpid - validate VLAN TPID
2637 * @filtering_caps: negotiated/supported VLAN filtering capabilities
2638 * @tpid: VLAN TPID used for validation
2640 * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
2641 * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
2643 static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
2645 enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
2649 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
2652 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
2655 vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
2659 if (!(filtering_caps & vlan_ethertype))
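/* Editor's sketch (not driver code): the TPID validation above - map a raw
 * TPID back to a capability flag, then require that flag in the negotiated
 * filtering caps. Flag values are placeholders for VIRTCHNL_VLAN_ETHERTYPE_*.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CAP_ETHERTYPE_8100	0x1
#define EX_CAP_ETHERTYPE_88A8	0x2
#define EX_CAP_ETHERTYPE_9100	0x4

static bool example_tpid_allowed(uint16_t negotiated_caps, uint16_t tpid)
{
	uint16_t flag;

	switch (tpid) {
	case 0x8100:
		flag = EX_CAP_ETHERTYPE_8100;
		break;
	case 0x88A8:
		flag = EX_CAP_ETHERTYPE_88A8;
		break;
	case 0x9100:
		flag = EX_CAP_ETHERTYPE_9100;
		break;
	default:
		return false;	/* unknown TPID is never allowed */
	}

	return negotiated_caps & flag;
}

int main(void)
{
	/* only 0x8100 negotiated, 0x88A8 requested -> rejected */
	printf("%d\n", example_tpid_allowed(EX_CAP_ETHERTYPE_8100, 0x88A8)); /* 0 */
	return 0;
}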
2666 * ice_vc_is_valid_vlan - validate the virtchnl_vlan
2667 * @vc_vlan: virtchnl_vlan to validate
2669 * If the VLAN TCI or the VLAN TPID is 0, then this filter is invalid, so return
2670 * false. Otherwise return true.
2672 static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
2674 if (!vc_vlan->tci || !vc_vlan->tpid)
2681 * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
2682 * @vfc: negotiated/supported VLAN filtering capabilities
2683 * @vfl: VLAN filter list from VF to validate
2685 * Validate all of the filters in the VLAN filter list from the VF. If any of
2686 * the checks fail then return false. Otherwise return true.
2689 ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
2690 struct virtchnl_vlan_filter_list_v2 *vfl)
2694 if (!vfl->num_elements)
2697 for (i = 0; i < vfl->num_elements; i++) {
2698 struct virtchnl_vlan_supported_caps *filtering_support =
2699 &vfc->filtering_support;
2700 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2701 struct virtchnl_vlan *outer = &vlan_fltr->outer;
2702 struct virtchnl_vlan *inner = &vlan_fltr->inner;
2704 if ((ice_vc_is_valid_vlan(outer) &&
2705 filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
2706 (ice_vc_is_valid_vlan(inner) &&
2707 filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
2710 if ((outer->tci_mask &&
2711 !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
2713 !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
2716 if (((outer->tci & VLAN_PRIO_MASK) &&
2717 !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
2718 ((inner->tci & VLAN_PRIO_MASK) &&
2719 !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
2722 if ((ice_vc_is_valid_vlan(outer) &&
2723 !ice_vc_validate_vlan_tpid(filtering_support->outer,
2725 (ice_vc_is_valid_vlan(inner) &&
2726 !ice_vc_validate_vlan_tpid(filtering_support->inner,
2735 * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
2736 * @vc_vlan: struct virtchnl_vlan to transform
2738 static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
2740 struct ice_vlan vlan = { 0 };
2742 vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2743 vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
2744 vlan.tpid = vc_vlan->tpid;
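/* Editor's sketch (not driver code): decoding a 16-bit TCI into priority and
 * VLAN ID as done above. The literal masks mirror the standard 802.1Q layout
 * that VLAN_PRIO_MASK/VLAN_PRIO_SHIFT/VLAN_VID_MASK encode.
 */
#include <stdint.h>
#include <stdio.h>

struct example_vlan {
	uint8_t prio;	/* PCP, bits 15..13 of the TCI */
	uint16_t vid;	/* VLAN ID, bits 11..0 of the TCI */
	uint16_t tpid;	/* tag protocol identifier, e.g. 0x8100 */
};

static struct example_vlan example_decode(uint16_t tci, uint16_t tpid)
{
	struct example_vlan v = {
		.prio = (tci & 0xE000) >> 13,
		.vid = tci & 0x0FFF,
		.tpid = tpid,
	};

	return v;
}

int main(void)
{
	struct example_vlan v = example_decode(0x6064, 0x8100);

	printf("prio %u vid %u tpid 0x%04x\n", v.prio, v.vid, v.tpid); /* prio 3 vid 100 */
	return 0;
}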
2750 * ice_vc_vlan_action - action to perform on the virtchnl_vlan
2751 * @vsi: VF's VSI used to perform the action
2752 * @vlan_action: function to perform the action with (i.e. add/del)
2753 * @vlan: VLAN filter to perform the action with
2756 ice_vc_vlan_action(struct ice_vsi *vsi,
2757 int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
2758 struct ice_vlan *vlan)
2762 err = vlan_action(vsi, vlan);
2770 * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
2771 * @vf: VF used to delete the VLAN(s)
2772 * @vsi: VF's VSI used to delete the VLAN(s)
2773 * @vfl: virtchnl filter list used to delete the filters
2776 ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2777 struct virtchnl_vlan_filter_list_v2 *vfl)
2779 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2783 for (i = 0; i < vfl->num_elements; i++) {
2784 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2785 struct virtchnl_vlan *vc_vlan;
2787 vc_vlan = &vlan_fltr->outer;
2788 if (ice_vc_is_valid_vlan(vc_vlan)) {
2789 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2791 err = ice_vc_vlan_action(vsi,
2792 vsi->outer_vlan_ops.del_vlan,
2798 ice_vf_dis_vlan_promisc(vsi, &vlan);
2801 vc_vlan = &vlan_fltr->inner;
2802 if (ice_vc_is_valid_vlan(vc_vlan)) {
2803 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2805 err = ice_vc_vlan_action(vsi,
2806 vsi->inner_vlan_ops.del_vlan,
2811 /* no support for VLAN promiscuous on inner VLAN unless
2812 * we are in Single VLAN Mode (SVM)
2814 if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc)
2815 ice_vf_dis_vlan_promisc(vsi, &vlan);
2823 * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
2824 * @vf: VF the message was received from
2825 * @msg: message received from the VF
2827 static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2829 struct virtchnl_vlan_filter_list_v2 *vfl =
2830 (struct virtchnl_vlan_filter_list_v2 *)msg;
2831 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2832 struct ice_vsi *vsi;
2834 if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
2836 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2840 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2841 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2845 vsi = ice_get_vf_vsi(vf);
2847 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2851 if (ice_vc_del_vlans(vf, vsi, vfl))
2852 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2855 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
2860 * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
2861 * @vf: VF used to add the VLAN(s)
2862 * @vsi: VF's VSI used to add the VLAN(s)
2863 * @vfl: virtchnl filter list used to add the filters
2866 ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
2867 struct virtchnl_vlan_filter_list_v2 *vfl)
2869 bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
2873 for (i = 0; i < vfl->num_elements; i++) {
2874 struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
2875 struct virtchnl_vlan *vc_vlan;
2877 vc_vlan = &vlan_fltr->outer;
2878 if (ice_vc_is_valid_vlan(vc_vlan)) {
2879 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2881 err = ice_vc_vlan_action(vsi,
2882 vsi->outer_vlan_ops.add_vlan,
2888 err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2894 vc_vlan = &vlan_fltr->inner;
2895 if (ice_vc_is_valid_vlan(vc_vlan)) {
2896 struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
2898 err = ice_vc_vlan_action(vsi,
2899 vsi->inner_vlan_ops.add_vlan,
2904 /* no support for VLAN promiscuous on inner VLAN unless
2905 * we are in Single VLAN Mode (SVM)
2907 if (!ice_is_dvm_ena(&vsi->back->hw) && vlan_promisc) {
2908 err = ice_vf_ena_vlan_promisc(vsi, &vlan);
2919 * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
2920 * @vsi: VF VSI used to get number of existing VLAN filters
2921 * @vfc: negotiated/supported VLAN filtering capabilities
2922 * @vfl: VLAN filter list from VF to validate
2924 * Validate all of the filters in the VLAN filter list from the VF during the
2925 * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
2926 * Otherwise return true.
2929 ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
2930 struct virtchnl_vlan_filtering_caps *vfc,
2931 struct virtchnl_vlan_filter_list_v2 *vfl)
2933 u16 num_requested_filters = vsi->num_vlan + vfl->num_elements;
2935 if (num_requested_filters > vfc->max_filters)
2938 return ice_vc_validate_vlan_filter_list(vfc, vfl);
2942 * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
2943 * @vf: VF the message was received from
2944 * @msg: message received from the VF
2946 static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
2948 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2949 struct virtchnl_vlan_filter_list_v2 *vfl =
2950 (struct virtchnl_vlan_filter_list_v2 *)msg;
2951 struct ice_vsi *vsi;
2953 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2954 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2958 if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
2959 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2963 vsi = ice_get_vf_vsi(vf);
2965 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2969 if (!ice_vc_validate_add_vlan_filter_list(vsi,
2970 &vf->vlan_v2_caps.filtering,
2972 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2976 if (ice_vc_add_vlans(vf, vsi, vfl))
2977 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2980 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
2985 * ice_vc_valid_vlan_setting - validate VLAN setting
2986 * @negotiated_settings: negotiated VLAN settings during VF init
2987 * @ethertype_setting: ethertype(s) requested for the VLAN setting
2990 ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
2992 if (ethertype_setting && !(negotiated_settings & ethertype_setting))
2995 /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
2996 * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
2998 if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
2999 hweight32(ethertype_setting) > 1)
3002 /* ability to modify the VLAN setting was not negotiated */
3003 if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
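/* Editor's sketch (not driver code): the three checks performed above, using
 * placeholder capability bits in place of the VIRTCHNL_VLAN_* flags - the
 * requested ethertype bits must overlap what was negotiated, more than one
 * ethertype bit needs the AND capability, and changing the setting at all
 * needs the TOGGLE capability.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_ETHERTYPE_8100	0x1
#define EX_ETHERTYPE_88A8	0x2
#define EX_ETHERTYPE_AND	0x10	/* multiple ethertypes allowed */
#define EX_TOGGLE		0x20	/* setting may be changed at all */

static bool example_valid_setting(uint32_t negotiated, uint32_t requested)
{
	/* requested ethertype(s) must have been negotiated */
	if (requested && !(negotiated & requested))
		return false;

	/* only one ethertype unless the AND capability was negotiated */
	if (!(negotiated & EX_ETHERTYPE_AND) &&
	    __builtin_popcount(requested) > 1)
		return false;

	/* the ability to modify the setting must have been negotiated */
	if (!(negotiated & EX_TOGGLE))
		return false;

	return true;
}

int main(void)
{
	uint32_t caps = EX_ETHERTYPE_8100 | EX_TOGGLE;

	printf("%d %d\n",
	       example_valid_setting(caps, EX_ETHERTYPE_8100),	/* 1 */
	       example_valid_setting(caps, EX_ETHERTYPE_88A8));	/* 0 */
	return 0;
}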
3010 * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
3011 * @caps: negotiated VLAN settings during VF init
3012 * @msg: message to validate
3014 * Used to validate any VLAN virtchnl message sent as a
3015 * virtchnl_vlan_setting structure. Validates the message against the
3016 * negotiated/supported caps during VF driver init.
3019 ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
3020 struct virtchnl_vlan_setting *msg)
3022 if ((!msg->outer_ethertype_setting &&
3023 !msg->inner_ethertype_setting) ||
3024 (!caps->outer && !caps->inner))
3027 if (msg->outer_ethertype_setting &&
3028 !ice_vc_valid_vlan_setting(caps->outer,
3029 msg->outer_ethertype_setting))
3032 if (msg->inner_ethertype_setting &&
3033 !ice_vc_valid_vlan_setting(caps->inner,
3034 msg->inner_ethertype_setting))
3041 * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
3042 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
3043 * @tpid: VLAN TPID to populate
3045 static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
3047 switch (ethertype_setting) {
3048 case VIRTCHNL_VLAN_ETHERTYPE_8100:
3049 *tpid = ETH_P_8021Q;
3051 case VIRTCHNL_VLAN_ETHERTYPE_88A8:
3052 *tpid = ETH_P_8021AD;
3054 case VIRTCHNL_VLAN_ETHERTYPE_9100:
3055 *tpid = ETH_P_QINQ1;
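/* Editor's sketch (not driver code): the ethertype-flag to TPID mapping
 * above, with placeholder flag values and the raw TPID numbers that the
 * ETH_P_8021Q/ETH_P_8021AD/ETH_P_QINQ1 macros expand to.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ETHERTYPE_8100	0x1
#define EX_ETHERTYPE_88A8	0x2
#define EX_ETHERTYPE_9100	0x4

static int example_get_tpid(uint32_t ethertype_flag, uint16_t *tpid)
{
	switch (ethertype_flag) {
	case EX_ETHERTYPE_8100:
		*tpid = 0x8100;
		break;
	case EX_ETHERTYPE_88A8:
		*tpid = 0x88A8;
		break;
	case EX_ETHERTYPE_9100:
		*tpid = 0x9100;
		break;
	default:
		*tpid = 0;
		return -1;	/* unknown flag, or more than one flag set */
	}

	return 0;
}

int main(void)
{
	uint16_t tpid;

	if (!example_get_tpid(EX_ETHERTYPE_88A8, &tpid))
		printf("TPID 0x%04x\n", tpid);	/* prints 0x88a8 */
	return 0;
}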
3066 * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
3067 * @vsi: VF's VSI used to enable the VLAN offload
3068 * @ena_offload: function used to enable the VLAN offload
3069 * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
3072 ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
3073 int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
3074 u32 ethertype_setting)
3079 err = ice_vc_get_tpid(ethertype_setting, &tpid);
3083 err = ena_offload(vsi, tpid);
3090 #define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
3091 #define ICE_L2TSEL_BIT_OFFSET 23
3093 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
3094 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
3098 * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
3099 * @vsi: VSI used to update l2tsel on
3100 * @l2tsel: l2tsel setting requested
3102 * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
3103 * This will modify which descriptor field the first offloaded VLAN will be extracted into.
3106 static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
3108 struct ice_hw *hw = &vsi->back->hw;
3112 if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
3115 l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
3117 for (i = 0; i < vsi->alloc_rxq; i++) {
3118 u16 pfq = vsi->rxq_map[i];
3119 u32 qrx_context_offset;
3122 qrx_context_offset =
3123 QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
3125 regval = rd32(hw, qrx_context_offset);
3126 regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
3127 regval |= l2tsel_bit;
3128 wr32(hw, qrx_context_offset, regval);
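/* Editor's sketch (not driver code): the read-modify-write of the l2tsel bit
 * performed above, with the register access replaced by a plain word. Bit 23
 * matches ICE_L2TSEL_BIT_OFFSET; the boolean stands in for the ice_l2tsel enum.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_L2TSEL_BIT	23U

static uint32_t example_update_l2tsel(uint32_t regval,
				      bool first_tag_in_l2tag2_2nd)
{
	regval &= ~(1U << EX_L2TSEL_BIT);		/* always clear ... */
	if (first_tag_in_l2tag2_2nd)
		regval |= 1U << EX_L2TSEL_BIT;		/* ... set only if requested */
	return regval;
}

int main(void)
{
	printf("0x%08x\n", example_update_l2tsel(0x0, true));		/* 0x00800000 */
	printf("0x%08x\n", example_update_l2tsel(0x00800000, false));	/* 0x00000000 */
	return 0;
}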
3133 * ice_vc_ena_vlan_stripping_v2_msg
3134 * @vf: VF the message was received from
3135 * @msg: message received from the VF
3137 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
3139 static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3141 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3142 struct virtchnl_vlan_supported_caps *stripping_support;
3143 struct virtchnl_vlan_setting *strip_msg =
3144 (struct virtchnl_vlan_setting *)msg;
3145 u32 ethertype_setting;
3146 struct ice_vsi *vsi;
3148 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3149 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3153 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3154 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3158 vsi = ice_get_vf_vsi(vf);
3160 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3164 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3165 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3166 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3170 ethertype_setting = strip_msg->outer_ethertype_setting;
3171 if (ethertype_setting) {
3172 if (ice_vc_ena_vlan_offload(vsi,
3173 vsi->outer_vlan_ops.ena_stripping,
3174 ethertype_setting)) {
3175 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3178 enum ice_l2tsel l2tsel =
3179 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
3181 /* PF tells the VF that the outer VLAN tag is always
3182 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3183 * inner is always extracted to
3184 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3185 * support outer stripping so the first tag always ends
3186 * up in L2TAG2_2ND and the second/inner tag, if
3187 * enabled, is extracted in L2TAG1.
3189 ice_vsi_update_l2tsel(vsi, l2tsel);
3193 ethertype_setting = strip_msg->inner_ethertype_setting;
3194 if (ethertype_setting &&
3195 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
3196 ethertype_setting)) {
3197 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3202 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
3207 * ice_vc_dis_vlan_stripping_v2_msg
3208 * @vf: VF the message was received from
3209 * @msg: message received from the VF
3211 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
3213 static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
3215 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3216 struct virtchnl_vlan_supported_caps *stripping_support;
3217 struct virtchnl_vlan_setting *strip_msg =
3218 (struct virtchnl_vlan_setting *)msg;
3219 u32 ethertype_setting;
3220 struct ice_vsi *vsi;
3222 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3223 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3227 if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
3228 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3232 vsi = ice_get_vf_vsi(vf);
3234 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3238 stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
3239 if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
3240 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3244 ethertype_setting = strip_msg->outer_ethertype_setting;
3245 if (ethertype_setting) {
3246 if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
3247 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3250 enum ice_l2tsel l2tsel =
3251 ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
3253 /* PF tells the VF that the outer VLAN tag is always
3254 * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
3255 * inner is always extracted to
3256 * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
3257 * support inner stripping while outer stripping is
3258 * disabled so that the first and only tag is extracted in L2TAG1.
3261 ice_vsi_update_l2tsel(vsi, l2tsel);
3265 ethertype_setting = strip_msg->inner_ethertype_setting;
3266 if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
3267 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3272 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
3277 * ice_vc_ena_vlan_insertion_v2_msg
3278 * @vf: VF the message was received from
3279 * @msg: message received from the VF
3281 * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
3283 static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3285 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3286 struct virtchnl_vlan_supported_caps *insertion_support;
3287 struct virtchnl_vlan_setting *insertion_msg =
3288 (struct virtchnl_vlan_setting *)msg;
3289 u32 ethertype_setting;
3290 struct ice_vsi *vsi;
3292 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3293 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3297 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3298 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3302 vsi = ice_get_vf_vsi(vf);
3304 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3308 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3309 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3310 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3314 ethertype_setting = insertion_msg->outer_ethertype_setting;
3315 if (ethertype_setting &&
3316 ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
3317 ethertype_setting)) {
3318 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3322 ethertype_setting = insertion_msg->inner_ethertype_setting;
3323 if (ethertype_setting &&
3324 ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
3325 ethertype_setting)) {
3326 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3331 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
3336 * ice_vc_dis_vlan_insertion_v2_msg
3337 * @vf: VF the message was received from
3338 * @msg: message received from the VF
3340 * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
3342 static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
3344 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3345 struct virtchnl_vlan_supported_caps *insertion_support;
3346 struct virtchnl_vlan_setting *insertion_msg =
3347 (struct virtchnl_vlan_setting *)msg;
3348 u32 ethertype_setting;
3349 struct ice_vsi *vsi;
3351 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3352 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3356 if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
3357 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3361 vsi = ice_get_vf_vsi(vf);
3363 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3367 insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
3368 if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
3369 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3373 ethertype_setting = insertion_msg->outer_ethertype_setting;
3374 if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
3375 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3379 ethertype_setting = insertion_msg->inner_ethertype_setting;
3380 if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
3381 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3386 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
3390 static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
3391 .get_ver_msg = ice_vc_get_ver_msg,
3392 .get_vf_res_msg = ice_vc_get_vf_res_msg,
3393 .reset_vf = ice_vc_reset_vf_msg,
3394 .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
3395 .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
3396 .cfg_qs_msg = ice_vc_cfg_qs_msg,
3397 .ena_qs_msg = ice_vc_ena_qs_msg,
3398 .dis_qs_msg = ice_vc_dis_qs_msg,
3399 .request_qs_msg = ice_vc_request_qs_msg,
3400 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3401 .config_rss_key = ice_vc_config_rss_key,
3402 .config_rss_lut = ice_vc_config_rss_lut,
3403 .get_stats_msg = ice_vc_get_stats_msg,
3404 .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
3405 .add_vlan_msg = ice_vc_add_vlan_msg,
3406 .remove_vlan_msg = ice_vc_remove_vlan_msg,
3407 .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
3408 .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
3409 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3410 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3411 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3412 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3413 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3414 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3415 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3416 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3417 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3418 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3422 * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
3423 * @vf: the VF to switch ops
3425 void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
3427 vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
3431 * ice_vc_repr_add_mac
3432 * @vf: pointer to VF
3433 * @msg: virtchannel message
3435 * When port representors are created, we do not add a MAC rule
3436 * to firmware; we store it so that the PF can report the same MAC as the VF.
3439 static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
3441 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3442 struct virtchnl_ether_addr_list *al =
3443 (struct virtchnl_ether_addr_list *)msg;
3444 struct ice_vsi *vsi;
3448 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3449 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3450 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3451 goto handle_mac_exit;
3456 vsi = ice_get_vf_vsi(vf);
3458 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3459 goto handle_mac_exit;
3462 for (i = 0; i < al->num_elements; i++) {
3463 u8 *mac_addr = al->list[i].addr;
3466 if (!is_unicast_ether_addr(mac_addr) ||
3467 ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
3470 if (vf->pf_set_mac) {
3471 dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
3472 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
3473 goto handle_mac_exit;
3476 result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
3478 dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d, error %d\n",
3479 mac_addr, vf->vf_id, result);
3480 goto handle_mac_exit;
3483 ice_vfhw_mac_add(vf, &al->list[i]);
3489 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
3494 * ice_vc_repr_del_mac - response with success for deleting MAC
3495 * @vf: pointer to VF
3496 * @msg: virtchannel message
3498 * Respond with success so as not to break the normal VF flow.
3499 * For a legacy VF driver, try to update the cached MAC address.
3502 ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
3504 struct virtchnl_ether_addr_list *al =
3505 (struct virtchnl_ether_addr_list *)msg;
3507 ice_update_legacy_cached_mac(vf, &al->list[0]);
3509 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
3510 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3513 static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
3515 dev_dbg(ice_pf_to_dev(vf->pf),
3516 "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
3517 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
3518 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3521 static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
3523 dev_dbg(ice_pf_to_dev(vf->pf),
3524 "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
3525 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
3526 VIRTCHNL_STATUS_SUCCESS, NULL, 0);
3529 static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
3531 dev_dbg(ice_pf_to_dev(vf->pf),
3532 "Can't enable VLAN stripping in switchdev mode for VF %d\n",
3534 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3535 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3539 static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
3541 dev_dbg(ice_pf_to_dev(vf->pf),
3542 "Can't disable VLAN stripping in switchdev mode for VF %d\n",
3544 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3545 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3550 ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
3552 dev_dbg(ice_pf_to_dev(vf->pf),
3553 "Can't config promiscuous mode in switchdev mode for VF %d\n",
3555 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3556 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3560 static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
3561 .get_ver_msg = ice_vc_get_ver_msg,
3562 .get_vf_res_msg = ice_vc_get_vf_res_msg,
3563 .reset_vf = ice_vc_reset_vf_msg,
3564 .add_mac_addr_msg = ice_vc_repr_add_mac,
3565 .del_mac_addr_msg = ice_vc_repr_del_mac,
3566 .cfg_qs_msg = ice_vc_cfg_qs_msg,
3567 .ena_qs_msg = ice_vc_ena_qs_msg,
3568 .dis_qs_msg = ice_vc_dis_qs_msg,
3569 .request_qs_msg = ice_vc_request_qs_msg,
3570 .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
3571 .config_rss_key = ice_vc_config_rss_key,
3572 .config_rss_lut = ice_vc_config_rss_lut,
3573 .get_stats_msg = ice_vc_get_stats_msg,
3574 .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
3575 .add_vlan_msg = ice_vc_repr_add_vlan,
3576 .remove_vlan_msg = ice_vc_repr_del_vlan,
3577 .ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping,
3578 .dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping,
3579 .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
3580 .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
3581 .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
3582 .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
3583 .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
3584 .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
3585 .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
3586 .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
3587 .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
3588 .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
3592 * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
3593 * @vf: the VF to switch ops
3595 void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
3597 vf->virtchnl_ops = &ice_virtchnl_repr_ops;
3601 * ice_vc_process_vf_msg - Process request from VF
3602 * @pf: pointer to the PF structure
3603 * @event: pointer to the AQ event
3605 * called from the common asq/arq handler to
3606 * process a request from a VF
3608 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3610 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3611 s16 vf_id = le16_to_cpu(event->desc.retval);
3612 const struct ice_virtchnl_ops *ops;
3613 u16 msglen = event->msg_len;
3614 u8 *msg = event->msg_buf;
3615 struct ice_vf *vf = NULL;
3619 dev = ice_pf_to_dev(pf);
3621 vf = ice_get_vf_by_id(pf, vf_id);
3623 dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
3624 vf_id, v_opcode, msglen);
3628 /* Check if VF is disabled. */
3629 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3634 ops = vf->virtchnl_ops;
3636 /* Perform basic checks on the msg */
3637 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3639 if (err == VIRTCHNL_STATUS_ERR_PARAM)
3645 if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
3646 ice_vc_send_msg_to_vf(vf, v_opcode,
3647 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
3655 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3657 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3658 vf_id, v_opcode, msglen, err);
3663 /* VF is being configured in another context that triggers a VFR, so no
3664 * need to process this message
3666 if (!mutex_trylock(&vf->cfg_lock)) {
3667 dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
3674 case VIRTCHNL_OP_VERSION:
3675 err = ops->get_ver_msg(vf, msg);
3677 case VIRTCHNL_OP_GET_VF_RESOURCES:
3678 err = ops->get_vf_res_msg(vf, msg);
3679 if (ice_vf_init_vlan_stripping(vf))
3680 dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
3682 ice_vc_notify_vf_link_state(vf);
3684 case VIRTCHNL_OP_RESET_VF:
3687 case VIRTCHNL_OP_ADD_ETH_ADDR:
3688 err = ops->add_mac_addr_msg(vf, msg);
3690 case VIRTCHNL_OP_DEL_ETH_ADDR:
3691 err = ops->del_mac_addr_msg(vf, msg);
3693 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3694 err = ops->cfg_qs_msg(vf, msg);
3696 case VIRTCHNL_OP_ENABLE_QUEUES:
3697 err = ops->ena_qs_msg(vf, msg);
3698 ice_vc_notify_vf_link_state(vf);
3700 case VIRTCHNL_OP_DISABLE_QUEUES:
3701 err = ops->dis_qs_msg(vf, msg);
3703 case VIRTCHNL_OP_REQUEST_QUEUES:
3704 err = ops->request_qs_msg(vf, msg);
3706 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3707 err = ops->cfg_irq_map_msg(vf, msg);
3709 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3710 err = ops->config_rss_key(vf, msg);
3712 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3713 err = ops->config_rss_lut(vf, msg);
3715 case VIRTCHNL_OP_GET_STATS:
3716 err = ops->get_stats_msg(vf, msg);
3718 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3719 err = ops->cfg_promiscuous_mode_msg(vf, msg);
3721 case VIRTCHNL_OP_ADD_VLAN:
3722 err = ops->add_vlan_msg(vf, msg);
3724 case VIRTCHNL_OP_DEL_VLAN:
3725 err = ops->remove_vlan_msg(vf, msg);
3727 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3728 err = ops->ena_vlan_stripping(vf);
3730 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3731 err = ops->dis_vlan_stripping(vf);
3733 case VIRTCHNL_OP_ADD_FDIR_FILTER:
3734 err = ops->add_fdir_fltr_msg(vf, msg);
3736 case VIRTCHNL_OP_DEL_FDIR_FILTER:
3737 err = ops->del_fdir_fltr_msg(vf, msg);
3739 case VIRTCHNL_OP_ADD_RSS_CFG:
3740 err = ops->handle_rss_cfg_msg(vf, msg, true);
3742 case VIRTCHNL_OP_DEL_RSS_CFG:
3743 err = ops->handle_rss_cfg_msg(vf, msg, false);
3745 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
3746 err = ops->get_offload_vlan_v2_caps(vf);
3748 case VIRTCHNL_OP_ADD_VLAN_V2:
3749 err = ops->add_vlan_v2_msg(vf, msg);
3751 case VIRTCHNL_OP_DEL_VLAN_V2:
3752 err = ops->remove_vlan_v2_msg(vf, msg);
3754 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
3755 err = ops->ena_vlan_stripping_v2_msg(vf, msg);
3757 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
3758 err = ops->dis_vlan_stripping_v2_msg(vf, msg);
3760 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
3761 err = ops->ena_vlan_insertion_v2_msg(vf, msg);
3763 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
3764 err = ops->dis_vlan_insertion_v2_msg(vf, msg);
3766 case VIRTCHNL_OP_UNKNOWN:
3768 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3770 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3771 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3776 /* Helper function cares less about error return values here
3777 * as it is busy with pending work.
3779 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3780 vf_id, v_opcode, err);
3783 mutex_unlock(&vf->cfg_lock);
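/* Editor's sketch (not driver code): the shape of the dispatch done in
 * ice_vc_process_vf_msg above - decode a little-endian opcode, then call the
 * per-opcode handler through an ops table. Opcode numbers and handler names
 * here are placeholders, not the virtchnl definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_OP_VERSION	1
#define EX_OP_ADD_VLAN	2

struct example_ops {
	int (*version)(const uint8_t *msg, uint16_t len);
	int (*add_vlan)(const uint8_t *msg, uint16_t len);
};

static int example_version(const uint8_t *msg, uint16_t len)
{
	(void)msg;
	printf("version msg, %u bytes\n", len);
	return 0;
}

static int example_add_vlan(const uint8_t *msg, uint16_t len)
{
	(void)msg;
	printf("add VLAN msg, %u bytes\n", len);
	return 0;
}

/* Little-endian decode, the role le32_to_cpu() plays for the AQ descriptor. */
static uint32_t example_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int example_dispatch(const struct example_ops *ops,
			    const uint8_t *cookie_high,
			    const uint8_t *msg, uint16_t len)
{
	switch (example_le32(cookie_high)) {
	case EX_OP_VERSION:
		return ops->version(msg, len);
	case EX_OP_ADD_VLAN:
		return ops->add_vlan(msg, len);
	default:
		return -1;	/* unsupported opcode */
	}
}

int main(void)
{
	static const struct example_ops ops = {
		.version = example_version,
		.add_vlan = example_add_vlan,
	};
	uint8_t cookie[4] = { EX_OP_ADD_VLAN, 0, 0, 0 };
	uint8_t msg[4] = { 0 };

	return example_dispatch(&ops, cookie, msg, sizeof(msg));
}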