1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021, Intel Corporation. */
#define to_fltr_conf_from_desc(p) \
	container_of(p, struct virtchnl_fdir_fltr_conf, input)

#define ICE_FLOW_PROF_TYPE_S	0
#define ICE_FLOW_PROF_TYPE_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S	32
#define ICE_FLOW_PROF_VSI_M	(0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)

/* Flow profile ID format:
 * [0:31] - flow type, flow + tun_offs
 * [32:63] - VSI index
 */
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
	((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
	       (((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))

/* Byte offsets/masks into the raw GTP-U header buffer supplied by the VF */
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
/* PFCP S flag (low bit of first byte) selects NODE vs SESSION message */
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805

/* inset flags distinguishing plain IPsec ESP from UDP-encapsulated (NAT-T) ESP */
#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
/* Tunnel encapsulation carried by a VF FDIR filter */
enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};
43 struct virtchnl_fdir_fltr_conf {
44 struct ice_fdir_fltr input;
45 enum ice_fdir_tunnel_type ttype;
49 static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
50 VIRTCHNL_PROTO_HDR_ETH,
51 VIRTCHNL_PROTO_HDR_NONE,
54 static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
55 VIRTCHNL_PROTO_HDR_ETH,
56 VIRTCHNL_PROTO_HDR_IPV4,
57 VIRTCHNL_PROTO_HDR_NONE,
60 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
61 VIRTCHNL_PROTO_HDR_ETH,
62 VIRTCHNL_PROTO_HDR_IPV4,
63 VIRTCHNL_PROTO_HDR_TCP,
64 VIRTCHNL_PROTO_HDR_NONE,
67 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
68 VIRTCHNL_PROTO_HDR_ETH,
69 VIRTCHNL_PROTO_HDR_IPV4,
70 VIRTCHNL_PROTO_HDR_UDP,
71 VIRTCHNL_PROTO_HDR_NONE,
74 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
75 VIRTCHNL_PROTO_HDR_ETH,
76 VIRTCHNL_PROTO_HDR_IPV4,
77 VIRTCHNL_PROTO_HDR_SCTP,
78 VIRTCHNL_PROTO_HDR_NONE,
81 static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
82 VIRTCHNL_PROTO_HDR_ETH,
83 VIRTCHNL_PROTO_HDR_IPV6,
84 VIRTCHNL_PROTO_HDR_NONE,
87 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
88 VIRTCHNL_PROTO_HDR_ETH,
89 VIRTCHNL_PROTO_HDR_IPV6,
90 VIRTCHNL_PROTO_HDR_TCP,
91 VIRTCHNL_PROTO_HDR_NONE,
94 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
95 VIRTCHNL_PROTO_HDR_ETH,
96 VIRTCHNL_PROTO_HDR_IPV6,
97 VIRTCHNL_PROTO_HDR_UDP,
98 VIRTCHNL_PROTO_HDR_NONE,
101 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
102 VIRTCHNL_PROTO_HDR_ETH,
103 VIRTCHNL_PROTO_HDR_IPV6,
104 VIRTCHNL_PROTO_HDR_SCTP,
105 VIRTCHNL_PROTO_HDR_NONE,
108 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
109 VIRTCHNL_PROTO_HDR_ETH,
110 VIRTCHNL_PROTO_HDR_IPV4,
111 VIRTCHNL_PROTO_HDR_UDP,
112 VIRTCHNL_PROTO_HDR_GTPU_IP,
113 VIRTCHNL_PROTO_HDR_NONE,
116 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
117 VIRTCHNL_PROTO_HDR_ETH,
118 VIRTCHNL_PROTO_HDR_IPV4,
119 VIRTCHNL_PROTO_HDR_UDP,
120 VIRTCHNL_PROTO_HDR_GTPU_IP,
121 VIRTCHNL_PROTO_HDR_GTPU_EH,
122 VIRTCHNL_PROTO_HDR_NONE,
125 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
126 VIRTCHNL_PROTO_HDR_ETH,
127 VIRTCHNL_PROTO_HDR_IPV4,
128 VIRTCHNL_PROTO_HDR_L2TPV3,
129 VIRTCHNL_PROTO_HDR_NONE,
132 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
133 VIRTCHNL_PROTO_HDR_ETH,
134 VIRTCHNL_PROTO_HDR_IPV6,
135 VIRTCHNL_PROTO_HDR_L2TPV3,
136 VIRTCHNL_PROTO_HDR_NONE,
139 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
140 VIRTCHNL_PROTO_HDR_ETH,
141 VIRTCHNL_PROTO_HDR_IPV4,
142 VIRTCHNL_PROTO_HDR_ESP,
143 VIRTCHNL_PROTO_HDR_NONE,
146 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
147 VIRTCHNL_PROTO_HDR_ETH,
148 VIRTCHNL_PROTO_HDR_IPV6,
149 VIRTCHNL_PROTO_HDR_ESP,
150 VIRTCHNL_PROTO_HDR_NONE,
153 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
154 VIRTCHNL_PROTO_HDR_ETH,
155 VIRTCHNL_PROTO_HDR_IPV4,
156 VIRTCHNL_PROTO_HDR_AH,
157 VIRTCHNL_PROTO_HDR_NONE,
160 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
161 VIRTCHNL_PROTO_HDR_ETH,
162 VIRTCHNL_PROTO_HDR_IPV6,
163 VIRTCHNL_PROTO_HDR_AH,
164 VIRTCHNL_PROTO_HDR_NONE,
167 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
168 VIRTCHNL_PROTO_HDR_ETH,
169 VIRTCHNL_PROTO_HDR_IPV4,
170 VIRTCHNL_PROTO_HDR_UDP,
171 VIRTCHNL_PROTO_HDR_ESP,
172 VIRTCHNL_PROTO_HDR_NONE,
175 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
176 VIRTCHNL_PROTO_HDR_ETH,
177 VIRTCHNL_PROTO_HDR_IPV6,
178 VIRTCHNL_PROTO_HDR_UDP,
179 VIRTCHNL_PROTO_HDR_ESP,
180 VIRTCHNL_PROTO_HDR_NONE,
183 static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
184 VIRTCHNL_PROTO_HDR_ETH,
185 VIRTCHNL_PROTO_HDR_IPV4,
186 VIRTCHNL_PROTO_HDR_UDP,
187 VIRTCHNL_PROTO_HDR_PFCP,
188 VIRTCHNL_PROTO_HDR_NONE,
191 static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
192 VIRTCHNL_PROTO_HDR_ETH,
193 VIRTCHNL_PROTO_HDR_IPV6,
194 VIRTCHNL_PROTO_HDR_UDP,
195 VIRTCHNL_PROTO_HDR_PFCP,
196 VIRTCHNL_PROTO_HDR_NONE,
199 struct virtchnl_fdir_pattern_match_item {
200 enum virtchnl_proto_hdr_type *list;
205 static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
206 {vc_pattern_ipv4, 0, NULL},
207 {vc_pattern_ipv4_tcp, 0, NULL},
208 {vc_pattern_ipv4_udp, 0, NULL},
209 {vc_pattern_ipv4_sctp, 0, NULL},
210 {vc_pattern_ipv6, 0, NULL},
211 {vc_pattern_ipv6_tcp, 0, NULL},
212 {vc_pattern_ipv6_udp, 0, NULL},
213 {vc_pattern_ipv6_sctp, 0, NULL},
216 static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
217 {vc_pattern_ipv4, 0, NULL},
218 {vc_pattern_ipv4_tcp, 0, NULL},
219 {vc_pattern_ipv4_udp, 0, NULL},
220 {vc_pattern_ipv4_sctp, 0, NULL},
221 {vc_pattern_ipv6, 0, NULL},
222 {vc_pattern_ipv6_tcp, 0, NULL},
223 {vc_pattern_ipv6_udp, 0, NULL},
224 {vc_pattern_ipv6_sctp, 0, NULL},
225 {vc_pattern_ether, 0, NULL},
226 {vc_pattern_ipv4_gtpu, 0, NULL},
227 {vc_pattern_ipv4_gtpu_eh, 0, NULL},
228 {vc_pattern_ipv4_l2tpv3, 0, NULL},
229 {vc_pattern_ipv6_l2tpv3, 0, NULL},
230 {vc_pattern_ipv4_esp, 0, NULL},
231 {vc_pattern_ipv6_esp, 0, NULL},
232 {vc_pattern_ipv4_ah, 0, NULL},
233 {vc_pattern_ipv6_ah, 0, NULL},
234 {vc_pattern_ipv4_nat_t_esp, 0, NULL},
235 {vc_pattern_ipv6_nat_t_esp, 0, NULL},
236 {vc_pattern_ipv4_pfcp, 0, NULL},
237 {vc_pattern_ipv6_pfcp, 0, NULL},
240 struct virtchnl_fdir_inset_map {
241 enum virtchnl_proto_hdr_field field;
242 enum ice_flow_field fld;
247 static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
248 {VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
249 {VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
250 {VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
251 {VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
252 {VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
253 {VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
254 {VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
255 {VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
256 {VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
257 {VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
258 {VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
259 {VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
260 {VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
261 {VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
262 {VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
263 {VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
264 {VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
265 {VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
266 {VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
267 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
268 FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
269 {VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
270 FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
271 {VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
272 {VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
273 {VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
277 * ice_vc_fdir_param_check
278 * @vf: pointer to the VF structure
279 * @vsi_id: VF relative VSI ID
281 * Check for the valid VSI ID, PF's state and VF's state
283 * Return: 0 on success, and -EINVAL on error.
286 ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
288 struct ice_pf *pf = vf->pf;
290 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
293 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
296 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
299 if (vsi_id != vf->lan_vsi_num)
302 if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
305 if (!pf->vsi[vf->lan_vsi_idx])
312 * ice_vf_start_ctrl_vsi
313 * @vf: pointer to the VF structure
315 * Allocate ctrl_vsi for the first time and open the ctrl_vsi port for VF
317 * Return: 0 on success, and other on error.
319 static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
321 struct ice_pf *pf = vf->pf;
322 struct ice_vsi *ctrl_vsi;
326 dev = ice_pf_to_dev(pf);
327 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
330 ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
332 dev_dbg(dev, "Could not setup control VSI for VF %d\n",
337 err = ice_vsi_open_ctrl(ctrl_vsi);
339 dev_dbg(dev, "Could not open control VSI for VF %d\n",
347 ice_vsi_release(ctrl_vsi);
348 if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
349 pf->vsi[vf->ctrl_vsi_idx] = NULL;
350 vf->ctrl_vsi_idx = ICE_NO_VSI;
356 * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
357 * @vf: pointer to the VF structure
358 * @flow: filter flow type
360 * Return: 0 on success, and other on error.
363 ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
365 struct ice_vf_fdir *fdir = &vf->fdir;
367 if (!fdir->fdir_prof) {
368 fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
370 sizeof(*fdir->fdir_prof),
372 if (!fdir->fdir_prof)
376 if (!fdir->fdir_prof[flow]) {
377 fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
378 sizeof(**fdir->fdir_prof),
380 if (!fdir->fdir_prof[flow])
388 * ice_vc_fdir_free_prof - free profile for this filter flow type
389 * @vf: pointer to the VF structure
390 * @flow: filter flow type
393 ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
395 struct ice_vf_fdir *fdir = &vf->fdir;
397 if (!fdir->fdir_prof)
400 if (!fdir->fdir_prof[flow])
403 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
404 fdir->fdir_prof[flow] = NULL;
408 * ice_vc_fdir_free_prof_all - free all the profile for this VF
409 * @vf: pointer to the VF structure
411 static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
413 struct ice_vf_fdir *fdir = &vf->fdir;
414 enum ice_fltr_ptype flow;
416 if (!fdir->fdir_prof)
419 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
420 ice_vc_fdir_free_prof(vf, flow);
422 devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
423 fdir->fdir_prof = NULL;
427 * ice_vc_fdir_parse_flow_fld
428 * @proto_hdr: virtual channel protocol filter header
429 * @conf: FDIR configuration for each filter
430 * @fld: field type array
431 * @fld_cnt: field counter
433 * Parse the virtual channel filter header and store them into field type array
435 * Return: 0 on success, and other on error.
438 ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
439 struct virtchnl_fdir_fltr_conf *conf,
440 enum ice_flow_field *fld, int *fld_cnt)
442 struct virtchnl_proto_hdr hdr;
445 memcpy(&hdr, proto_hdr, sizeof(hdr));
447 for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
448 VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
449 if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
450 if (fdir_inset_map[i].mask &&
451 ((fdir_inset_map[i].mask & conf->inset_flag) !=
452 fdir_inset_map[i].flag))
455 fld[*fld_cnt] = fdir_inset_map[i].fld;
457 if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
459 VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
460 fdir_inset_map[i].field);
467 * ice_vc_fdir_set_flow_fld
468 * @vf: pointer to the VF structure
469 * @fltr: virtual channel add cmd buffer
470 * @conf: FDIR configuration for each filter
471 * @seg: array of one or more packet segments that describe the flow
473 * Parse the virtual channel add msg buffer's field vector and store them into
474 * flow's packet segment field
476 * Return: 0 on success, and other on error.
479 ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
480 struct virtchnl_fdir_fltr_conf *conf,
481 struct ice_flow_seg_info *seg)
483 struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
484 enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
485 struct device *dev = ice_pf_to_dev(vf->pf);
486 struct virtchnl_proto_hdrs *proto;
490 proto = &rule->proto_hdrs;
491 for (i = 0; i < proto->count; i++) {
492 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
495 ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
501 dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
505 for (i = 0; i < fld_cnt; i++)
506 ice_flow_set_fld(seg, fld[i],
507 ICE_FLOW_FLD_OFF_INVAL,
508 ICE_FLOW_FLD_OFF_INVAL,
509 ICE_FLOW_FLD_OFF_INVAL, false);
515 * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
516 * @vf: pointer to the VF structure
517 * @conf: FDIR configuration for each filter
518 * @seg: array of one or more packet segments that describe the flow
520 * Return: 0 on success, and other on error.
523 ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
524 struct virtchnl_fdir_fltr_conf *conf,
525 struct ice_flow_seg_info *seg)
527 enum ice_fltr_ptype flow = conf->input.flow_type;
528 enum ice_fdir_tunnel_type ttype = conf->ttype;
529 struct device *dev = ice_pf_to_dev(vf->pf);
532 case ICE_FLTR_PTYPE_NON_IP_L2:
533 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
535 case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
536 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
537 ICE_FLOW_SEG_HDR_IPV4 |
538 ICE_FLOW_SEG_HDR_IPV_OTHER);
540 case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
541 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
542 ICE_FLOW_SEG_HDR_IPV4 |
543 ICE_FLOW_SEG_HDR_IPV_OTHER);
545 case ICE_FLTR_PTYPE_NONF_IPV4_AH:
546 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
547 ICE_FLOW_SEG_HDR_IPV4 |
548 ICE_FLOW_SEG_HDR_IPV_OTHER);
550 case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
551 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
552 ICE_FLOW_SEG_HDR_IPV4 |
553 ICE_FLOW_SEG_HDR_IPV_OTHER);
555 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
556 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
557 ICE_FLOW_SEG_HDR_IPV4 |
558 ICE_FLOW_SEG_HDR_IPV_OTHER);
560 case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
561 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
562 ICE_FLOW_SEG_HDR_IPV4 |
563 ICE_FLOW_SEG_HDR_IPV_OTHER);
565 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
566 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
567 ICE_FLOW_SEG_HDR_IPV_OTHER);
569 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
570 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
571 ICE_FLOW_SEG_HDR_IPV4 |
572 ICE_FLOW_SEG_HDR_IPV_OTHER);
574 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
575 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
576 ICE_FLOW_SEG_HDR_IPV4 |
577 ICE_FLOW_SEG_HDR_IPV_OTHER);
579 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
580 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
581 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
582 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
583 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
584 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
585 ICE_FLOW_SEG_HDR_IPV4 |
586 ICE_FLOW_SEG_HDR_IPV_OTHER);
587 } else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
588 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
589 ICE_FLOW_SEG_HDR_GTPU_IP |
590 ICE_FLOW_SEG_HDR_IPV4 |
591 ICE_FLOW_SEG_HDR_IPV_OTHER);
593 dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
598 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
599 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
600 ICE_FLOW_SEG_HDR_IPV4 |
601 ICE_FLOW_SEG_HDR_IPV_OTHER);
603 case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
604 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
605 ICE_FLOW_SEG_HDR_IPV6 |
606 ICE_FLOW_SEG_HDR_IPV_OTHER);
608 case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
609 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
610 ICE_FLOW_SEG_HDR_IPV6 |
611 ICE_FLOW_SEG_HDR_IPV_OTHER);
613 case ICE_FLTR_PTYPE_NONF_IPV6_AH:
614 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
615 ICE_FLOW_SEG_HDR_IPV6 |
616 ICE_FLOW_SEG_HDR_IPV_OTHER);
618 case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
619 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
620 ICE_FLOW_SEG_HDR_IPV6 |
621 ICE_FLOW_SEG_HDR_IPV_OTHER);
623 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
624 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
625 ICE_FLOW_SEG_HDR_IPV6 |
626 ICE_FLOW_SEG_HDR_IPV_OTHER);
628 case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
629 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
630 ICE_FLOW_SEG_HDR_IPV6 |
631 ICE_FLOW_SEG_HDR_IPV_OTHER);
633 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
634 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
635 ICE_FLOW_SEG_HDR_IPV_OTHER);
637 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
638 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
639 ICE_FLOW_SEG_HDR_IPV6 |
640 ICE_FLOW_SEG_HDR_IPV_OTHER);
642 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
643 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
644 ICE_FLOW_SEG_HDR_IPV6 |
645 ICE_FLOW_SEG_HDR_IPV_OTHER);
647 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
648 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
649 ICE_FLOW_SEG_HDR_IPV6 |
650 ICE_FLOW_SEG_HDR_IPV_OTHER);
653 dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
662 * ice_vc_fdir_rem_prof - remove profile for this filter flow type
663 * @vf: pointer to the VF structure
664 * @flow: filter flow type
665 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
668 ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
670 struct ice_vf_fdir *fdir = &vf->fdir;
671 struct ice_fd_hw_prof *vf_prof;
672 struct ice_pf *pf = vf->pf;
673 struct ice_vsi *vf_vsi;
679 dev = ice_pf_to_dev(pf);
681 if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
684 vf_prof = fdir->fdir_prof[flow];
686 vf_vsi = pf->vsi[vf->lan_vsi_idx];
688 dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
692 if (!fdir->prof_entry_cnt[flow][tun])
695 prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
696 flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
698 for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
699 if (vf_prof->entry_h[i][tun]) {
700 u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
702 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
703 ice_flow_rem_entry(hw, ICE_BLK_FD,
704 vf_prof->entry_h[i][tun]);
705 vf_prof->entry_h[i][tun] = 0;
708 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
709 devm_kfree(dev, vf_prof->fdir_seg[tun]);
710 vf_prof->fdir_seg[tun] = NULL;
712 for (i = 0; i < vf_prof->cnt; i++)
713 vf_prof->vsi_h[i] = 0;
715 fdir->prof_entry_cnt[flow][tun] = 0;
719 * ice_vc_fdir_rem_prof_all - remove profile for this VF
720 * @vf: pointer to the VF structure
722 static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
724 enum ice_fltr_ptype flow;
726 for (flow = ICE_FLTR_PTYPE_NONF_NONE;
727 flow < ICE_FLTR_PTYPE_MAX; flow++) {
728 ice_vc_fdir_rem_prof(vf, flow, 0);
729 ice_vc_fdir_rem_prof(vf, flow, 1);
734 * ice_vc_fdir_write_flow_prof
735 * @vf: pointer to the VF structure
736 * @flow: filter flow type
737 * @seg: array of one or more packet segments that describe the flow
738 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
740 * Write the flow's profile config and packet segment into the hardware
742 * Return: 0 on success, and other on error.
745 ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
746 struct ice_flow_seg_info *seg, int tun)
748 struct ice_vf_fdir *fdir = &vf->fdir;
749 struct ice_vsi *vf_vsi, *ctrl_vsi;
750 struct ice_flow_seg_info *old_seg;
751 struct ice_flow_prof *prof = NULL;
752 struct ice_fd_hw_prof *vf_prof;
753 enum ice_status status;
763 dev = ice_pf_to_dev(pf);
765 vf_vsi = pf->vsi[vf->lan_vsi_idx];
769 ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
773 vf_prof = fdir->fdir_prof[flow];
774 old_seg = vf_prof->fdir_seg[tun];
776 if (!memcmp(old_seg, seg, sizeof(*seg))) {
777 dev_dbg(dev, "Duplicated profile for VF %d!\n",
782 if (fdir->fdir_fltr_cnt[flow][tun]) {
784 dev_dbg(dev, "Input set conflicts for VF %d\n",
789 /* remove previously allocated profile */
790 ice_vc_fdir_rem_prof(vf, flow, tun);
793 prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
794 tun ? ICE_FLTR_PTYPE_MAX : 0);
796 status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
798 ret = ice_status_to_errno(status);
800 dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
805 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
806 vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
808 ret = ice_status_to_errno(status);
810 dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
815 status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
816 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
818 ret = ice_status_to_errno(status);
821 "Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
826 vf_prof->fdir_seg[tun] = seg;
828 fdir->prof_entry_cnt[flow][tun] = 0;
830 vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
831 vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
833 fdir->prof_entry_cnt[flow][tun]++;
835 vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
836 vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
838 fdir->prof_entry_cnt[flow][tun]++;
843 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
844 ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
845 ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
847 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
853 * ice_vc_fdir_config_input_set
854 * @vf: pointer to the VF structure
855 * @fltr: virtual channel add cmd buffer
856 * @conf: FDIR configuration for each filter
857 * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
859 * Config the input set type and value for virtual channel add msg buffer
861 * Return: 0 on success, and other on error.
864 ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
865 struct virtchnl_fdir_fltr_conf *conf, int tun)
867 struct ice_fdir_fltr *input = &conf->input;
868 struct device *dev = ice_pf_to_dev(vf->pf);
869 struct ice_flow_seg_info *seg;
870 enum ice_fltr_ptype flow;
873 flow = input->flow_type;
874 ret = ice_vc_fdir_alloc_prof(vf, flow);
876 dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
880 seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
884 ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
886 dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
890 ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
892 dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
896 ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
897 if (ret == -EEXIST) {
898 devm_kfree(dev, seg);
900 dev_dbg(dev, "Write flow profile for VF %d failed\n",
908 devm_kfree(dev, seg);
913 * ice_vc_fdir_match_pattern
914 * @fltr: virtual channel add cmd buffer
915 * @type: virtual channel protocol filter header type
917 * Matching the header type by comparing fltr and type's value.
919 * Return: true on success, and false on error.
922 ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
923 enum virtchnl_proto_hdr_type *type)
925 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
928 while ((i < proto->count) &&
929 (*type == proto->proto_hdr[i].type) &&
930 (*type != VIRTCHNL_PROTO_HDR_NONE)) {
935 return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
939 * ice_vc_fdir_get_pattern - get while list pattern
940 * @vf: pointer to the VF info
941 * @len: filter list length
943 * Return: pointer to allowed filter list
945 static const struct virtchnl_fdir_pattern_match_item *
946 ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
948 const struct virtchnl_fdir_pattern_match_item *item;
949 struct ice_pf *pf = vf->pf;
953 if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
954 sizeof(hw->active_pkg_name))) {
955 item = vc_fdir_pattern_comms;
956 *len = ARRAY_SIZE(vc_fdir_pattern_comms);
958 item = vc_fdir_pattern_os;
959 *len = ARRAY_SIZE(vc_fdir_pattern_os);
966 * ice_vc_fdir_search_pattern
967 * @vf: pointer to the VF info
968 * @fltr: virtual channel add cmd buffer
970 * Search for matched pattern from supported pattern list
972 * Return: 0 on success, and other on error.
975 ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
977 const struct virtchnl_fdir_pattern_match_item *pattern;
980 pattern = ice_vc_fdir_get_pattern(vf, &len);
982 for (i = 0; i < len; i++)
983 if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
990 * ice_vc_fdir_parse_pattern
991 * @vf: pointer to the VF info
992 * @fltr: virtual channel add cmd buffer
993 * @conf: FDIR configuration for each filter
995 * Parse the virtual channel filter's pattern and store them into conf
997 * Return: 0 on success, and other on error.
1000 ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
1001 struct virtchnl_fdir_fltr_conf *conf)
1003 struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
1004 enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
1005 enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
1006 struct device *dev = ice_pf_to_dev(vf->pf);
1007 struct ice_fdir_fltr *input = &conf->input;
1010 if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
1011 dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
1012 proto->count, vf->vf_id);
1016 for (i = 0; i < proto->count; i++) {
1017 struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
1018 struct ip_esp_hdr *esph;
1019 struct ip_auth_hdr *ah;
1020 struct sctphdr *sctph;
1021 struct ipv6hdr *ip6h;
1022 struct udphdr *udph;
1023 struct tcphdr *tcph;
1029 switch (hdr->type) {
1030 case VIRTCHNL_PROTO_HDR_ETH:
1031 eth = (struct ethhdr *)hdr->buffer;
1032 input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1034 if (hdr->field_selector)
1035 input->ext_data.ether_type = eth->h_proto;
1037 case VIRTCHNL_PROTO_HDR_IPV4:
1038 iph = (struct iphdr *)hdr->buffer;
1039 l3 = VIRTCHNL_PROTO_HDR_IPV4;
1040 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1042 if (hdr->field_selector) {
1043 input->ip.v4.src_ip = iph->saddr;
1044 input->ip.v4.dst_ip = iph->daddr;
1045 input->ip.v4.tos = iph->tos;
1046 input->ip.v4.proto = iph->protocol;
1049 case VIRTCHNL_PROTO_HDR_IPV6:
1050 ip6h = (struct ipv6hdr *)hdr->buffer;
1051 l3 = VIRTCHNL_PROTO_HDR_IPV6;
1052 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1054 if (hdr->field_selector) {
1055 memcpy(input->ip.v6.src_ip,
1056 ip6h->saddr.in6_u.u6_addr8,
1057 sizeof(ip6h->saddr));
1058 memcpy(input->ip.v6.dst_ip,
1059 ip6h->daddr.in6_u.u6_addr8,
1060 sizeof(ip6h->daddr));
1061 input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
1062 (ip6h->flow_lbl[0] >> 4);
1063 input->ip.v6.proto = ip6h->nexthdr;
1066 case VIRTCHNL_PROTO_HDR_TCP:
1067 tcph = (struct tcphdr *)hdr->buffer;
1068 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1069 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1070 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1071 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1073 if (hdr->field_selector) {
1074 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1075 input->ip.v4.src_port = tcph->source;
1076 input->ip.v4.dst_port = tcph->dest;
1077 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1078 input->ip.v6.src_port = tcph->source;
1079 input->ip.v6.dst_port = tcph->dest;
1083 case VIRTCHNL_PROTO_HDR_UDP:
1084 udph = (struct udphdr *)hdr->buffer;
1085 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1086 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1087 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1088 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1090 if (hdr->field_selector) {
1091 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1092 input->ip.v4.src_port = udph->source;
1093 input->ip.v4.dst_port = udph->dest;
1094 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1095 input->ip.v6.src_port = udph->source;
1096 input->ip.v6.dst_port = udph->dest;
1100 case VIRTCHNL_PROTO_HDR_SCTP:
1101 sctph = (struct sctphdr *)hdr->buffer;
1102 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1104 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1105 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1107 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1109 if (hdr->field_selector) {
1110 if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
1111 input->ip.v4.src_port = sctph->source;
1112 input->ip.v4.dst_port = sctph->dest;
1113 } else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
1114 input->ip.v6.src_port = sctph->source;
1115 input->ip.v6.dst_port = sctph->dest;
1119 case VIRTCHNL_PROTO_HDR_L2TPV3:
1120 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1121 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
1122 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1123 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
1125 if (hdr->field_selector)
1126 input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
1128 case VIRTCHNL_PROTO_HDR_ESP:
1129 esph = (struct ip_esp_hdr *)hdr->buffer;
1130 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1131 l4 == VIRTCHNL_PROTO_HDR_UDP)
1132 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
1133 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1134 l4 == VIRTCHNL_PROTO_HDR_UDP)
1135 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
1136 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
1137 l4 == VIRTCHNL_PROTO_HDR_NONE)
1138 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
1139 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
1140 l4 == VIRTCHNL_PROTO_HDR_NONE)
1141 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
1143 if (l4 == VIRTCHNL_PROTO_HDR_UDP)
1144 conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
1146 conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
1148 if (hdr->field_selector) {
1149 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1150 input->ip.v4.sec_parm_idx = esph->spi;
1151 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1152 input->ip.v6.sec_parm_idx = esph->spi;
1155 case VIRTCHNL_PROTO_HDR_AH:
1156 ah = (struct ip_auth_hdr *)hdr->buffer;
1157 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1158 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
1159 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1160 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
1162 if (hdr->field_selector) {
1163 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1164 input->ip.v4.sec_parm_idx = ah->spi;
1165 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1166 input->ip.v6.sec_parm_idx = ah->spi;
1169 case VIRTCHNL_PROTO_HDR_PFCP:
1170 rawh = (u8 *)hdr->buffer;
1171 s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
1172 if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
1173 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
1174 else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
1175 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
1176 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
1177 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
1178 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
1179 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
1181 if (hdr->field_selector) {
1182 if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
1183 input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
1184 else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
1185 input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
1188 case VIRTCHNL_PROTO_HDR_GTPU_IP:
1189 rawh = (u8 *)hdr->buffer;
1190 input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1192 if (hdr->field_selector)
1193 input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
1194 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
1196 case VIRTCHNL_PROTO_HDR_GTPU_EH:
1197 rawh = (u8 *)hdr->buffer;
1199 if (hdr->field_selector)
1200 input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
1201 conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1204 dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
1205 hdr->type, vf->vf_id);
/**
 * ice_vc_fdir_parse_action
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Parse the virtual channel filter's action and store them into conf
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			 struct virtchnl_fdir_fltr_conf *conf)
{
	struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_fdir_fltr *input = &conf->input;
	u32 dest_num = 0;	/* count of destination actions (pass/drop/queue) */
	u32 mark_num = 0;	/* count of FDID mark actions */
	int i;

	/* Reject oversized action sets up front; 'count' comes from the VF
	 * message and must not be trusted to index actions[].
	 */
	if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
		dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
			as->count, vf->vf_id);
		return -EINVAL;
	}

	for (i = 0; i < as->count; i++) {
		struct virtchnl_filter_action *action = &as->actions[i];

		switch (action->type) {
		case VIRTCHNL_ACTION_PASSTHRU:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case VIRTCHNL_ACTION_DROP:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case VIRTCHNL_ACTION_QUEUE:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			input->q_index = action->act_conf.queue.index;
			break;
		case VIRTCHNL_ACTION_Q_REGION:
			dest_num++;
			input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
			input->q_index = action->act_conf.queue.index;
			input->q_region = action->act_conf.queue.region;
			break;
		case VIRTCHNL_ACTION_MARK:
			mark_num++;
			input->fltr_id = action->act_conf.mark_id;
			input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
			break;
		default:
			dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
				action->type, vf->vf_id);
			return -EINVAL;
		}
	}

	/* Exactly one destination action is required per rule */
	if (dest_num == 0 || dest_num >= 2) {
		dev_dbg(dev, "Invalid destination action for VF %d\n",
			vf->vf_id);
		return -EINVAL;
	}

	/* At most one mark action may be attached */
	if (mark_num >= 2) {
		dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	return 0;
}
/**
 * ice_vc_validate_fdir_fltr - validate the virtual channel filter
 * @vf: pointer to the VF info
 * @fltr: virtual channel add cmd buffer
 * @conf: FDIR configuration for each filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
			  struct virtchnl_fdir_fltr_conf *conf)
{
	int err;

	/* The pattern must match a supported template and parse cleanly
	 * before the actions are examined; stop at the first failure.
	 */
	err = ice_vc_fdir_search_pattern(vf, fltr);
	if (!err)
		err = ice_vc_fdir_parse_pattern(vf, fltr, conf);
	if (!err)
		err = ice_vc_fdir_parse_action(vf, fltr, conf);

	return err;
}
/**
 * ice_vc_fdir_comp_rules - compare if two filter rules have the same value
 * @conf_a: FDIR configuration for filter a
 * @conf_b: FDIR configuration for filter b
 *
 * Return: true if the two rules carry identical match data, false otherwise.
 */
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
		       struct virtchnl_fdir_fltr_conf *conf_b)
{
	struct ice_fdir_fltr *a = &conf_a->input;
	struct ice_fdir_fltr *b = &conf_b->input;

	/* Compare tunnel type and flow type first (cheap), then every
	 * match-data/mask field; any mismatch means the rules differ.
	 */
	if (conf_a->ttype != conf_b->ttype)
		return false;
	if (a->flow_type != b->flow_type)
		return false;
	if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
		return false;
	if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
		return false;
	if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
		return false;
	if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
		return false;
	if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
		return false;
	if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
		return false;
	if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
		return false;
	if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
		return false;

	return true;
}
/**
 * ice_vc_fdir_is_dup_fltr
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 *
 * Check if there is duplicated rule with same conf value
 *
 * Return: true if a rule with identical conf already exists, false otherwise.
 */
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
	struct ice_fdir_fltr *desc;
	bool ret;

	/* Linear scan of this VF's rule list; entries are embedded
	 * ice_fdir_fltr nodes, so recover the enclosing conf first.
	 */
	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
		struct virtchnl_fdir_fltr_conf *node =
				to_fltr_conf_from_desc(desc);

		ret = ice_vc_fdir_comp_rules(node, conf);
		if (ret)
			return true;
	}

	return false;
}
/**
 * ice_vc_fdir_insert_entry
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @id: pointer to ID value allocated by driver
 *
 * Insert FDIR conf entry into list and allocate ID for this filter
 *
 * Return: 0 on success, and other on error.
 */
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
			 struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
	struct ice_fdir_fltr *input = &conf->input;
	int i;

	/* alloc ID corresponding with conf; the IDR maps the returned
	 * flow ID back to conf for later lookup/removal
	 */
	i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
		      ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
	if (i < 0)
		return -EINVAL;
	*id = i;

	list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
	return 0;
}
1408 * ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
1409 * @vf: pointer to the VF info
1410 * @conf: FDIR configuration for each filter
1411 * @id: filter rule's ID
1414 ice_vc_fdir_remove_entry(struct ice_vf *vf,
1415 struct virtchnl_fdir_fltr_conf *conf, u32 id)
1417 struct ice_fdir_fltr *input = &conf->input;
1419 idr_remove(&vf->fdir.fdir_rule_idr, id);
1420 list_del(&input->fltr_node);
/**
 * ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
 * @vf: pointer to the VF info
 * @id: filter rule's ID
 *
 * Return: NULL on error, and other on success.
 */
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
	/* @id is the flow ID handed out by ice_vc_fdir_insert_entry() */
	return idr_find(&vf->fdir.fdir_rule_idr, id);
}
/**
 * ice_vc_fdir_flush_entry - remove all FDIR conf entry
 * @vf: pointer to the VF info
 */
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
	struct virtchnl_fdir_fltr_conf *conf;
	struct ice_fdir_fltr *desc, *temp;

	/* _safe iteration is required because each node is unlinked and
	 * freed while walking the list.
	 */
	list_for_each_entry_safe(desc, temp,
				 &vf->fdir.fdir_rule_list, fltr_node) {
		conf = to_fltr_conf_from_desc(desc);
		list_del(&desc->fltr_node);
		/* conf was devm-allocated in ice_vc_add_fdir_fltr() */
		devm_kfree(ice_pf_to_dev(vf->pf), conf);
	}
}
/**
 * ice_vc_fdir_write_fltr - write filter rule into hardware
 * @vf: pointer to the VF info
 * @conf: FDIR configuration for each filter
 * @add: true implies add rule, false implies del rules
 * @is_tun: false implies non-tunnel type filter, true implies tunnel filter
 *
 * Return: 0 on success, and other on error.
 */
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
				  struct virtchnl_fdir_fltr_conf *conf,
				  bool add, bool is_tun)
{
	struct ice_fdir_fltr *input = &conf->input;
	struct ice_vsi *vsi, *ctrl_vsi;
	struct ice_fltr_desc desc;
	enum ice_status status;
	struct device *dev;
	struct ice_pf *pf;
	struct ice_hw *hw;
	int ret;
	u8 *pkt;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	/* Matched packets are steered to the VF's LAN VSI; completion is
	 * reported to SW on programming failure.
	 */
	input->dest_vsi = vsi->idx;
	input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;

	/* Programming descriptors are issued on the VF's control VSI */
	ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
	if (!ctrl_vsi) {
		dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
		return -EINVAL;
	}

	pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	/* Build the programming descriptor plus a training packet that
	 * teaches the HW the rule, then push both via the ctrl VSI.
	 */
	ice_fdir_get_prgm_desc(hw, input, &desc, add);
	status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
	ret = ice_status_to_errno(status);
	if (ret) {
		dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
			vf->vf_id, input->flow_type);
		goto err_free_pkt;
	}

	ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
	if (ret)
		goto err_free_pkt;

	return 0;

err_free_pkt:
	devm_kfree(dev, pkt);
	return ret;
}
/**
 * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * Return: 0 on success, and other on error.
 */
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
	struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
	struct virtchnl_fdir_add *stat = NULL;
	struct virtchnl_fdir_fltr_conf *conf;
	enum virtchnl_status_code v_ret;
	struct device *dev;
	struct ice_pf *pf;
	int is_tun = 0;
	int len = 0;
	int ret;

	pf = vf->pf;
	dev = ice_pf_to_dev(pf);
	ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	/* -EEXIST means the ctrl VSI is already running; that is fine here */
	ret = ice_vf_start_ctrl_vsi(vf);
	if (ret && (ret != -EEXIST)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_exit;
	}

	/* stat is the reply payload sent back to the VF; kfree'd on exit */
	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if (!stat) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	/* conf is devm-allocated; it lives as long as the rule is installed */
	conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
	if (!conf) {
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
		dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
		goto err_exit;
	}

	len = sizeof(*stat);
	ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
		dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
		goto err_free_conf;
	}

	/* Validate-only request: report success without touching HW */
	if (fltr->validate_only) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_SUCCESS;
		devm_kfree(dev, conf);
		ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
					    v_ret, (u8 *)stat, len);
		goto exit;
	}

	ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
		dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_is_dup_fltr(vf, conf);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
		dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
			vf->vf_id);
		goto err_free_conf;
	}

	/* Allocates stat->flow_id, which the VF uses for later deletion */
	ret = ice_vc_fdir_insert_entry(vf, conf, &stat->flow_id);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
		goto err_free_conf;
	}

	ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
	if (ret) {
		v_ret = VIRTCHNL_STATUS_SUCCESS;
		stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
		dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
			vf->vf_id, ret);
		goto err_rem_entry;
	}

	vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;

	v_ret = VIRTCHNL_STATUS_SUCCESS;
	stat->status = VIRTCHNL_FDIR_SUCCESS;
exit:
	kfree(stat);
	return ret;

err_rem_entry:
	/* unwind in reverse order of setup */
	ice_vc_fdir_remove_entry(vf, conf, stat->flow_id);
err_free_conf:
	devm_kfree(dev, conf);
err_exit:
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
				    (u8 *)stat, len);
	kfree(stat);
	return ret;
}
1643 * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
1644 * @vf: pointer to the VF info
1645 * @msg: pointer to the msg buffer
1647 * Return: 0 on success, and other on error.
1649 int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
1651 struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
1652 struct virtchnl_fdir_del *stat = NULL;
1653 struct virtchnl_fdir_fltr_conf *conf;
1654 enum virtchnl_status_code v_ret;
1662 dev = ice_pf_to_dev(pf);
1663 ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
1665 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1666 dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
1670 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
1672 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
1673 dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
1677 len = sizeof(*stat);
1679 conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
1681 v_ret = VIRTCHNL_STATUS_SUCCESS;
1682 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
1683 dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
1684 vf->vf_id, fltr->flow_id);
1688 /* Just return failure when ctrl_vsi idx is invalid */
1689 if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
1690 v_ret = VIRTCHNL_STATUS_SUCCESS;
1691 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1692 dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
1696 ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
1698 v_ret = VIRTCHNL_STATUS_SUCCESS;
1699 stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
1700 dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
1705 ice_vc_fdir_remove_entry(vf, conf, fltr->flow_id);
1706 devm_kfree(dev, conf);
1707 vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
1709 v_ret = VIRTCHNL_STATUS_SUCCESS;
1710 stat->status = VIRTCHNL_FDIR_SUCCESS;
1713 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
1720 * ice_vf_fdir_init - init FDIR resource for VF
1721 * @vf: pointer to the VF info
1723 void ice_vf_fdir_init(struct ice_vf *vf)
1725 struct ice_vf_fdir *fdir = &vf->fdir;
1727 idr_init(&fdir->fdir_rule_idr);
1728 INIT_LIST_HEAD(&fdir->fdir_rule_list);
/**
 * ice_vf_fdir_exit - destroy FDIR resource for VF
 * @vf: pointer to the VF info
 */
void ice_vf_fdir_exit(struct ice_vf *vf)
{
	/* Free every installed rule entry first, then tear down the IDR,
	 * and finally remove and free the HW flow profiles.
	 */
	ice_vc_fdir_flush_entry(vf);
	idr_destroy(&vf->fdir.fdir_rule_idr);
	ice_vc_fdir_rem_prof_all(vf);
	ice_vc_fdir_free_prof_all(vf);
}