1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018-2020, Intel Corporation. */
7 * ice_is_arfs_active - helper to check is aRFS is active
10 static bool ice_is_arfs_active(struct ice_vsi *vsi)
12 return !!vsi->arfs_fltr_list;
16 * ice_is_arfs_using_perfect_flow - check if aRFS has active perfect filters
17 * @hw: pointer to the HW structure
18 * @flow_type: flow type as Flow Director understands it
20 * Flow Director will query this function to see if aRFS is currently using
21 * the specified flow_type for perfect (4-tuple) filters.
24 ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type)
26 struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
27 struct ice_pf *pf = hw->back;
30 vsi = ice_get_main_vsi(pf);
34 arfs_fltr_cntrs = vsi->arfs_fltr_cntrs;
36 /* active counters can be updated by multiple CPUs */
37 smp_mb__before_atomic();
39 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
40 return atomic_read(&arfs_fltr_cntrs->active_udpv4_cnt) > 0;
41 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
42 return atomic_read(&arfs_fltr_cntrs->active_udpv6_cnt) > 0;
43 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
44 return atomic_read(&arfs_fltr_cntrs->active_tcpv4_cnt) > 0;
45 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
46 return atomic_read(&arfs_fltr_cntrs->active_tcpv6_cnt) > 0;
53 * ice_arfs_update_active_fltr_cntrs - update active filter counters for aRFS
54 * @vsi: VSI that aRFS is active on
55 * @entry: aRFS entry used to change counters
56 * @add: true to increment counter, false to decrement
59 ice_arfs_update_active_fltr_cntrs(struct ice_vsi *vsi,
60 struct ice_arfs_entry *entry, bool add)
62 struct ice_arfs_active_fltr_cntrs *fltr_cntrs = vsi->arfs_fltr_cntrs;
64 switch (entry->fltr_info.flow_type) {
65 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
67 atomic_inc(&fltr_cntrs->active_tcpv4_cnt);
69 atomic_dec(&fltr_cntrs->active_tcpv4_cnt);
71 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
73 atomic_inc(&fltr_cntrs->active_tcpv6_cnt);
75 atomic_dec(&fltr_cntrs->active_tcpv6_cnt);
77 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
79 atomic_inc(&fltr_cntrs->active_udpv4_cnt);
81 atomic_dec(&fltr_cntrs->active_udpv4_cnt);
83 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
85 atomic_inc(&fltr_cntrs->active_udpv6_cnt);
87 atomic_dec(&fltr_cntrs->active_udpv6_cnt);
90 dev_err(ice_pf_to_dev(vsi->back), "aRFS: Failed to update filter counters, invalid filter type %d\n",
91 entry->fltr_info.flow_type);
96 * ice_arfs_del_flow_rules - delete the rules passed in from HW
97 * @vsi: VSI for the flow rules that need to be deleted
98 * @del_list_head: head of the list of ice_arfs_entry(s) for rule deletion
100 * Loop through the delete list passed in and remove the rules from HW. After
101 * each rule is deleted, disconnect and free the ice_arfs_entry because it is no
102 * longer being referenced by the aRFS hash table.
105 ice_arfs_del_flow_rules(struct ice_vsi *vsi, struct hlist_head *del_list_head)
107 struct ice_arfs_entry *e;
108 struct hlist_node *n;
111 dev = ice_pf_to_dev(vsi->back);
113 hlist_for_each_entry_safe(e, n, del_list_head, list_entry) {
116 result = ice_fdir_write_fltr(vsi->back, &e->fltr_info, false,
119 ice_arfs_update_active_fltr_cntrs(vsi, e, false);
121 dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
122 result, e->fltr_state, e->fltr_info.fltr_id,
123 e->flow_id, e->fltr_info.q_index);
125 /* The aRFS hash table is no longer referencing this entry */
126 hlist_del(&e->list_entry);
132 * ice_arfs_add_flow_rules - add the rules passed in from HW
133 * @vsi: VSI for the flow rules that need to be added
134 * @add_list_head: head of the list of ice_arfs_entry_ptr(s) for rule addition
136 * Loop through the add list passed in and remove the rules from HW. After each
137 * rule is added, disconnect and free the ice_arfs_entry_ptr node. Don't free
138 * the ice_arfs_entry(s) because they are still being referenced in the aRFS
142 ice_arfs_add_flow_rules(struct ice_vsi *vsi, struct hlist_head *add_list_head)
144 struct ice_arfs_entry_ptr *ep;
145 struct hlist_node *n;
148 dev = ice_pf_to_dev(vsi->back);
150 hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) {
153 result = ice_fdir_write_fltr(vsi->back,
154 &ep->arfs_entry->fltr_info, true,
157 ice_arfs_update_active_fltr_cntrs(vsi, ep->arfs_entry,
160 dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n",
161 result, ep->arfs_entry->fltr_state,
162 ep->arfs_entry->fltr_info.fltr_id,
163 ep->arfs_entry->flow_id,
164 ep->arfs_entry->fltr_info.q_index);
166 hlist_del(&ep->list_entry);
172 * ice_arfs_is_flow_expired - check if the aRFS entry has expired
173 * @vsi: VSI containing the aRFS entry
174 * @arfs_entry: aRFS entry that's being checked for expiration
176 * Return true if the flow has expired, else false. This function should be used
177 * to determine whether or not an aRFS entry should be removed from the hardware
178 * and software structures.
181 ice_arfs_is_flow_expired(struct ice_vsi *vsi, struct ice_arfs_entry *arfs_entry)
183 #define ICE_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000)
184 if (rps_may_expire_flow(vsi->netdev, arfs_entry->fltr_info.q_index,
186 arfs_entry->fltr_info.fltr_id))
189 /* expiration timer only used for UDP filters */
190 if (arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV4_UDP &&
191 arfs_entry->fltr_info.flow_type != ICE_FLTR_PTYPE_NONF_IPV6_UDP)
194 return time_in_range64(arfs_entry->time_activated +
195 ICE_ARFS_TIME_DELTA_EXPIRATION,
196 arfs_entry->time_activated, get_jiffies_64());
200 * ice_arfs_update_flow_rules - add/delete aRFS rules in HW
201 * @vsi: the VSI to be forwarded to
202 * @idx: index into the table of aRFS filter lists. Obtained from skb->hash
203 * @add_list: list to populate with filters to be added to Flow Director
204 * @del_list: list to populate with filters to be deleted from Flow Director
206 * Iterate over the hlist at the index given in the aRFS hash table and
207 * determine if there are any aRFS entries that need to be either added or
208 * deleted in the HW. If the aRFS entry is marked as ICE_ARFS_INACTIVE the
209 * filter needs to be added to HW, else if it's marked as ICE_ARFS_ACTIVE and
210 * the flow has expired delete the filter from HW. The caller of this function
211 * is expected to add/delete rules on the add_list/del_list respectively.
214 ice_arfs_update_flow_rules(struct ice_vsi *vsi, u16 idx,
215 struct hlist_head *add_list,
216 struct hlist_head *del_list)
218 struct ice_arfs_entry *e;
219 struct hlist_node *n;
222 dev = ice_pf_to_dev(vsi->back);
224 /* go through the aRFS hlist at this idx and check for needed updates */
225 hlist_for_each_entry_safe(e, n, &vsi->arfs_fltr_list[idx], list_entry)
226 /* check if filter needs to be added to HW */
227 if (e->fltr_state == ICE_ARFS_INACTIVE) {
228 enum ice_fltr_ptype flow_type = e->fltr_info.flow_type;
229 struct ice_arfs_entry_ptr *ep =
230 devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC);
234 INIT_HLIST_NODE(&ep->list_entry);
235 /* reference aRFS entry to add HW filter */
237 hlist_add_head(&ep->list_entry, add_list);
238 e->fltr_state = ICE_ARFS_ACTIVE;
239 /* expiration timer only used for UDP flows */
240 if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
241 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
242 e->time_activated = get_jiffies_64();
243 } else if (e->fltr_state == ICE_ARFS_ACTIVE) {
244 /* check if filter needs to be removed from HW */
245 if (ice_arfs_is_flow_expired(vsi, e)) {
246 /* remove aRFS entry from hash table for delete
247 * and to prevent referencing it the next time
248 * through this hlist index
250 hlist_del(&e->list_entry);
251 e->fltr_state = ICE_ARFS_TODEL;
252 /* save reference to aRFS entry for delete */
253 hlist_add_head(&e->list_entry, del_list);
259 * ice_sync_arfs_fltrs - update all aRFS filters
260 * @pf: board private structure
262 void ice_sync_arfs_fltrs(struct ice_pf *pf)
264 HLIST_HEAD(tmp_del_list);
265 HLIST_HEAD(tmp_add_list);
266 struct ice_vsi *pf_vsi;
269 pf_vsi = ice_get_main_vsi(pf);
273 if (!ice_is_arfs_active(pf_vsi))
276 spin_lock_bh(&pf_vsi->arfs_lock);
277 /* Once we process aRFS for the PF VSI get out */
278 for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
279 ice_arfs_update_flow_rules(pf_vsi, i, &tmp_add_list,
281 spin_unlock_bh(&pf_vsi->arfs_lock);
283 /* use list of ice_arfs_entry(s) for delete */
284 ice_arfs_del_flow_rules(pf_vsi, &tmp_del_list);
286 /* use list of ice_arfs_entry_ptr(s) for add */
287 ice_arfs_add_flow_rules(pf_vsi, &tmp_add_list);
291 * ice_arfs_build_entry - builds an aRFS entry based on input
292 * @vsi: destination VSI for this flow
293 * @fk: flow dissector keys for creating the tuple
294 * @rxq_idx: Rx queue to steer this flow to
295 * @flow_id: passed down from the stack and saved for flow expiration
297 * returns an aRFS entry on success and NULL on failure
299 static struct ice_arfs_entry *
300 ice_arfs_build_entry(struct ice_vsi *vsi, const struct flow_keys *fk,
301 u16 rxq_idx, u32 flow_id)
303 struct ice_arfs_entry *arfs_entry;
304 struct ice_fdir_fltr *fltr_info;
307 arfs_entry = devm_kzalloc(ice_pf_to_dev(vsi->back),
309 GFP_ATOMIC | __GFP_NOWARN);
313 fltr_info = &arfs_entry->fltr_info;
314 fltr_info->q_index = rxq_idx;
315 fltr_info->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
316 fltr_info->dest_vsi = vsi->idx;
317 ip_proto = fk->basic.ip_proto;
319 if (fk->basic.n_proto == htons(ETH_P_IP)) {
320 fltr_info->ip.v4.proto = ip_proto;
321 fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
322 ICE_FLTR_PTYPE_NONF_IPV4_TCP :
323 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
324 fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src;
325 fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst;
326 fltr_info->ip.v4.src_port = fk->ports.src;
327 fltr_info->ip.v4.dst_port = fk->ports.dst;
328 } else { /* ETH_P_IPV6 */
329 fltr_info->ip.v6.proto = ip_proto;
330 fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ?
331 ICE_FLTR_PTYPE_NONF_IPV6_TCP :
332 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
333 memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
334 sizeof(struct in6_addr));
335 memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
336 sizeof(struct in6_addr));
337 fltr_info->ip.v6.src_port = fk->ports.src;
338 fltr_info->ip.v6.dst_port = fk->ports.dst;
341 arfs_entry->flow_id = flow_id;
343 atomic_inc_return(vsi->arfs_last_fltr_id) % RPS_NO_FILTER;
349 * ice_arfs_is_perfect_flow_set - Check to see if perfect flow is set
350 * @hw: pointer to HW structure
351 * @l3_proto: ETH_P_IP or ETH_P_IPV6 in network order
352 * @l4_proto: IPPROTO_UDP or IPPROTO_TCP
354 * We only support perfect (4-tuple) filters for aRFS. This function allows aRFS
355 * to check if perfect (4-tuple) flow rules are currently in place by Flow
359 ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
361 unsigned long *perfect_fltr = hw->fdir_perfect_fltr;
363 /* advanced Flow Director disabled, perfect filters always supported */
367 if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_UDP)
368 return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_UDP, perfect_fltr);
369 else if (l3_proto == htons(ETH_P_IP) && l4_proto == IPPROTO_TCP)
370 return test_bit(ICE_FLTR_PTYPE_NONF_IPV4_TCP, perfect_fltr);
371 else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_UDP)
372 return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_UDP, perfect_fltr);
373 else if (l3_proto == htons(ETH_P_IPV6) && l4_proto == IPPROTO_TCP)
374 return test_bit(ICE_FLTR_PTYPE_NONF_IPV6_TCP, perfect_fltr);
380 * ice_rx_flow_steer - steer the Rx flow to where application is being run
381 * @netdev: ptr to the netdev being adjusted
382 * @skb: buffer with required header information
383 * @rxq_idx: queue to which the flow needs to move
384 * @flow_id: flow identifier provided by the netdev
386 * Based on the skb, rxq_idx, and flow_id passed in add/update an entry in the
387 * aRFS hash table. Iterate over one of the hlists in the aRFS hash table and
388 * if the flow_id already exists in the hash table but the rxq_idx has changed
389 * mark the entry as ICE_ARFS_INACTIVE so it can get updated in HW, else
390 * if the entry is marked as ICE_ARFS_TODEL delete it from the aRFS hash table.
391 * If neither of the previous conditions are true then add a new entry in the
392 * aRFS hash table, which gets set to ICE_ARFS_INACTIVE by default so it can be
396 ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
397 u16 rxq_idx, u32 flow_id)
399 struct ice_netdev_priv *np = netdev_priv(netdev);
400 struct ice_arfs_entry *arfs_entry;
401 struct ice_vsi *vsi = np->vsi;
409 /* failed to allocate memory for aRFS so don't crash */
410 if (unlikely(!vsi->arfs_fltr_list))
415 if (skb->encapsulation)
416 return -EPROTONOSUPPORT;
418 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
419 return -EPROTONOSUPPORT;
421 n_proto = fk.basic.n_proto;
422 /* Support only IPV4 and IPV6 */
423 if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) ||
424 n_proto == htons(ETH_P_IPV6))
425 ip_proto = fk.basic.ip_proto;
427 return -EPROTONOSUPPORT;
429 /* Support only TCP and UDP */
430 if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
431 return -EPROTONOSUPPORT;
433 /* only support 4-tuple filters for aRFS */
434 if (!ice_arfs_is_perfect_flow_set(&pf->hw, n_proto, ip_proto))
437 /* choose the aRFS list bucket based on skb hash */
438 idx = skb_get_hash_raw(skb) & ICE_ARFS_LST_MASK;
439 /* search for entry in the bucket */
440 spin_lock_bh(&vsi->arfs_lock);
441 hlist_for_each_entry(arfs_entry, &vsi->arfs_fltr_list[idx],
443 struct ice_fdir_fltr *fltr_info;
445 /* keep searching for the already existing arfs_entry flow */
446 if (arfs_entry->flow_id != flow_id)
449 fltr_info = &arfs_entry->fltr_info;
450 ret = fltr_info->fltr_id;
452 if (fltr_info->q_index == rxq_idx ||
453 arfs_entry->fltr_state != ICE_ARFS_ACTIVE)
456 /* update the queue to forward to on an already existing flow */
457 fltr_info->q_index = rxq_idx;
458 arfs_entry->fltr_state = ICE_ARFS_INACTIVE;
459 ice_arfs_update_active_fltr_cntrs(vsi, arfs_entry, false);
460 goto out_schedule_service_task;
463 arfs_entry = ice_arfs_build_entry(vsi, &fk, rxq_idx, flow_id);
469 ret = arfs_entry->fltr_info.fltr_id;
470 INIT_HLIST_NODE(&arfs_entry->list_entry);
471 hlist_add_head(&arfs_entry->list_entry, &vsi->arfs_fltr_list[idx]);
472 out_schedule_service_task:
473 ice_service_task_schedule(pf);
475 spin_unlock_bh(&vsi->arfs_lock);
480 * ice_init_arfs_cntrs - initialize aRFS counter values
481 * @vsi: VSI that aRFS counters need to be initialized on
483 static int ice_init_arfs_cntrs(struct ice_vsi *vsi)
485 if (!vsi || vsi->type != ICE_VSI_PF)
488 vsi->arfs_fltr_cntrs = kzalloc(sizeof(*vsi->arfs_fltr_cntrs),
490 if (!vsi->arfs_fltr_cntrs)
493 vsi->arfs_last_fltr_id = kzalloc(sizeof(*vsi->arfs_last_fltr_id),
495 if (!vsi->arfs_last_fltr_id) {
496 kfree(vsi->arfs_fltr_cntrs);
497 vsi->arfs_fltr_cntrs = NULL;
505 * ice_init_arfs - initialize aRFS resources
506 * @vsi: the VSI to be forwarded to
508 void ice_init_arfs(struct ice_vsi *vsi)
510 struct hlist_head *arfs_fltr_list;
513 if (!vsi || vsi->type != ICE_VSI_PF)
516 arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
521 if (ice_init_arfs_cntrs(vsi))
522 goto free_arfs_fltr_list;
524 for (i = 0; i < ICE_MAX_ARFS_LIST; i++)
525 INIT_HLIST_HEAD(&arfs_fltr_list[i]);
527 spin_lock_init(&vsi->arfs_lock);
529 vsi->arfs_fltr_list = arfs_fltr_list;
534 kfree(arfs_fltr_list);
538 * ice_clear_arfs - clear the aRFS hash table and any memory used for aRFS
539 * @vsi: the VSI to be forwarded to
541 void ice_clear_arfs(struct ice_vsi *vsi)
546 if (!vsi || vsi->type != ICE_VSI_PF || !vsi->back ||
547 !vsi->arfs_fltr_list)
550 dev = ice_pf_to_dev(vsi->back);
551 for (i = 0; i < ICE_MAX_ARFS_LIST; i++) {
552 struct ice_arfs_entry *r;
553 struct hlist_node *n;
555 spin_lock_bh(&vsi->arfs_lock);
556 hlist_for_each_entry_safe(r, n, &vsi->arfs_fltr_list[i],
558 hlist_del(&r->list_entry);
561 spin_unlock_bh(&vsi->arfs_lock);
564 kfree(vsi->arfs_fltr_list);
565 vsi->arfs_fltr_list = NULL;
566 kfree(vsi->arfs_last_fltr_id);
567 vsi->arfs_last_fltr_id = NULL;
568 kfree(vsi->arfs_fltr_cntrs);
569 vsi->arfs_fltr_cntrs = NULL;
573 * ice_free_cpu_rx_rmap - free setup CPU reverse map
574 * @vsi: the VSI to be forwarded to
576 void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
578 struct net_device *netdev;
580 if (!vsi || vsi->type != ICE_VSI_PF)
583 netdev = vsi->netdev;
584 if (!netdev || !netdev->rx_cpu_rmap)
587 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
588 netdev->rx_cpu_rmap = NULL;
592 * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
593 * @vsi: the VSI to be forwarded to
595 int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
597 struct net_device *netdev;
601 if (!vsi || vsi->type != ICE_VSI_PF)
605 netdev = vsi->netdev;
606 if (!pf || !netdev || !vsi->num_q_vectors)
609 netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
610 vsi->type, netdev->name, vsi->num_q_vectors);
612 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
613 if (unlikely(!netdev->rx_cpu_rmap))
616 base_idx = vsi->base_vector;
617 ice_for_each_q_vector(vsi, i)
618 if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
619 pf->msix_entries[base_idx + i].vector)) {
620 ice_free_cpu_rx_rmap(vsi);
/**
 * ice_remove_arfs - remove/clear all aRFS resources
 * @pf: device private structure
 */
void ice_remove_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	ice_clear_arfs(pf_vsi);
}
/**
 * ice_rebuild_arfs - remove/clear all aRFS resources and rebuild after reset
 * @pf: device private structure
 */
void ice_rebuild_arfs(struct ice_pf *pf)
{
	struct ice_vsi *pf_vsi;

	pf_vsi = ice_get_main_vsi(pf);
	if (!pf_vsi)
		return;

	/* tear down any stale aRFS state before re-initializing */
	ice_remove_arfs(pf);
	ice_init_arfs(pf_vsi);
}