// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 */
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <net/udp_tunnel.h>
9 #include <linux/bitops.h>
10 #include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include "qede.h"
15 #define QEDE_FILTER_PRINT_MAX_LEN (64)
16 struct qede_arfs_tuple {
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
	__be16 eth_proto;
	u8 ip_proto;
30 /* Describe filtering mode needed for this kind of filter */
31 enum qed_filter_config_mode mode;
33 /* Used to compare new/old filters. Return true if IPs match */
34 bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
	/* Given the address of an ethhdr, build a packet header from the
	 * tuple info
	 */
37 void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
39 /* Stringify the tuple for a print into the provided buffer */
	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
};
43 struct qede_arfs_fltr_node {
44 #define QEDE_FLTR_VALID 0
	unsigned long state;

	/* pointer to aRFS packet buffer */
	void *data;

	/* dma map address of aRFS packet buffer */
	dma_addr_t mapping;

	/* length of aRFS packet buffer */
	int buf_len;

	/* tuple parsed from the aRFS packet buffer */
	struct qede_arfs_tuple tuple;

	u32 flow_id;
	u64 sw_id;
	u16 rxq_id;
	u16 next_rxq_id;
	u8 vfid;
	bool filter_op;
	bool used;
	u8 fw_rc;
	bool b_is_drop;
	struct hlist_node node;
};
struct qede_arfs {
#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
73 #define QEDE_ARFS_POLL_COUNT 100
74 #define QEDE_RFS_FLW_BITSHIFT (4)
75 #define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
76 struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
78 /* lock for filter list access */
79 spinlock_t arfs_list_lock;
	unsigned long *arfs_fltr_bmap;
	int filter_count;
83 /* Currently configured filtering mode */
	enum qed_filter_config_mode mode;
};
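
/* Build the qed_ntuple_filter_params for @n and ask qed to add or remove
 * the corresponding hardware filter; completion is reported asynchronously
 * through qede_arfs_filter_op().
 */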
87 static void qede_configure_arfs_fltr(struct qede_dev *edev,
88 struct qede_arfs_fltr_node *n,
				     u16 rxq_id, bool add_fltr)
{
91 const struct qed_eth_ops *op = edev->ops;
92 struct qed_ntuple_filter_params params;
	if (n->used)
		return;

	memset(&params, 0, sizeof(params));

99 params.addr = n->mapping;
100 params.length = n->buf_len;
	params.qid = rxq_id;
	params.b_is_add = add_fltr;
103 params.b_is_drop = n->b_is_drop;
	if (n->vfid) {
		params.b_is_vf = true;
		params.vf_id = n->vfid - 1;
	}

110 if (n->tuple.stringify) {
111 char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
113 n->tuple.stringify(&n->tuple, tuple_buffer);
114 DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
115 "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
116 add_fltr ? "Adding" : "Deleting",
117 n->sw_id, tuple_buffer, n->vfid, rxq_id);
	}

	n->used = true;
	n->filter_op = add_fltr;
	op->ntuple_filter_config(edev->cdev, n, &params);
}
static void
qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
{
	kfree(fltr->data);

130 if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);

	kfree(fltr);
}
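
/* Map the filter's packet buffer for DMA, link the node into its hash
 * bucket and, for the very first filter, switch the RFS searcher to this
 * filter's filtering mode.
 */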
static int
qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr,
				      u16 bucket_idx)
{
141 fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
142 fltr->buf_len, DMA_TO_DEVICE);
143 if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
144 DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
		qede_free_arfs_filter(edev, fltr);
		return -ENOMEM;
	}

149 INIT_HLIST_NODE(&fltr->node);
150 hlist_add_head(&fltr->node,
151 QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
153 edev->arfs->filter_count++;
154 if (edev->arfs->filter_count == 1 &&
155 edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
		edev->ops->configure_arfs_searcher(edev->cdev,
						   fltr->tuple.mode);
		edev->arfs->mode = fltr->tuple.mode;
	}

	return 0;
}
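
/* Reverse of qede_enqueue_fltr_and_config_searcher(): unlink, unmap and
 * free the filter, disabling the searcher again once the last filter is
 * removed.
 */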
static void
qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
				      struct qede_arfs_fltr_node *fltr)
{
168 hlist_del(&fltr->node);
169 dma_unmap_single(&edev->pdev->dev, fltr->mapping,
170 fltr->buf_len, DMA_TO_DEVICE);
172 qede_free_arfs_filter(edev, fltr);
174 edev->arfs->filter_count--;
175 if (!edev->arfs->filter_count &&
176 edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
177 enum qed_filter_config_mode mode;
179 mode = QED_FILTER_CONFIG_MODE_DISABLE;
180 edev->ops->configure_arfs_searcher(edev->cdev, mode);
		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
	}
}
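
/* Completion callback invoked by qed once firmware has processed an
 * n-tuple filter request. On failure the filter is invalidated; otherwise
 * QEDE_FLTR_VALID tracks whether the filter is installed, and a pending
 * rxq migration (rxq_id != next_rxq_id) triggers a delete/re-add cycle.
 */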
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
{
	struct qede_arfs_fltr_node *fltr = filter;
	struct qede_dev *edev = dev;

	fltr->fw_rc = fw_rc;

	if (fw_rc) {
		DP_NOTICE(edev,
194 "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
195 fw_rc, fltr->flow_id, fltr->sw_id,
196 ntohs(fltr->tuple.src_port),
197 ntohs(fltr->tuple.dst_port), fltr->rxq_id);
199 spin_lock_bh(&edev->arfs->arfs_list_lock);
		fltr->used = false;
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
		spin_unlock_bh(&edev->arfs->arfs_list_lock);
		return;
	}

	spin_lock_bh(&edev->arfs->arfs_list_lock);

	fltr->used = false;

212 if (fltr->filter_op) {
213 set_bit(QEDE_FLTR_VALID, &fltr->state);
214 if (fltr->rxq_id != fltr->next_rxq_id)
			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
						 false);
	} else {
		clear_bit(QEDE_FLTR_VALID, &fltr->state);
219 if (fltr->rxq_id != fltr->next_rxq_id) {
220 fltr->rxq_id = fltr->next_rxq_id;
			qede_configure_arfs_fltr(edev, fltr,
						 fltr->rxq_id, true);
		}
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
}
229 /* Should be called while qede_lock is held */
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
{
	int i;

234 for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
235 struct hlist_node *temp;
236 struct hlist_head *head;
237 struct qede_arfs_fltr_node *fltr;
239 head = &edev->arfs->arfs_hl_head[i];
241 hlist_for_each_entry_safe(fltr, temp, head, node) {
			bool del = false;

			if (edev->state != QEDE_STATE_OPEN)
				del = true;

247 spin_lock_bh(&edev->arfs->arfs_list_lock);
249 if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
250 !fltr->used) || free_fltr) {
				qede_dequeue_fltr_and_config_searcher(edev,
								      fltr);
			} else {
254 bool flow_exp = false;
255 #ifdef CONFIG_RFS_ACCEL
				flow_exp = rps_may_expire_flow(edev->ndev,
							       fltr->rxq_id,
							       fltr->flow_id,
							       fltr->sw_id);
#endif
261 if ((flow_exp || del) && !free_fltr)
					qede_configure_arfs_fltr(edev, fltr,
								 fltr->rxq_id,
								 false);
			}
			spin_unlock_bh(&edev->arfs->arfs_list_lock);
		}
	}

271 #ifdef CONFIG_RFS_ACCEL
272 spin_lock_bh(&edev->arfs->arfs_list_lock);
274 if (edev->arfs->filter_count) {
275 set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
276 schedule_delayed_work(&edev->sp_task,
				      QEDE_SP_TASK_POLL_DELAY);
	}

	spin_unlock_bh(&edev->arfs->arfs_list_lock);
#endif
}
284 /* This function waits until all aRFS filters get deleted and freed.
 * On timeout it frees all filters forcefully.
 */
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
{
	int count = QEDE_ARFS_POLL_COUNT;

	while (count) {
292 qede_process_arfs_filters(edev, false);
		if (!edev->arfs->filter_count)
			break;

		msleep(100);	/* poll interval, assumed */
		count--;
	}

	if (!count) {
302 DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
		/* Something is terribly wrong, free forcefully */
		qede_process_arfs_filters(edev, true);
	}
}
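
/* Allocate the per-device aRFS context: the hash bucket heads, the sw-id
 * bitmap and, with CONFIG_RFS_ACCEL, the IRQ CPU reverse map used by the
 * RFS core.
 */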
int qede_alloc_arfs(struct qede_dev *edev)
{
	int i;

	edev->arfs = vzalloc(sizeof(*edev->arfs));
	if (!edev->arfs)
		return -ENOMEM;

317 spin_lock_init(&edev->arfs->arfs_list_lock);
319 for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
320 INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));
322 edev->arfs->arfs_fltr_bmap =
323 vzalloc(array_size(sizeof(long),
324 BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
	if (!edev->arfs->arfs_fltr_bmap) {
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}

331 #ifdef CONFIG_RFS_ACCEL
332 edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
333 if (!edev->ndev->rx_cpu_rmap) {
334 vfree(edev->arfs->arfs_fltr_bmap);
		edev->arfs->arfs_fltr_bmap = NULL;
		vfree(edev->arfs);
		edev->arfs = NULL;
		return -ENOMEM;
	}
#endif
	return 0;
}
void qede_free_arfs(struct qede_dev *edev)
{
	if (!edev->arfs)
		return;

349 #ifdef CONFIG_RFS_ACCEL
350 if (edev->ndev->rx_cpu_rmap)
351 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
	edev->ndev->rx_cpu_rmap = NULL;
#endif
355 vfree(edev->arfs->arfs_fltr_bmap);
	edev->arfs->arfs_fltr_bmap = NULL;
	vfree(edev->arfs);
	edev->arfs = NULL;
}
361 #ifdef CONFIG_RFS_ACCEL
362 static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
				 const struct sk_buff *skb)
{
365 if (skb->protocol == htons(ETH_P_IP)) {
366 if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
			return true;
	} else {
372 struct in6_addr *src = &tpos->tuple.src_ipv6;
373 u8 size = sizeof(struct in6_addr);
375 if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
			return true;
	}

	return false;
}
383 static struct qede_arfs_fltr_node *
384 qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
			  __be16 src_port, __be16 dst_port, u8 ip_proto)
{
387 struct qede_arfs_fltr_node *tpos;
389 hlist_for_each_entry(tpos, h, node)
390 if (tpos->tuple.ip_proto == ip_proto &&
391 tpos->tuple.eth_proto == skb->protocol &&
392 qede_compare_ip_addr(tpos, skb) &&
393 tpos->tuple.src_port == src_port &&
		    tpos->tuple.dst_port == dst_port)
			return tpos;

	return NULL;
}
400 static struct qede_arfs_fltr_node *
qede_alloc_filter(struct qede_dev *edev, int min_hlen)
{
	struct qede_arfs_fltr_node *n;
	int bit_id;

	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
				     QEDE_RFS_MAX_FLTR);

	if (bit_id >= QEDE_RFS_MAX_FLTR)
		return NULL;

	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n)
		return NULL;

	n->data = kzalloc(min_hlen, GFP_ATOMIC);
	if (!n->data) {
		kfree(n);
		return NULL;
	}

	n->sw_id = (u16)bit_id;
	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);

	return n;
}
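
/* .ndo_rx_flow_steer implementation. The RFS core calls this to steer a
 * flow to the RX queue of the CPU consuming it; the returned sw_id is
 * later handed back to rps_may_expire_flow(). Only non-encapsulated
 * TCP/UDP over IPv4/IPv6 is supported.
 */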
427 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
428 u16 rxq_index, u32 flow_id)
430 struct qede_dev *edev = netdev_priv(dev);
431 struct qede_arfs_fltr_node *n;
	int min_hlen, rc, tp_offset;
	struct ethhdr *eth;
	__be16 *ports;
	u16 tbl_idx;
	u8 ip_proto;

438 if (skb->encapsulation)
439 return -EPROTONOSUPPORT;
441 if (skb->protocol != htons(ETH_P_IP) &&
442 skb->protocol != htons(ETH_P_IPV6))
443 return -EPROTONOSUPPORT;
445 if (skb->protocol == htons(ETH_P_IP)) {
446 ip_proto = ip_hdr(skb)->protocol;
		tp_offset = sizeof(struct iphdr);
	} else {
449 ip_proto = ipv6_hdr(skb)->nexthdr;
		tp_offset = sizeof(struct ipv6hdr);
	}

453 if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
454 return -EPROTONOSUPPORT;
456 ports = (__be16 *)(skb->data + tp_offset);
457 tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
459 spin_lock_bh(&edev->arfs->arfs_list_lock);
461 n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
462 skb, ports[0], ports[1], ip_proto);
	if (n) {
		/* Filter match */
		n->next_rxq_id = rxq_index;

467 if (test_bit(QEDE_FLTR_VALID, &n->state)) {
468 if (n->rxq_id != rxq_index)
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 false);
		} else {
			if (!n->used) {
473 n->rxq_id = rxq_index;
				qede_configure_arfs_fltr(edev, n, n->rxq_id,
							 true);
			}
		}

		rc = n->sw_id;
		goto ret_unlock;
	}

483 min_hlen = ETH_HLEN + skb_headlen(skb);
	n = qede_alloc_filter(edev, min_hlen);
	if (!n) {
		rc = -ENOMEM;
		goto ret_unlock;
	}

491 n->buf_len = min_hlen;
492 n->rxq_id = rxq_index;
493 n->next_rxq_id = rxq_index;
494 n->tuple.src_port = ports[0];
495 n->tuple.dst_port = ports[1];
496 n->flow_id = flow_id;
498 if (skb->protocol == htons(ETH_P_IP)) {
499 n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
	} else {
502 memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
503 sizeof(struct in6_addr));
504 memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
		       sizeof(struct in6_addr));
	}

508 eth = (struct ethhdr *)n->data;
509 eth->h_proto = skb->protocol;
510 n->tuple.eth_proto = skb->protocol;
511 n->tuple.ip_proto = ip_proto;
512 n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
513 memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
	if (rc)
		goto ret_unlock;

519 qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
521 spin_unlock_bh(&edev->arfs->arfs_list_lock);
523 set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	return n->sw_id;

ret_unlock:
	spin_unlock_bh(&edev->arfs->arfs_list_lock);
	return rc;
}
534 void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
536 struct qede_dev *edev = dev;
538 if (edev->vxlan_dst_port != vxlan_port)
539 edev->vxlan_dst_port = 0;
541 if (edev->geneve_dst_port != geneve_port)
542 edev->geneve_dst_port = 0;
545 void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	__qede_lock(edev);

551 if (!is_valid_ether_addr(mac)) {
		__qede_unlock(edev);
		return;
	}

	ether_addr_copy(edev->ndev->dev_addr, mac);
	__qede_unlock(edev);
}
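
/* Fill the qed RSS configuration from cached state, lazily initializing
 * the indirection table, hash key and capability flags on first use. With
 * a single RX queue, RSS is simply disabled (params zeroed).
 */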
560 void qede_fill_rss_params(struct qede_dev *edev,
561 struct qed_update_vport_rss_params *rss, u8 *update)
563 bool need_reset = false;
566 if (QEDE_RSS_COUNT(edev) <= 1) {
567 memset(rss, 0, sizeof(*rss));
572 /* Need to validate current RSS config uses valid entries */
573 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
574 if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
580 if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
581 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
584 val = QEDE_RSS_COUNT(edev);
585 indir_val = ethtool_rxfh_indir_default(i, val);
586 edev->rss_ind_table[i] = indir_val;
588 edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
591 /* Now that we have the queue-indirection, prepare the handles */
592 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
593 u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
595 rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
598 if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
599 netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
600 edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
602 memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
604 if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
605 edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
606 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
607 edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
609 rss->rss_caps = edev->rss_caps;
614 static int qede_set_ucast_rx_mac(struct qede_dev *edev,
615 enum qed_filter_xcast_params_type opcode,
616 unsigned char mac[ETH_ALEN])
618 struct qed_filter_params filter_cmd;
620 memset(&filter_cmd, 0, sizeof(filter_cmd));
621 filter_cmd.type = QED_FILTER_TYPE_UCAST;
622 filter_cmd.filter.ucast.type = opcode;
623 filter_cmd.filter.ucast.mac_valid = 1;
624 ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
626 return edev->ops->filter_config(edev->cdev, &filter_cmd);
629 static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
630 enum qed_filter_xcast_params_type opcode,
633 struct qed_filter_params filter_cmd;
635 memset(&filter_cmd, 0, sizeof(filter_cmd));
636 filter_cmd.type = QED_FILTER_TYPE_UCAST;
637 filter_cmd.filter.ucast.type = opcode;
638 filter_cmd.filter.ucast.vlan_valid = 1;
639 filter_cmd.filter.ucast.vlan = vid;
641 return edev->ops->filter_config(edev->cdev, &filter_cmd);
644 static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
646 struct qed_update_vport_params *params;
649 /* Proceed only if action actually needs to be performed */
650 if (edev->accept_any_vlan == action)
	params = vzalloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

657 params->vport_id = 0;
658 params->accept_any_vlan = action;
659 params->update_accept_any_vlan_flg = 1;
661 rc = edev->ops->vport_update(edev->cdev, params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
664 action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
667 action ? "enabled" : "disabled");
		edev->accept_any_vlan = action;
	}

	vfree(params);

	return rc;
}
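
/* .ndo_vlan_rx_add_vid implementation. While the interface is down the
 * VID is only cached for later configuration; once the HW filter quota is
 * exhausted, the device falls back to accept-any-VLAN mode.
 */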
675 int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
677 struct qede_dev *edev = netdev_priv(dev);
678 struct qede_vlan *vlan, *tmp;
681 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
683 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		DP_INFO(edev, "Failed to allocate struct for vlan\n");
		return -ENOMEM;
	}

688 INIT_LIST_HEAD(&vlan->list);
	vlan->configured = false;
	vlan->vid = vid;

692 /* Verify vlan isn't already configured */
693 list_for_each_entry(tmp, &edev->vlan_list, list) {
694 if (tmp->vid == vlan->vid) {
695 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
696 "vlan already configured\n");
702 /* If interface is down, cache this VLAN ID and return */
704 if (edev->state != QEDE_STATE_OPEN) {
705 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
706 "Interface is down, VLAN %d will be configured when interface is up\n",
709 edev->non_configured_vlans++;
710 list_add(&vlan->list, &edev->vlan_list);
714 /* Check for the filter limit.
715 * Note - vlan0 has a reserved filter and can be added without
	 * worrying about quota
	 */
718 if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
	    (vlan->vid == 0)) {
		rc = qede_set_ucast_rx_vlan(edev,
721 QED_FILTER_XCAST_TYPE_ADD,
724 DP_ERR(edev, "Failed to configure VLAN %d\n",
729 vlan->configured = true;
		/* the vlan0 filter doesn't consume from our quota */
733 edev->configured_vlans++;
735 /* Out of quota; Activate accept-any-VLAN mode */
736 if (!edev->non_configured_vlans) {
737 rc = qede_config_accept_any_vlan(edev, true);
744 edev->non_configured_vlans++;
747 list_add(&vlan->list, &edev->vlan_list);
754 static void qede_del_vlan_from_list(struct qede_dev *edev,
755 struct qede_vlan *vlan)
	/* the vlan0 filter doesn't consume from our quota */
758 if (vlan->vid != 0) {
759 if (vlan->configured)
760 edev->configured_vlans--;
762 edev->non_configured_vlans--;
765 list_del(&vlan->list);
769 int qede_configure_vlan_filters(struct qede_dev *edev)
771 int rc = 0, real_rc = 0, accept_any_vlan = 0;
772 struct qed_dev_eth_info *dev_info;
773 struct qede_vlan *vlan = NULL;
775 if (list_empty(&edev->vlan_list))
778 dev_info = &edev->dev_info;
780 /* Configure non-configured vlans */
781 list_for_each_entry(vlan, &edev->vlan_list, list) {
782 if (vlan->configured)
785 /* We have used all our credits, now enable accept_any_vlan */
786 if ((vlan->vid != 0) &&
787 (edev->configured_vlans == dev_info->num_vlan_filters)) {
792 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
794 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
797 DP_ERR(edev, "Failed to configure VLAN %u\n",
803 vlan->configured = true;
804 /* vlan0 filter doesn't consume our VLAN filter's quota */
805 if (vlan->vid != 0) {
806 edev->non_configured_vlans--;
807 edev->configured_vlans++;
811 /* enable accept_any_vlan mode if we have more VLANs than credits,
812 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */
	if (accept_any_vlan)
817 rc = qede_config_accept_any_vlan(edev, true);
818 else if (!edev->non_configured_vlans)
819 rc = qede_config_accept_any_vlan(edev, false);
827 int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
829 struct qede_dev *edev = netdev_priv(dev);
830 struct qede_vlan *vlan = NULL;
833 DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
835 /* Find whether entry exists */
837 list_for_each_entry(vlan, &edev->vlan_list, list)
838 if (vlan->vid == vid)
841 if (!vlan || (vlan->vid != vid)) {
842 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
843 "Vlan isn't configured\n");
847 if (edev->state != QEDE_STATE_OPEN) {
848 /* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list.
		 */
851 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
852 "Interface is down, removing VLAN from list only\n");
853 qede_del_vlan_from_list(edev, vlan);
858 if (vlan->configured) {
859 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
862 DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
867 qede_del_vlan_from_list(edev, vlan);
869 /* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
872 rc = qede_configure_vlan_filters(edev);
879 void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
881 struct qede_vlan *vlan = NULL;
883 if (list_empty(&edev->vlan_list))
886 list_for_each_entry(vlan, &edev->vlan_list, list) {
887 if (!vlan->configured)
890 vlan->configured = false;
		/* the vlan0 filter doesn't consume from our quota */
893 if (vlan->vid != 0) {
894 edev->non_configured_vlans++;
895 edev->configured_vlans--;
898 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
899 "marked vlan %d as non-configured\n", vlan->vid);
902 edev->accept_any_vlan = false;
905 static void qede_set_features_reload(struct qede_dev *edev,
906 struct qede_reload_args *args)
908 edev->ndev->features = args->u.features;
911 netdev_features_t qede_fix_features(struct net_device *dev,
912 netdev_features_t features)
914 struct qede_dev *edev = netdev_priv(dev);
916 if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
917 !(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	return features;
}
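
/* .ndo_set_features implementation. Toggling NETIF_F_GRO_HW changes FW
 * aggregation state and therefore requires a reload, unless an XDP
 * program is attached (with XDP, FW aggregation is off anyway).
 */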
923 int qede_set_features(struct net_device *dev, netdev_features_t features)
925 struct qede_dev *edev = netdev_priv(dev);
926 netdev_features_t changes = features ^ dev->features;
927 bool need_reload = false;
929 if (changes & NETIF_F_GRO_HW)
933 struct qede_reload_args args;
935 args.u.features = features;
936 args.func = &qede_set_features_reload;
938 /* Make sure that we definitely need to reload.
939 * In case of an eBPF attached program, there will be no FW
		 * aggregations, so no need to actually reload.
		 */
		if (edev->xdp_prog)
944 args.func(edev, &args);
		else
			qede_reload(edev, &args, true);

		return 0;
	}

	return 0;
}
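
/* udp_tunnel add callback: program a VXLAN or GENEVE destination UDP port
 * into the device. Only a single port per tunnel type is tracked in edev
 * (vxlan_dst_port / geneve_dst_port).
 */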
955 void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
957 struct qede_dev *edev = netdev_priv(dev);
958 struct qed_tunn_params tunn_params;
959 u16 t_port = ntohs(ti->port);
962 memset(&tunn_params, 0, sizeof(tunn_params));
965 case UDP_TUNNEL_TYPE_VXLAN:
966 if (!edev->dev_info.common.vxlan_enable)
969 if (edev->vxlan_dst_port)
972 tunn_params.update_vxlan_port = 1;
973 tunn_params.vxlan_port = t_port;
976 rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
980 edev->vxlan_dst_port = t_port;
981 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
984 DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
989 case UDP_TUNNEL_TYPE_GENEVE:
990 if (!edev->dev_info.common.geneve_enable)
993 if (edev->geneve_dst_port)
996 tunn_params.update_geneve_port = 1;
997 tunn_params.geneve_port = t_port;
1000 rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
1001 __qede_unlock(edev);
1004 edev->geneve_dst_port = t_port;
1005 DP_VERBOSE(edev, QED_MSG_DEBUG,
1006 "Added geneve port=%d\n", t_port);
1008 DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
1018 void qede_udp_tunnel_del(struct net_device *dev,
1019 struct udp_tunnel_info *ti)
1021 struct qede_dev *edev = netdev_priv(dev);
1022 struct qed_tunn_params tunn_params;
1023 u16 t_port = ntohs(ti->port);
1025 memset(&tunn_params, 0, sizeof(tunn_params));
1028 case UDP_TUNNEL_TYPE_VXLAN:
1029 if (t_port != edev->vxlan_dst_port)
1032 tunn_params.update_vxlan_port = 1;
1033 tunn_params.vxlan_port = 0;
1036 edev->ops->tunn_config(edev->cdev, &tunn_params);
1037 __qede_unlock(edev);
1039 edev->vxlan_dst_port = 0;
1041 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
1045 case UDP_TUNNEL_TYPE_GENEVE:
1046 if (t_port != edev->geneve_dst_port)
1049 tunn_params.update_geneve_port = 1;
1050 tunn_params.geneve_port = 0;
1053 edev->ops->tunn_config(edev->cdev, &tunn_params);
1054 __qede_unlock(edev);
1056 edev->geneve_dst_port = 0;
1058 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
1066 static void qede_xdp_reload_func(struct qede_dev *edev,
1067 struct qede_reload_args *args)
1069 struct bpf_prog *old;
1071 old = xchg(&edev->xdp_prog, args->u.new_prog);
1076 static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
1078 struct qede_reload_args args;
1080 /* If we're called, there was already a bpf reference increment */
1081 args.func = &qede_xdp_reload_func;
1082 args.u.new_prog = prog;
1083 qede_reload(edev, &args, false);
1088 int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1090 struct qede_dev *edev = netdev_priv(dev);
1092 switch (xdp->command) {
1093 case XDP_SETUP_PROG:
1094 return qede_xdp_set(edev, xdp->prog);
1095 case XDP_QUERY_PROG:
1096 xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
1103 static int qede_set_mcast_rx_mac(struct qede_dev *edev,
1104 enum qed_filter_xcast_params_type opcode,
1105 unsigned char *mac, int num_macs)
1107 struct qed_filter_params filter_cmd;
1110 memset(&filter_cmd, 0, sizeof(filter_cmd));
1111 filter_cmd.type = QED_FILTER_TYPE_MCAST;
1112 filter_cmd.filter.mcast.type = opcode;
1113 filter_cmd.filter.mcast.num = num_macs;
1115 for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
1116 ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
1118 return edev->ops->filter_config(edev->cdev, &filter_cmd);
1121 int qede_set_mac_addr(struct net_device *ndev, void *p)
1123 struct qede_dev *edev = netdev_priv(ndev);
1124 struct sockaddr *addr = p;
1127 /* Make sure the state doesn't transition while changing the MAC.
	 * Also, all flows accessing the dev_addr field are doing that under
	 * the qede_lock.
	 */
	__qede_lock(edev);

1133 if (!is_valid_ether_addr(addr->sa_data)) {
1134 DP_NOTICE(edev, "The MAC address is not valid\n");
1139 if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
1140 DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
1146 if (edev->state == QEDE_STATE_OPEN) {
1147 /* Remove the previous primary mac */
1148 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1154 ether_addr_copy(ndev->dev_addr, addr->sa_data);
1155 DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
1157 if (edev->state != QEDE_STATE_OPEN) {
1158 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1159 "The device is currently down\n");
1160 /* Ask PF to explicitly update a copy in bulletin board */
1161 if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
1162 edev->ops->req_bulletin_update_mac(edev->cdev,
1167 edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
1169 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1172 __qede_unlock(edev);
1177 qede_configure_mcast_filtering(struct net_device *ndev,
1178 enum qed_filter_rx_mode_type *accept_flags)
1180 struct qede_dev *edev = netdev_priv(ndev);
1181 unsigned char *mc_macs, *temp;
1182 struct netdev_hw_addr *ha;
1183 int rc = 0, mc_count;
1186 size = 64 * ETH_ALEN;
1188 mc_macs = kzalloc(size, GFP_KERNEL);
1191 "Failed to allocate memory for multicast MACs\n");
1198 /* Remove all previously configured MAC filters */
1199 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1204 netif_addr_lock_bh(ndev);
1206 mc_count = netdev_mc_count(ndev);
1207 if (mc_count <= 64) {
1208 netdev_for_each_mc_addr(ha, ndev) {
1209 ether_addr_copy(temp, ha->addr);
1214 netif_addr_unlock_bh(ndev);
	/* Fall back to multicast-promisc if all-multicast is requested or
	 * there are more than 64 multicast MACs to configure
	 */
1217 if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
1218 if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
1219 *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1221 /* Add all multicast MAC filters */
1222 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1231 void qede_set_rx_mode(struct net_device *ndev)
1233 struct qede_dev *edev = netdev_priv(ndev);
1235 set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
1236 schedule_delayed_work(&edev->sp_task, 0);
1239 /* Must be called with qede_lock held */
1240 void qede_config_rx_mode(struct net_device *ndev)
1242 enum qed_filter_rx_mode_type accept_flags;
1243 struct qede_dev *edev = netdev_priv(ndev);
1244 struct qed_filter_params rx_mode;
1245 unsigned char *uc_macs, *temp;
1246 struct netdev_hw_addr *ha;
1250 netif_addr_lock_bh(ndev);
1252 uc_count = netdev_uc_count(ndev);
1253 size = uc_count * ETH_ALEN;
1255 uc_macs = kzalloc(size, GFP_ATOMIC);
1257 DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
1258 netif_addr_unlock_bh(ndev);
1263 netdev_for_each_uc_addr(ha, ndev) {
1264 ether_addr_copy(temp, ha->addr);
1268 netif_addr_unlock_bh(ndev);
1270 /* Configure the struct for the Rx mode */
1271 memset(&rx_mode, 0, sizeof(struct qed_filter_params));
1272 rx_mode.type = QED_FILTER_TYPE_RX_MODE;
1274 /* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
1277 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
1278 edev->ndev->dev_addr);
1282 /* Check for promiscuous */
1283 if (ndev->flags & IFF_PROMISC)
1284 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1286 accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
1288 /* Configure all filters regardless, in case promisc is rejected */
1289 if (uc_count < edev->dev_info.num_mac_filters) {
1293 for (i = 0; i < uc_count; i++) {
1294 rc = qede_set_ucast_rx_mac(edev,
1295 QED_FILTER_XCAST_TYPE_ADD,
1303 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1306 rc = qede_configure_mcast_filtering(ndev, &accept_flags);
1310 /* take care of VLAN mode */
1311 if (ndev->flags & IFF_PROMISC) {
1312 qede_config_accept_any_vlan(edev, true);
1313 } else if (!edev->non_configured_vlans) {
1314 /* It's possible that accept_any_vlan mode is set due to a
1315 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
1318 qede_config_accept_any_vlan(edev, false);
1321 rx_mode.filter.accept_flags = accept_flags;
1322 edev->ops->filter_config(edev->cdev, &rx_mode);
1327 static struct qede_arfs_fltr_node *
1328 qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
1330 struct qede_arfs_fltr_node *fltr;
1332 hlist_for_each_entry(fltr, head, node)
1333 if (location == fltr->sw_id)
1339 int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
1342 struct qede_arfs_fltr_node *fltr;
1343 struct hlist_head *head;
1344 int cnt = 0, rc = 0;
1346 info->data = QEDE_RFS_MAX_FLTR;
1355 head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1357 hlist_for_each_entry(fltr, head, node) {
1358 if (cnt == info->rule_cnt) {
1363 rule_locs[cnt] = fltr->sw_id;
1367 info->rule_cnt = cnt;
1370 __qede_unlock(edev);
1374 int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
1376 struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1377 struct qede_arfs_fltr_node *fltr = NULL;
1380 cmd->data = QEDE_RFS_MAX_FLTR;
1389 fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1392 DP_NOTICE(edev, "Rule not found - location=0x%x\n",
1398 if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
1399 if (fltr->tuple.ip_proto == IPPROTO_TCP)
1400 fsp->flow_type = TCP_V4_FLOW;
1402 fsp->flow_type = UDP_V4_FLOW;
1404 fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
1405 fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
1406 fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
1407 fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
1409 if (fltr->tuple.ip_proto == IPPROTO_TCP)
1410 fsp->flow_type = TCP_V6_FLOW;
1412 fsp->flow_type = UDP_V6_FLOW;
1413 fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
1414 fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
1415 memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
1416 &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
1417 memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
1418 &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
1421 fsp->ring_cookie = fltr->rxq_id;
1424 fsp->ring_cookie |= ((u64)fltr->vfid) <<
1425 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
1428 if (fltr->b_is_drop)
1429 fsp->ring_cookie = RX_CLS_FLOW_DISC;
1431 __qede_unlock(edev);
1436 qede_poll_arfs_filter_config(struct qede_dev *edev,
1437 struct qede_arfs_fltr_node *fltr)
1439 int count = QEDE_ARFS_POLL_COUNT;
	while (fltr->used && count) {
		msleep(20);	/* poll interval, assumed */
		count--;
	}

1446 if (count == 0 || fltr->fw_rc) {
1447 DP_NOTICE(edev, "Timeout in polling filter config\n");
		qede_dequeue_fltr_and_config_searcher(edev, fltr);
		return -EIO;
	}

	return fltr->fw_rc;
}
1455 static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
1457 int size = ETH_HLEN;
1459 if (t->eth_proto == htons(ETH_P_IP))
1460 size += sizeof(struct iphdr);
1462 size += sizeof(struct ipv6hdr);
1464 if (t->ip_proto == IPPROTO_TCP)
1465 size += sizeof(struct tcphdr);
1467 size += sizeof(struct udphdr);
1472 static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
1473 struct qede_arfs_tuple *b)
1475 if (a->eth_proto != htons(ETH_P_IP) ||
1476 b->eth_proto != htons(ETH_P_IP))
1479 return (a->src_ipv4 == b->src_ipv4) &&
1480 (a->dst_ipv4 == b->dst_ipv4);
1483 static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
1486 __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
1487 struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
1488 struct ethhdr *eth = (struct ethhdr *)header;
1490 eth->h_proto = t->eth_proto;
1491 ip->saddr = t->src_ipv4;
1492 ip->daddr = t->dst_ipv4;
1495 ip->protocol = t->ip_proto;
1496 ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
1498 /* ports is weakly typed to suit both TCP and UDP ports */
1499 ports[0] = t->src_port;
1500 ports[1] = t->dst_port;
1503 static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
1506 const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
1508 snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
1509 "%s %pI4 (%04x) -> %pI4 (%04x)",
1510 prefix, &t->src_ipv4, t->src_port,
1511 &t->dst_ipv4, t->dst_port);
1514 static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
1515 struct qede_arfs_tuple *b)
1517 if (a->eth_proto != htons(ETH_P_IPV6) ||
1518 b->eth_proto != htons(ETH_P_IPV6))
1521 if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
1524 if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
1530 static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
1533 __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
1534 struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
1535 struct ethhdr *eth = (struct ethhdr *)header;
1537 eth->h_proto = t->eth_proto;
1538 memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
1539 memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
1542 if (t->ip_proto == IPPROTO_TCP) {
1543 ip6->nexthdr = NEXTHDR_TCP;
1544 ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
1546 ip6->nexthdr = NEXTHDR_UDP;
1547 ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
1550 /* ports is weakly typed to suit both TCP and UDP ports */
1551 ports[0] = t->src_port;
1552 ports[1] = t->dst_port;
1555 /* Validate fields which are set and not accepted by the driver */
1556 static int qede_flow_spec_validate_unused(struct qede_dev *edev,
1557 struct ethtool_rx_flow_spec *fs)
1559 if (fs->flow_type & FLOW_MAC_EXT) {
1560 DP_INFO(edev, "Don't support MAC extensions\n");
1564 if ((fs->flow_type & FLOW_EXT) &&
1565 (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
1566 DP_INFO(edev, "Don't support vlan-based classification\n");
1570 if ((fs->flow_type & FLOW_EXT) &&
1571 (fs->h_ext.data[0] || fs->h_ext.data[1])) {
1572 DP_INFO(edev, "Don't support user defined data\n");
1579 static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
1580 struct qede_arfs_tuple *t)
	/* Only a full 4-tuple, an L4 port, a source IP, or a destination IP
	 * is accepted as input.
	 */
1585 if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
1586 t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
1587 } else if (!t->src_port && t->dst_port &&
1588 !t->src_ipv4 && !t->dst_ipv4) {
1589 t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
1590 } else if (!t->src_port && !t->dst_port &&
1591 !t->dst_ipv4 && t->src_ipv4) {
1592 t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
1593 } else if (!t->src_port && !t->dst_port &&
1594 t->dst_ipv4 && !t->src_ipv4) {
1595 t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
1597 DP_INFO(edev, "Invalid N-tuple\n");
1601 t->ip_comp = qede_flow_spec_ipv4_cmp;
1602 t->build_hdr = qede_flow_build_ipv4_hdr;
1603 t->stringify = qede_flow_stringify_ipv4_hdr;
1608 static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
1609 struct qede_arfs_tuple *t,
1610 struct in6_addr *zaddr)
	/* Only a full 4-tuple, an L4 port, a source IP, or a destination IP
	 * is accepted as input.
	 */
1615 if (t->src_port && t->dst_port &&
1616 memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
1617 memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
1618 t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
1619 } else if (!t->src_port && t->dst_port &&
1620 !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
1621 !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
1622 t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
1623 } else if (!t->src_port && !t->dst_port &&
1624 !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
1625 memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
1626 t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
1627 } else if (!t->src_port && !t->dst_port &&
1628 memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
1629 !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
1630 t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
1632 DP_INFO(edev, "Invalid N-tuple\n");
1636 t->ip_comp = qede_flow_spec_ipv6_cmp;
1637 t->build_hdr = qede_flow_build_ipv6_hdr;
/* Must be called while qede_lock is held */
1643 static struct qede_arfs_fltr_node *
1644 qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
1646 struct qede_arfs_fltr_node *fltr;
1647 struct hlist_node *temp;
1648 struct hlist_head *head;
1650 head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1652 hlist_for_each_entry_safe(fltr, temp, head, node) {
1653 if (fltr->tuple.ip_proto == t->ip_proto &&
1654 fltr->tuple.src_port == t->src_port &&
1655 fltr->tuple.dst_port == t->dst_port &&
1656 t->ip_comp(&fltr->tuple, t))
1663 static void qede_flow_set_destination(struct qede_dev *edev,
1664 struct qede_arfs_fltr_node *n,
1665 struct ethtool_rx_flow_spec *fs)
1667 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
1668 n->b_is_drop = true;
1672 n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1673 n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
1674 n->next_rxq_id = n->rxq_id;
1677 DP_VERBOSE(edev, QED_MSG_SP,
1678 "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
1681 int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
1683 struct qede_arfs_fltr_node *fltr = NULL;
1690 fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1695 qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);
1697 rc = qede_poll_arfs_filter_config(edev, fltr);
1699 qede_dequeue_fltr_and_config_searcher(edev, fltr);
1702 __qede_unlock(edev);
1706 int qede_get_arfs_filter_count(struct qede_dev *edev)
1715 count = edev->arfs->filter_count;
1718 __qede_unlock(edev);
1722 static int qede_parse_actions(struct qede_dev *edev,
1723 struct flow_action *flow_action,
1724 struct netlink_ext_ack *extack)
1726 const struct flow_action_entry *act;
1729 if (!flow_action_has_entries(flow_action)) {
1730 DP_NOTICE(edev, "No actions received\n");
1734 if (!flow_action_basic_hw_stats_check(flow_action, extack))
1737 flow_action_for_each(i, act, flow_action) {
1739 case FLOW_ACTION_DROP:
1741 case FLOW_ACTION_QUEUE:
1745 if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
1746 DP_INFO(edev, "Queue out-of-bounds\n");
1759 qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
1760 struct qede_arfs_tuple *t)
1762 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1763 struct flow_match_ports match;
1765 flow_rule_match_ports(rule, &match);
1766 if ((match.key->src && match.mask->src != U16_MAX) ||
1767 (match.key->dst && match.mask->dst != U16_MAX)) {
1768 DP_NOTICE(edev, "Do not support ports masks\n");
1772 t->src_port = match.key->src;
1773 t->dst_port = match.key->dst;
1780 qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
1781 struct qede_arfs_tuple *t)
1783 struct in6_addr zero_addr, addr;
1785 memset(&zero_addr, 0, sizeof(addr));
1786 memset(&addr, 0xff, sizeof(addr));
1788 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
1789 struct flow_match_ipv6_addrs match;
1791 flow_rule_match_ipv6_addrs(rule, &match);
1792 if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
1793 memcmp(&match.mask->src, &addr, sizeof(addr))) ||
1794 (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
1795 memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
1797 "Do not support IPv6 address prefix/mask\n");
1801 memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
1802 memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
1805 if (qede_flow_parse_ports(edev, rule, t))
1808 return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
1812 qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
1813 struct qede_arfs_tuple *t)
1815 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
1816 struct flow_match_ipv4_addrs match;
1818 flow_rule_match_ipv4_addrs(rule, &match);
1819 if ((match.key->src && match.mask->src != U32_MAX) ||
1820 (match.key->dst && match.mask->dst != U32_MAX)) {
1821 DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
1825 t->src_ipv4 = match.key->src;
1826 t->dst_ipv4 = match.key->dst;
1829 if (qede_flow_parse_ports(edev, rule, t))
1832 return qede_set_v4_tuple_to_profile(edev, t);
1836 qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
1837 struct qede_arfs_tuple *tuple)
1839 tuple->ip_proto = IPPROTO_TCP;
1840 tuple->eth_proto = htons(ETH_P_IPV6);
1842 return qede_flow_parse_v6_common(edev, rule, tuple);
1846 qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
1847 struct qede_arfs_tuple *tuple)
1849 tuple->ip_proto = IPPROTO_TCP;
1850 tuple->eth_proto = htons(ETH_P_IP);
1852 return qede_flow_parse_v4_common(edev, rule, tuple);
1856 qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
1857 struct qede_arfs_tuple *tuple)
1859 tuple->ip_proto = IPPROTO_UDP;
1860 tuple->eth_proto = htons(ETH_P_IPV6);
1862 return qede_flow_parse_v6_common(edev, rule, tuple);
1866 qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
1867 struct qede_arfs_tuple *tuple)
1869 tuple->ip_proto = IPPROTO_UDP;
1870 tuple->eth_proto = htons(ETH_P_IP);
	return qede_flow_parse_v4_common(edev, rule, tuple);
}
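
/* Translate a flow_rule (from TC flower, or from an ethtool flow spec
 * converted via ethtool_rx_flow_rule_create()) into a qede_arfs_tuple.
 * Only plain TCP/UDP over IPv4/IPv6 with exact-match or unset addresses
 * and ports is accepted.
 */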
1876 qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
1877 struct flow_rule *rule, struct qede_arfs_tuple *tuple)
1879 struct flow_dissector *dissector = rule->match.dissector;
1883 memset(tuple, 0, sizeof(*tuple));
1885 if (dissector->used_keys &
1886 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1887 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1888 BIT(FLOW_DISSECTOR_KEY_BASIC) |
1889 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1890 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
1891 DP_NOTICE(edev, "Unsupported key set:0x%x\n",
1892 dissector->used_keys);
1896 if (proto != htons(ETH_P_IP) &&
1897 proto != htons(ETH_P_IPV6)) {
1898 DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
1899 return -EPROTONOSUPPORT;
1902 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1903 struct flow_match_basic match;
1905 flow_rule_match_basic(rule, &match);
1906 ip_proto = match.key->ip_proto;
1909 if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
1910 rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
1911 else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
1912 rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
1913 else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
1914 rc = qede_flow_parse_udp_v4(edev, rule, tuple);
1915 else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
1916 rc = qede_flow_parse_udp_v6(edev, rule, tuple);
1918 DP_NOTICE(edev, "Invalid protocol request\n");
1923 int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
1924 struct flow_cls_offload *f)
1926 struct qede_arfs_fltr_node *n;
1927 int min_hlen, rc = -EINVAL;
1928 struct qede_arfs_tuple t;
1937 /* parse flower attribute and prepare filter */
1938 if (qede_parse_flow_attr(edev, proto, f->rule, &t))
1941 /* Validate profile mode and number of filters */
1942 if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
1943 edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
1945 "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
1946 t.mode, edev->arfs->mode, edev->arfs->filter_count);
1950 /* parse tc actions and get the vf_id */
1951 if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
1954 if (qede_flow_find_fltr(edev, &t)) {
1959 n = kzalloc(sizeof(*n), GFP_KERNEL);
1965 min_hlen = qede_flow_get_min_header_size(&t);
1967 n->data = kzalloc(min_hlen, GFP_KERNEL);
1974 memcpy(&n->tuple, &t, sizeof(n->tuple));
1976 n->buf_len = min_hlen;
1977 n->b_is_drop = true;
1978 n->sw_id = f->cookie;
1980 n->tuple.build_hdr(&n->tuple, n->data);
1982 rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
1986 qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
1987 rc = qede_poll_arfs_filter_config(edev, n);
1990 __qede_unlock(edev);
1994 static int qede_flow_spec_validate(struct qede_dev *edev,
1995 struct flow_action *flow_action,
1996 struct qede_arfs_tuple *t,
				   __u32 location)
{
	if (location >= QEDE_RFS_MAX_FLTR) {
2000 DP_INFO(edev, "Location out-of-bounds\n");
2004 /* Check location isn't already in use */
2005 if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
2006 DP_INFO(edev, "Location already in use\n");
2010 /* Check if the filtering-mode could support the filter */
2011 if (edev->arfs->filter_count &&
2012 edev->arfs->mode != t->mode) {
2014 "flow_spec would require filtering mode %08x, but %08x is configured\n",
2015 t->mode, edev->arfs->filter_count);
	if (qede_parse_actions(edev, flow_action, NULL))
		return -EINVAL;

	return 0;
}
2025 static int qede_flow_spec_to_rule(struct qede_dev *edev,
2026 struct qede_arfs_tuple *t,
2027 struct ethtool_rx_flow_spec *fs)
2029 struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *flow;
	__be16 proto;
	int err;

	if (qede_flow_spec_validate_unused(edev, fs))
		return -EOPNOTSUPP;

2037 switch ((fs->flow_type & ~FLOW_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		proto = htons(ETH_P_IP);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		proto = htons(ETH_P_IPV6);
		break;
	default:
2047 DP_VERBOSE(edev, NETIF_MSG_IFUP,
2048 "Can't support flow of type %08x\n", fs->flow_type);
2053 flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
		err = -EINVAL;
		goto err_out;
	}

2062 /* Make sure location is valid and filter isn't already set */
	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
				      fs->location);
err_out:
	ethtool_rx_flow_rule_destroy(flow);

	return err;
}
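
/* Add an ethtool n-tuple rule. A command such as (values illustrative
 * only):
 *   ethtool -N eth0 flow-type tcp4 src-ip 10.0.0.1 dst-ip 10.0.0.2 \
 *           src-port 1000 dst-port 2000 action 3 loc 0
 * is translated into a flow_rule and ends up here.
 */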
2071 int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
2073 struct ethtool_rx_flow_spec *fsp = &info->fs;
2074 struct qede_arfs_fltr_node *n;
2075 struct qede_arfs_tuple t;
	/* Translate the flow specification into something fitting our DB */
2086 rc = qede_flow_spec_to_rule(edev, &t, fsp);
2090 if (qede_flow_find_fltr(edev, &t)) {
2095 n = kzalloc(sizeof(*n), GFP_KERNEL);
2101 min_hlen = qede_flow_get_min_header_size(&t);
2102 n->data = kzalloc(min_hlen, GFP_KERNEL);
2109 n->sw_id = fsp->location;
2110 set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
2111 n->buf_len = min_hlen;
2113 memcpy(&n->tuple, &t, sizeof(n->tuple));
2115 qede_flow_set_destination(edev, n, fsp);
2117 /* Build a minimal header according to the flow */
2118 n->tuple.build_hdr(&n->tuple, n->data);
2120 rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
2124 qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
2125 rc = qede_poll_arfs_filter_config(edev, n);
2127 __qede_unlock(edev);