net: qede: convert to SPDX License Identifiers
drivers/net/ethernet/qlogic/qede/qede_filter.c [linux-2.6-microblaze.git]
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qede NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  */
5
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <net/udp_tunnel.h>
9 #include <linux/bitops.h>
10 #include <linux/vmalloc.h>
11
12 #include <linux/qed/qed_if.h>
13 #include "qede.h"
14
15 #define QEDE_FILTER_PRINT_MAX_LEN       (64)
16 struct qede_arfs_tuple {
17         union {
18                 __be32 src_ipv4;
19                 struct in6_addr src_ipv6;
20         };
21         union {
22                 __be32 dst_ipv4;
23                 struct in6_addr dst_ipv6;
24         };
25         __be16  src_port;
26         __be16  dst_port;
27         __be16  eth_proto;
28         u8      ip_proto;
29
30         /* Describe filtering mode needed for this kind of filter */
31         enum qed_filter_config_mode mode;
32
33         /* Used to compare new/old filters. Return true if IPs match */
34         bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
35
36         /* Given a buffer starting at its ethhdr, build a header from tuple info */
37         void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
38
39         /* Stringify the tuple for a print into the provided buffer */
40         void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
41 };
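
/* Illustrative sketch, not part of the driver: once a profile helper
 * (e.g. qede_set_v4_tuple_to_profile() below) has filled in the three
 * callbacks, users of a tuple can stay protocol-agnostic.  The helper
 * name below is hypothetical.
 */
#if 0
static void example_log_and_serialize(struct qede_arfs_tuple *t, void *pkt)
{
	char buf[QEDE_FILTER_PRINT_MAX_LEN];

	t->build_hdr(t, pkt);		/* write eth/ip/l4 header into pkt */
	if (t->stringify)
		t->stringify(t, buf);	/* e.g. "TCP 1.2.3.4 (...) -> ..." */
}
#endif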
42
43 struct qede_arfs_fltr_node {
44 #define QEDE_FLTR_VALID  0
45         unsigned long state;
46
47         /* pointer to aRFS packet buffer */
48         void *data;
49
50         /* dma map address of aRFS packet buffer */
51         dma_addr_t mapping;
52
53         /* length of aRFS packet buffer */
54         int buf_len;
55
56         /* tuple extracted from the aRFS packet buffer */
57         struct qede_arfs_tuple tuple;
58
59         u32 flow_id;
60         u64 sw_id;
61         u16 rxq_id;
62         u16 next_rxq_id;
63         u8 vfid;
64         bool filter_op;
65         bool used;
66         u8 fw_rc;
67         bool b_is_drop;
68         struct hlist_node node;
69 };
70
71 struct qede_arfs {
72 #define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
73 #define QEDE_ARFS_POLL_COUNT    100
74 #define QEDE_RFS_FLW_BITSHIFT   (4)
75 #define QEDE_RFS_FLW_MASK       ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
76         struct hlist_head       arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
77
78         /* lock for filter list access */
79         spinlock_t              arfs_list_lock;
80         unsigned long           *arfs_fltr_bmap;
81         int                     filter_count;
82
83         /* Currently configured filtering mode */
84         enum qed_filter_config_mode mode;
85 };
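
/* Worked example, not part of the driver: with QEDE_RFS_FLW_BITSHIFT == 4
 * there are 16 hash buckets and QEDE_RFS_FLW_MASK == 0xf, so a flow is
 * bucketed by the low four bits of the skb hash, exactly as
 * qede_rx_flow_steer() does below:
 *
 *	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
 *	head = QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx);
 */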
86
87 static void qede_configure_arfs_fltr(struct qede_dev *edev,
88                                      struct qede_arfs_fltr_node *n,
89                                      u16 rxq_id, bool add_fltr)
90 {
91         const struct qed_eth_ops *op = edev->ops;
92         struct qed_ntuple_filter_params params;
93
94         if (n->used)
95                 return;
96
97         memset(&params, 0, sizeof(params));
98
99         params.addr = n->mapping;
100         params.length = n->buf_len;
101         params.qid = rxq_id;
102         params.b_is_add = add_fltr;
103         params.b_is_drop = n->b_is_drop;
104
105         if (n->vfid) {
106                 params.b_is_vf = true;
107                 params.vf_id = n->vfid - 1;
108         }
109
110         if (n->tuple.stringify) {
111                 char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
112
113                 n->tuple.stringify(&n->tuple, tuple_buffer);
114                 DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
115                            "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
116                            add_fltr ? "Adding" : "Deleting",
117                            n->sw_id, tuple_buffer, n->vfid, rxq_id);
118         }
119
120         n->used = true;
121         n->filter_op = add_fltr;
122         op->ntuple_filter_config(edev->cdev, n, &params);
123 }
124
125 static void
126 qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
127 {
128         kfree(fltr->data);
129
130         if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
131                 clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
132
133         kfree(fltr);
134 }
135
136 static int
137 qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
138                                       struct qede_arfs_fltr_node *fltr,
139                                       u16 bucket_idx)
140 {
141         fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
142                                        fltr->buf_len, DMA_TO_DEVICE);
143         if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
144                 DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
145                 qede_free_arfs_filter(edev, fltr);
146                 return -ENOMEM;
147         }
148
149         INIT_HLIST_NODE(&fltr->node);
150         hlist_add_head(&fltr->node,
151                        QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
152
153         edev->arfs->filter_count++;
154         if (edev->arfs->filter_count == 1 &&
155             edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
156                 edev->ops->configure_arfs_searcher(edev->cdev,
157                                                    fltr->tuple.mode);
158                 edev->arfs->mode = fltr->tuple.mode;
159         }
160
161         return 0;
162 }
163
164 static void
165 qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
166                                       struct qede_arfs_fltr_node *fltr)
167 {
168         hlist_del(&fltr->node);
169         dma_unmap_single(&edev->pdev->dev, fltr->mapping,
170                          fltr->buf_len, DMA_TO_DEVICE);
171
172         qede_free_arfs_filter(edev, fltr);
173
174         edev->arfs->filter_count--;
175         if (!edev->arfs->filter_count &&
176             edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
177                 enum qed_filter_config_mode mode;
178
179                 mode = QED_FILTER_CONFIG_MODE_DISABLE;
180                 edev->ops->configure_arfs_searcher(edev->cdev, mode);
181                 edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
182         }
183 }
184
185 void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
186 {
187         struct qede_arfs_fltr_node *fltr = filter;
188         struct qede_dev *edev = dev;
189
190         fltr->fw_rc = fw_rc;
191
192         if (fw_rc) {
193                 DP_NOTICE(edev,
194                           "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
195                           fw_rc, fltr->flow_id, fltr->sw_id,
196                           ntohs(fltr->tuple.src_port),
197                           ntohs(fltr->tuple.dst_port), fltr->rxq_id);
198
199                 spin_lock_bh(&edev->arfs->arfs_list_lock);
200
201                 fltr->used = false;
202                 clear_bit(QEDE_FLTR_VALID, &fltr->state);
203
204                 spin_unlock_bh(&edev->arfs->arfs_list_lock);
205                 return;
206         }
207
208         spin_lock_bh(&edev->arfs->arfs_list_lock);
209
210         fltr->used = false;
211
212         if (fltr->filter_op) {
213                 set_bit(QEDE_FLTR_VALID, &fltr->state);
214                 if (fltr->rxq_id != fltr->next_rxq_id)
215                         qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
216                                                  false);
217         } else {
218                 clear_bit(QEDE_FLTR_VALID, &fltr->state);
219                 if (fltr->rxq_id != fltr->next_rxq_id) {
220                         fltr->rxq_id = fltr->next_rxq_id;
221                         qede_configure_arfs_fltr(edev, fltr,
222                                                  fltr->rxq_id, true);
223                 }
224         }
225
226         spin_unlock_bh(&edev->arfs->arfs_list_lock);
227 }
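
/* Summary sketch, not part of the driver: qede_arfs_filter_op() and
 * qede_rx_flow_steer() together migrate a flow between rx queues using
 * two asynchronous firmware operations:
 *
 *	steer request, filter VALID on the old queue
 *		-> next_rxq_id = new queue; issue delete on rxq_id
 *	delete completion (filter_op == false), rxq_id != next_rxq_id
 *		-> rxq_id = next_rxq_id; issue add on the new queue
 *	add completion (filter_op == true), rxq_id == next_rxq_id
 *		-> QEDE_FLTR_VALID set; migration finished
 *
 * QEDE_FLTR_VALID thus tracks whether a filter is currently programmed
 * in hardware.
 */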
228
229 /* Should be called while qede_lock is held */
230 void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
231 {
232         int i;
233
234         for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
235                 struct hlist_node *temp;
236                 struct hlist_head *head;
237                 struct qede_arfs_fltr_node *fltr;
238
239                 head = &edev->arfs->arfs_hl_head[i];
240
241                 hlist_for_each_entry_safe(fltr, temp, head, node) {
242                         bool del = false;
243
244                         if (edev->state != QEDE_STATE_OPEN)
245                                 del = true;
246
247                         spin_lock_bh(&edev->arfs->arfs_list_lock);
248
249                         if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
250                              !fltr->used) || free_fltr) {
251                                 qede_dequeue_fltr_and_config_searcher(edev,
252                                                                       fltr);
253                         } else {
254                                 bool flow_exp = false;
255 #ifdef CONFIG_RFS_ACCEL
256                                 flow_exp = rps_may_expire_flow(edev->ndev,
257                                                                fltr->rxq_id,
258                                                                fltr->flow_id,
259                                                                fltr->sw_id);
260 #endif
261                                 if ((flow_exp || del) && !free_fltr)
262                                         qede_configure_arfs_fltr(edev, fltr,
263                                                                  fltr->rxq_id,
264                                                                  false);
265                         }
266
267                         spin_unlock_bh(&edev->arfs->arfs_list_lock);
268                 }
269         }
270
271 #ifdef CONFIG_RFS_ACCEL
272         spin_lock_bh(&edev->arfs->arfs_list_lock);
273
274         if (edev->arfs->filter_count) {
275                 set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
276                 schedule_delayed_work(&edev->sp_task,
277                                       QEDE_SP_TASK_POLL_DELAY);
278         }
279
280         spin_unlock_bh(&edev->arfs->arfs_list_lock);
281 #endif
282 }
283
284 /* This function waits until all aRFS filters get deleted and freed.
285  * On timeout it frees all filters forcefully.
286  */
287 void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
288 {
289         int count = QEDE_ARFS_POLL_COUNT;
290
291         while (count) {
292                 qede_process_arfs_filters(edev, false);
293
294                 if (!edev->arfs->filter_count)
295                         break;
296
297                 msleep(100);
298                 count--;
299         }
300
301         if (!count) {
302                 DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
303
304                 /* Something is terribly wrong, free forcefully */
305                 qede_process_arfs_filters(edev, true);
306         }
307 }
308
309 int qede_alloc_arfs(struct qede_dev *edev)
310 {
311         int i;
312
313         edev->arfs = vzalloc(sizeof(*edev->arfs));
314         if (!edev->arfs)
315                 return -ENOMEM;
316
317         spin_lock_init(&edev->arfs->arfs_list_lock);
318
319         for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
320                 INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));
321
322         edev->arfs->arfs_fltr_bmap =
323                 vzalloc(array_size(sizeof(long),
324                                    BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
325         if (!edev->arfs->arfs_fltr_bmap) {
326                 vfree(edev->arfs);
327                 edev->arfs = NULL;
328                 return -ENOMEM;
329         }
330
331 #ifdef CONFIG_RFS_ACCEL
332         edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
333         if (!edev->ndev->rx_cpu_rmap) {
334                 vfree(edev->arfs->arfs_fltr_bmap);
335                 edev->arfs->arfs_fltr_bmap = NULL;
336                 vfree(edev->arfs);
337                 edev->arfs = NULL;
338                 return -ENOMEM;
339         }
340 #endif
341         return 0;
342 }
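
/* Worked example, assuming QEDE_RFS_MAX_FLTR is 256 (see qede.h): the
 * sw_id bitmap above needs BITS_TO_LONGS(256) == 4 longs on a 64-bit
 * host, i.e. a single 32-byte allocation tracking every filter ID:
 *
 *	bit_id = find_first_zero_bit(bmap, QEDE_RFS_MAX_FLTR);
 *	set_bit(bit_id, bmap);		(claim an sw_id on alloc)
 *	clear_bit(bit_id, bmap);	(release it on free)
 */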
343
344 void qede_free_arfs(struct qede_dev *edev)
345 {
346         if (!edev->arfs)
347                 return;
348
349 #ifdef CONFIG_RFS_ACCEL
350         if (edev->ndev->rx_cpu_rmap)
351                 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
352
353         edev->ndev->rx_cpu_rmap = NULL;
354 #endif
355         vfree(edev->arfs->arfs_fltr_bmap);
356         edev->arfs->arfs_fltr_bmap = NULL;
357         vfree(edev->arfs);
358         edev->arfs = NULL;
359 }
360
361 #ifdef CONFIG_RFS_ACCEL
362 static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
363                                  const struct sk_buff *skb)
364 {
365         if (skb->protocol == htons(ETH_P_IP)) {
366                 if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
367                     tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
368                         return true;
369                 else
370                         return false;
371         } else {
372                 struct in6_addr *src = &tpos->tuple.src_ipv6;
373                 u8 size = sizeof(struct in6_addr);
374
375                 if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
376                     !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
377                         return true;
378                 else
379                         return false;
380         }
381 }
382
383 static struct qede_arfs_fltr_node *
384 qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
385                           __be16 src_port, __be16 dst_port, u8 ip_proto)
386 {
387         struct qede_arfs_fltr_node *tpos;
388
389         hlist_for_each_entry(tpos, h, node)
390                 if (tpos->tuple.ip_proto == ip_proto &&
391                     tpos->tuple.eth_proto == skb->protocol &&
392                     qede_compare_ip_addr(tpos, skb) &&
393                     tpos->tuple.src_port == src_port &&
394                     tpos->tuple.dst_port == dst_port)
395                         return tpos;
396
397         return NULL;
398 }
399
400 static struct qede_arfs_fltr_node *
401 qede_alloc_filter(struct qede_dev *edev, int min_hlen)
402 {
403         struct qede_arfs_fltr_node *n;
404         int bit_id;
405
406         bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
407                                      QEDE_RFS_MAX_FLTR);
408
409         if (bit_id >= QEDE_RFS_MAX_FLTR)
410                 return NULL;
411
412         n = kzalloc(sizeof(*n), GFP_ATOMIC);
413         if (!n)
414                 return NULL;
415
416         n->data = kzalloc(min_hlen, GFP_ATOMIC);
417         if (!n->data) {
418                 kfree(n);
419                 return NULL;
420         }
421
422         n->sw_id = (u16)bit_id;
423         set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
424         return n;
425 }
426
427 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
428                        u16 rxq_index, u32 flow_id)
429 {
430         struct qede_dev *edev = netdev_priv(dev);
431         struct qede_arfs_fltr_node *n;
432         int min_hlen, rc, tp_offset;
433         struct ethhdr *eth;
434         __be16 *ports;
435         u16 tbl_idx;
436         u8 ip_proto;
437
438         if (skb->encapsulation)
439                 return -EPROTONOSUPPORT;
440
441         if (skb->protocol != htons(ETH_P_IP) &&
442             skb->protocol != htons(ETH_P_IPV6))
443                 return -EPROTONOSUPPORT;
444
445         if (skb->protocol == htons(ETH_P_IP)) {
446                 ip_proto = ip_hdr(skb)->protocol;
447                 tp_offset = sizeof(struct iphdr);
448         } else {
449                 ip_proto = ipv6_hdr(skb)->nexthdr;
450                 tp_offset = sizeof(struct ipv6hdr);
451         }
452
453         if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
454                 return -EPROTONOSUPPORT;
455
456         ports = (__be16 *)(skb->data + tp_offset);
457         tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
458
459         spin_lock_bh(&edev->arfs->arfs_list_lock);
460
461         n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
462                                       skb, ports[0], ports[1], ip_proto);
463         if (n) {
464                 /* Filter match */
465                 n->next_rxq_id = rxq_index;
466
467                 if (test_bit(QEDE_FLTR_VALID, &n->state)) {
468                         if (n->rxq_id != rxq_index)
469                                 qede_configure_arfs_fltr(edev, n, n->rxq_id,
470                                                          false);
471                 } else {
472                         if (!n->used) {
473                                 n->rxq_id = rxq_index;
474                                 qede_configure_arfs_fltr(edev, n, n->rxq_id,
475                                                          true);
476                         }
477                 }
478
479                 rc = n->sw_id;
480                 goto ret_unlock;
481         }
482
483         min_hlen = ETH_HLEN + skb_headlen(skb);
484
485         n = qede_alloc_filter(edev, min_hlen);
486         if (!n) {
487                 rc = -ENOMEM;
488                 goto ret_unlock;
489         }
490
491         n->buf_len = min_hlen;
492         n->rxq_id = rxq_index;
493         n->next_rxq_id = rxq_index;
494         n->tuple.src_port = ports[0];
495         n->tuple.dst_port = ports[1];
496         n->flow_id = flow_id;
497
498         if (skb->protocol == htons(ETH_P_IP)) {
499                 n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
500                 n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
501         } else {
502                 memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
503                        sizeof(struct in6_addr));
504                 memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
505                        sizeof(struct in6_addr));
506         }
507
508         eth = (struct ethhdr *)n->data;
509         eth->h_proto = skb->protocol;
510         n->tuple.eth_proto = skb->protocol;
511         n->tuple.ip_proto = ip_proto;
512         n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
513         memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
514
515         rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
516         if (rc)
517                 goto ret_unlock;
518
519         qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
520
521         spin_unlock_bh(&edev->arfs->arfs_list_lock);
522
523         set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
524         schedule_delayed_work(&edev->sp_task, 0);
525
526         return n->sw_id;
527
528 ret_unlock:
529         spin_unlock_bh(&edev->arfs->arfs_list_lock);
530         return rc;
531 }
532 #endif
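
/* Context sketch, not part of this file: qede_rx_flow_steer() above is
 * reached through the aRFS ndo hook, wired up roughly as follows in
 * qede_main.c (shown here for orientation only):
 */
#if 0
static const struct net_device_ops qede_netdev_ops = {
	/* ... */
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
	/* ... */
};
#endif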
533
534 void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
535 {
536         struct qede_dev *edev = dev;
537
538         if (edev->vxlan_dst_port != vxlan_port)
539                 edev->vxlan_dst_port = 0;
540
541         if (edev->geneve_dst_port != geneve_port)
542                 edev->geneve_dst_port = 0;
543 }
544
545 void qede_force_mac(void *dev, u8 *mac, bool forced)
546 {
547         struct qede_dev *edev = dev;
548
549         __qede_lock(edev);
550
551         if (!is_valid_ether_addr(mac)) {
552                 __qede_unlock(edev);
553                 return;
554         }
555
556         ether_addr_copy(edev->ndev->dev_addr, mac);
557         __qede_unlock(edev);
558 }
559
560 void qede_fill_rss_params(struct qede_dev *edev,
561                           struct qed_update_vport_rss_params *rss, u8 *update)
562 {
563         bool need_reset = false;
564         int i;
565
566         if (QEDE_RSS_COUNT(edev) <= 1) {
567                 memset(rss, 0, sizeof(*rss));
568                 *update = 0;
569                 return;
570         }
571
572         /* Need to validate current RSS config uses valid entries */
573         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
574                 if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
575                         need_reset = true;
576                         break;
577                 }
578         }
579
580         if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
581                 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
582                         u16 indir_val, val;
583
584                         val = QEDE_RSS_COUNT(edev);
585                         indir_val = ethtool_rxfh_indir_default(i, val);
586                         edev->rss_ind_table[i] = indir_val;
587                 }
588                 edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
589         }
590
591         /* Now that we have the queue-indirection, prepare the handles */
592         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
593                 u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
594
595                 rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
596         }
597
598         if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
599                 netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
600                 edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
601         }
602         memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
603
604         if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
605                 edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
606                     QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
607                 edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
608         }
609         rss->rss_caps = edev->rss_caps;
610
611         *update = 1;
612 }
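
/* Worked example, not part of the driver: ethtool_rxfh_indir_default()
 * spreads the indirection table round-robin over the rx queues, i.e.
 * index % queue-count.  Assuming QED_RSS_IND_TABLE_SIZE is 128 (per
 * qed_if.h) and, say, four queues, the default table becomes:
 *
 *	rss_ind_table[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... };
 */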
613
614 static int qede_set_ucast_rx_mac(struct qede_dev *edev,
615                                  enum qed_filter_xcast_params_type opcode,
616                                  unsigned char mac[ETH_ALEN])
617 {
618         struct qed_filter_params filter_cmd;
619
620         memset(&filter_cmd, 0, sizeof(filter_cmd));
621         filter_cmd.type = QED_FILTER_TYPE_UCAST;
622         filter_cmd.filter.ucast.type = opcode;
623         filter_cmd.filter.ucast.mac_valid = 1;
624         ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
625
626         return edev->ops->filter_config(edev->cdev, &filter_cmd);
627 }
628
629 static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
630                                   enum qed_filter_xcast_params_type opcode,
631                                   u16 vid)
632 {
633         struct qed_filter_params filter_cmd;
634
635         memset(&filter_cmd, 0, sizeof(filter_cmd));
636         filter_cmd.type = QED_FILTER_TYPE_UCAST;
637         filter_cmd.filter.ucast.type = opcode;
638         filter_cmd.filter.ucast.vlan_valid = 1;
639         filter_cmd.filter.ucast.vlan = vid;
640
641         return edev->ops->filter_config(edev->cdev, &filter_cmd);
642 }
643
644 static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
645 {
646         struct qed_update_vport_params *params;
647         int rc;
648
649         /* Proceed only if action actually needs to be performed */
650         if (edev->accept_any_vlan == action)
651                 return 0;
652
653         params = vzalloc(sizeof(*params));
654         if (!params)
655                 return -ENOMEM;
656
657         params->vport_id = 0;
658         params->accept_any_vlan = action;
659         params->update_accept_any_vlan_flg = 1;
660
661         rc = edev->ops->vport_update(edev->cdev, params);
662         if (rc) {
663                 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
664                        action ? "enable" : "disable");
665         } else {
666                 DP_INFO(edev, "%s accept-any-vlan\n",
667                         action ? "enabled" : "disabled");
668                 edev->accept_any_vlan = action;
669         }
670
671         vfree(params);
672         return rc;
673 }
674
675 int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
676 {
677         struct qede_dev *edev = netdev_priv(dev);
678         struct qede_vlan *vlan, *tmp;
679         int rc = 0;
680
681         DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
682
683         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
684         if (!vlan) {
685                 DP_INFO(edev, "Failed to allocate struct for vlan\n");
686                 return -ENOMEM;
687         }
688         INIT_LIST_HEAD(&vlan->list);
689         vlan->vid = vid;
690         vlan->configured = false;
691
692         /* Verify vlan isn't already configured */
693         list_for_each_entry(tmp, &edev->vlan_list, list) {
694                 if (tmp->vid == vlan->vid) {
695                         DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
696                                    "vlan already configured\n");
697                         kfree(vlan);
698                         return -EEXIST;
699                 }
700         }
701
702         /* If interface is down, cache this VLAN ID and return */
703         __qede_lock(edev);
704         if (edev->state != QEDE_STATE_OPEN) {
705                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
706                            "Interface is down, VLAN %d will be configured when interface is up\n",
707                            vid);
708                 if (vid != 0)
709                         edev->non_configured_vlans++;
710                 list_add(&vlan->list, &edev->vlan_list);
711                 goto out;
712         }
713
714         /* Check for the filter limit.
715          * Note - vlan0 has a reserved filter and can be added without
716          * worrying about quota
717          */
718         if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
719             (vlan->vid == 0)) {
720                 rc = qede_set_ucast_rx_vlan(edev,
721                                             QED_FILTER_XCAST_TYPE_ADD,
722                                             vlan->vid);
723                 if (rc) {
724                         DP_ERR(edev, "Failed to configure VLAN %d\n",
725                                vlan->vid);
726                         kfree(vlan);
727                         goto out;
728                 }
729                 vlan->configured = true;
730
731                 /* vlan0 filter isn't consuming out of our quota */
732                 if (vlan->vid != 0)
733                         edev->configured_vlans++;
734         } else {
735                 /* Out of quota; Activate accept-any-VLAN mode */
736                 if (!edev->non_configured_vlans) {
737                         rc = qede_config_accept_any_vlan(edev, true);
738                         if (rc) {
739                                 kfree(vlan);
740                                 goto out;
741                         }
742                 }
743
744                 edev->non_configured_vlans++;
745         }
746
747         list_add(&vlan->list, &edev->vlan_list);
748
749 out:
750         __qede_unlock(edev);
751         return rc;
752 }
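
/* Worked example, not part of the driver: with two VLAN filter credits
 * (dev_info.num_vlan_filters == 2), adding vids 0, 10 and 20 consumes no
 * credit for vid 0 (it has a reserved filter) and one credit each for
 * vids 10 and 20; a further vid 30 is out of quota, so it is only cached
 * in vlan_list and the vport is switched to accept-any-vlan via
 * qede_config_accept_any_vlan(edev, true).
 */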
753
754 static void qede_del_vlan_from_list(struct qede_dev *edev,
755                                     struct qede_vlan *vlan)
756 {
757         /* vlan0 filter isn't consuming out of our quota */
758         if (vlan->vid != 0) {
759                 if (vlan->configured)
760                         edev->configured_vlans--;
761                 else
762                         edev->non_configured_vlans--;
763         }
764
765         list_del(&vlan->list);
766         kfree(vlan);
767 }
768
769 int qede_configure_vlan_filters(struct qede_dev *edev)
770 {
771         int rc = 0, real_rc = 0, accept_any_vlan = 0;
772         struct qed_dev_eth_info *dev_info;
773         struct qede_vlan *vlan = NULL;
774
775         if (list_empty(&edev->vlan_list))
776                 return 0;
777
778         dev_info = &edev->dev_info;
779
780         /* Configure non-configured vlans */
781         list_for_each_entry(vlan, &edev->vlan_list, list) {
782                 if (vlan->configured)
783                         continue;
784
785                 /* We have used all our credits, now enable accept_any_vlan */
786                 if ((vlan->vid != 0) &&
787                     (edev->configured_vlans == dev_info->num_vlan_filters)) {
788                         accept_any_vlan = 1;
789                         continue;
790                 }
791
792                 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
793
794                 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
795                                             vlan->vid);
796                 if (rc) {
797                         DP_ERR(edev, "Failed to configure VLAN %u\n",
798                                vlan->vid);
799                         real_rc = rc;
800                         continue;
801                 }
802
803                 vlan->configured = true;
804                 /* vlan0 filter doesn't consume our VLAN filter's quota */
805                 if (vlan->vid != 0) {
806                         edev->non_configured_vlans--;
807                         edev->configured_vlans++;
808                 }
809         }
810
811         /* enable accept_any_vlan mode if we have more VLANs than credits,
812          * or remove accept_any_vlan mode if we've actually removed
813          * a non-configured vlan, and all remaining vlans are truly configured.
814          */
815
816         if (accept_any_vlan)
817                 rc = qede_config_accept_any_vlan(edev, true);
818         else if (!edev->non_configured_vlans)
819                 rc = qede_config_accept_any_vlan(edev, false);
820
821         if (rc && !real_rc)
822                 real_rc = rc;
823
824         return real_rc;
825 }
826
827 int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
828 {
829         struct qede_dev *edev = netdev_priv(dev);
830         struct qede_vlan *vlan = NULL;
831         int rc = 0;
832
833         DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
834
835         /* Find whether entry exists */
836         __qede_lock(edev);
837         list_for_each_entry(vlan, &edev->vlan_list, list)
838                 if (vlan->vid == vid)
839                         break;
840
841         if (!vlan || (vlan->vid != vid)) {
842                 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
843                            "Vlan isn't configured\n");
844                 goto out;
845         }
846
847         if (edev->state != QEDE_STATE_OPEN) {
848                 /* As interface is already down, we don't have a VPORT
849                  * instance to remove vlan filter. So just update vlan list
850                  */
851                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
852                            "Interface is down, removing VLAN from list only\n");
853                 qede_del_vlan_from_list(edev, vlan);
854                 goto out;
855         }
856
857         /* Remove vlan */
858         if (vlan->configured) {
859                 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
860                                             vid);
861                 if (rc) {
862                         DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
863                         goto out;
864                 }
865         }
866
867         qede_del_vlan_from_list(edev, vlan);
868
869         /* We have removed a VLAN - try to see if we can
870          * configure non-configured VLAN from the list.
871          */
872         rc = qede_configure_vlan_filters(edev);
873
874 out:
875         __qede_unlock(edev);
876         return rc;
877 }
878
879 void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
880 {
881         struct qede_vlan *vlan = NULL;
882
883         if (list_empty(&edev->vlan_list))
884                 return;
885
886         list_for_each_entry(vlan, &edev->vlan_list, list) {
887                 if (!vlan->configured)
888                         continue;
889
890                 vlan->configured = false;
891
892                 /* vlan0 filter isn't consuming out of our quota */
893                 if (vlan->vid != 0) {
894                         edev->non_configured_vlans++;
895                         edev->configured_vlans--;
896                 }
897
898                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
899                            "marked vlan %d as non-configured\n", vlan->vid);
900         }
901
902         edev->accept_any_vlan = false;
903 }
904
905 static void qede_set_features_reload(struct qede_dev *edev,
906                                      struct qede_reload_args *args)
907 {
908         edev->ndev->features = args->u.features;
909 }
910
911 netdev_features_t qede_fix_features(struct net_device *dev,
912                                     netdev_features_t features)
913 {
914         struct qede_dev *edev = netdev_priv(dev);
915
916         if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
917             !(features & NETIF_F_GRO))
918                 features &= ~NETIF_F_GRO_HW;
919
920         return features;
921 }
922
923 int qede_set_features(struct net_device *dev, netdev_features_t features)
924 {
925         struct qede_dev *edev = netdev_priv(dev);
926         netdev_features_t changes = features ^ dev->features;
927         bool need_reload = false;
928
929         if (changes & NETIF_F_GRO_HW)
930                 need_reload = true;
931
932         if (need_reload) {
933                 struct qede_reload_args args;
934
935                 args.u.features = features;
936                 args.func = &qede_set_features_reload;
937
938                 /* Make sure that we definitely need to reload.
939                  * In case of an eBPF attached program, there will be no FW
940                  * aggregations, so no need to actually reload.
941                  */
942                 __qede_lock(edev);
943                 if (edev->xdp_prog)
944                         args.func(edev, &args);
945                 else
946                         qede_reload(edev, &args, true);
947                 __qede_unlock(edev);
948
949                 return 1;
950         }
951
952         return 0;
953 }
954
955 void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
956 {
957         struct qede_dev *edev = netdev_priv(dev);
958         struct qed_tunn_params tunn_params;
959         u16 t_port = ntohs(ti->port);
960         int rc;
961
962         memset(&tunn_params, 0, sizeof(tunn_params));
963
964         switch (ti->type) {
965         case UDP_TUNNEL_TYPE_VXLAN:
966                 if (!edev->dev_info.common.vxlan_enable)
967                         return;
968
969                 if (edev->vxlan_dst_port)
970                         return;
971
972                 tunn_params.update_vxlan_port = 1;
973                 tunn_params.vxlan_port = t_port;
974
975                 __qede_lock(edev);
976                 rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
977                 __qede_unlock(edev);
978
979                 if (!rc) {
980                         edev->vxlan_dst_port = t_port;
981                         DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
982                                    t_port);
983                 } else {
984                         DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
985                                   t_port);
986                 }
987
988                 break;
989         case UDP_TUNNEL_TYPE_GENEVE:
990                 if (!edev->dev_info.common.geneve_enable)
991                         return;
992
993                 if (edev->geneve_dst_port)
994                         return;
995
996                 tunn_params.update_geneve_port = 1;
997                 tunn_params.geneve_port = t_port;
998
999                 __qede_lock(edev);
1000                 rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
1001                 __qede_unlock(edev);
1002
1003                 if (!rc) {
1004                         edev->geneve_dst_port = t_port;
1005                         DP_VERBOSE(edev, QED_MSG_DEBUG,
1006                                    "Added geneve port=%d\n", t_port);
1007                 } else {
1008                         DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
1009                                   t_port);
1010                 }
1011
1012                 break;
1013         default:
1014                 return;
1015         }
1016 }
1017
1018 void qede_udp_tunnel_del(struct net_device *dev,
1019                          struct udp_tunnel_info *ti)
1020 {
1021         struct qede_dev *edev = netdev_priv(dev);
1022         struct qed_tunn_params tunn_params;
1023         u16 t_port = ntohs(ti->port);
1024
1025         memset(&tunn_params, 0, sizeof(tunn_params));
1026
1027         switch (ti->type) {
1028         case UDP_TUNNEL_TYPE_VXLAN:
1029                 if (t_port != edev->vxlan_dst_port)
1030                         return;
1031
1032                 tunn_params.update_vxlan_port = 1;
1033                 tunn_params.vxlan_port = 0;
1034
1035                 __qede_lock(edev);
1036                 edev->ops->tunn_config(edev->cdev, &tunn_params);
1037                 __qede_unlock(edev);
1038
1039                 edev->vxlan_dst_port = 0;
1040
1041                 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
1042                            t_port);
1043
1044                 break;
1045         case UDP_TUNNEL_TYPE_GENEVE:
1046                 if (t_port != edev->geneve_dst_port)
1047                         return;
1048
1049                 tunn_params.update_geneve_port = 1;
1050                 tunn_params.geneve_port = 0;
1051
1052                 __qede_lock(edev);
1053                 edev->ops->tunn_config(edev->cdev, &tunn_params);
1054                 __qede_unlock(edev);
1055
1056                 edev->geneve_dst_port = 0;
1057
1058                 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
1059                            t_port);
1060                 break;
1061         default:
1062                 return;
1063         }
1064 }
1065
1066 static void qede_xdp_reload_func(struct qede_dev *edev,
1067                                  struct qede_reload_args *args)
1068 {
1069         struct bpf_prog *old;
1070
1071         old = xchg(&edev->xdp_prog, args->u.new_prog);
1072         if (old)
1073                 bpf_prog_put(old);
1074 }
1075
1076 static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
1077 {
1078         struct qede_reload_args args;
1079
1080         /* If we're called, there was already a bpf reference increment */
1081         args.func = &qede_xdp_reload_func;
1082         args.u.new_prog = prog;
1083         qede_reload(edev, &args, false);
1084
1085         return 0;
1086 }
1087
1088 int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1089 {
1090         struct qede_dev *edev = netdev_priv(dev);
1091
1092         switch (xdp->command) {
1093         case XDP_SETUP_PROG:
1094                 return qede_xdp_set(edev, xdp->prog);
1095         case XDP_QUERY_PROG:
1096                 xdp->prog_id = edev->xdp_prog ? edev->xdp_prog->aux->id : 0;
1097                 return 0;
1098         default:
1099                 return -EINVAL;
1100         }
1101 }
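
/* Context sketch, not part of this file: qede_xdp() implements the
 * .ndo_bpf callback, so the netdev core reaches it when a program is
 * attached (e.g. "ip link set dev ethX xdp obj prog.o").  The hookup in
 * qede_main.c looks roughly like:
 */
#if 0
static const struct net_device_ops qede_netdev_ops = {
	/* ... */
	.ndo_bpf = qede_xdp,
	/* ... */
};
#endif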
1102
1103 static int qede_set_mcast_rx_mac(struct qede_dev *edev,
1104                                  enum qed_filter_xcast_params_type opcode,
1105                                  unsigned char *mac, int num_macs)
1106 {
1107         struct qed_filter_params filter_cmd;
1108         int i;
1109
1110         memset(&filter_cmd, 0, sizeof(filter_cmd));
1111         filter_cmd.type = QED_FILTER_TYPE_MCAST;
1112         filter_cmd.filter.mcast.type = opcode;
1113         filter_cmd.filter.mcast.num = num_macs;
1114
1115         for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
1116                 ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
1117
1118         return edev->ops->filter_config(edev->cdev, &filter_cmd);
1119 }
1120
1121 int qede_set_mac_addr(struct net_device *ndev, void *p)
1122 {
1123         struct qede_dev *edev = netdev_priv(ndev);
1124         struct sockaddr *addr = p;
1125         int rc = 0;
1126
1127         /* Make sure the state doesn't transition while changing the MAC.
1128          * Also, all flows accessing the dev_addr field are doing that under
1129          * this lock.
1130          */
1131         __qede_lock(edev);
1132
1133         if (!is_valid_ether_addr(addr->sa_data)) {
1134                 DP_NOTICE(edev, "The MAC address is not valid\n");
1135                 rc = -EFAULT;
1136                 goto out;
1137         }
1138
1139         if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
1140                 DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
1141                           addr->sa_data);
1142                 rc = -EINVAL;
1143                 goto out;
1144         }
1145
1146         if (edev->state == QEDE_STATE_OPEN) {
1147                 /* Remove the previous primary mac */
1148                 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1149                                            ndev->dev_addr);
1150                 if (rc)
1151                         goto out;
1152         }
1153
1154         ether_addr_copy(ndev->dev_addr, addr->sa_data);
1155         DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
1156
1157         if (edev->state != QEDE_STATE_OPEN) {
1158                 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1159                            "The device is currently down\n");
1160                 /* Ask PF to explicitly update a copy in bulletin board */
1161                 if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
1162                         edev->ops->req_bulletin_update_mac(edev->cdev,
1163                                                            ndev->dev_addr);
1164                 goto out;
1165         }
1166
1167         edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
1168
1169         rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1170                                    ndev->dev_addr);
1171 out:
1172         __qede_unlock(edev);
1173         return rc;
1174 }
1175
1176 static int
1177 qede_configure_mcast_filtering(struct net_device *ndev,
1178                                enum qed_filter_rx_mode_type *accept_flags)
1179 {
1180         struct qede_dev *edev = netdev_priv(ndev);
1181         unsigned char *mc_macs, *temp;
1182         struct netdev_hw_addr *ha;
1183         int rc = 0, mc_count;
1184         size_t size;
1185
1186         size = 64 * ETH_ALEN;
1187
1188         mc_macs = kzalloc(size, GFP_KERNEL);
1189         if (!mc_macs) {
1190                 DP_NOTICE(edev,
1191                           "Failed to allocate memory for multicast MACs\n");
1192                 rc = -ENOMEM;
1193                 goto exit;
1194         }
1195
1196         temp = mc_macs;
1197
1198         /* Remove all previously configured MAC filters */
1199         rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1200                                    mc_macs, 1);
1201         if (rc)
1202                 goto exit;
1203
1204         netif_addr_lock_bh(ndev);
1205
1206         mc_count = netdev_mc_count(ndev);
1207         if (mc_count <= 64) {
1208                 netdev_for_each_mc_addr(ha, ndev) {
1209                         ether_addr_copy(temp, ha->addr);
1210                         temp += ETH_ALEN;
1211                 }
1212         }
1213
1214         netif_addr_unlock_bh(ndev);
1215
1216         /* Check for all multicast @@@TBD resource allocation */
1217         if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
1218                 if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
1219                         *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1220         } else {
1221                 /* Add all multicast MAC filters */
1222                 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1223                                            mc_macs, mc_count);
1224         }
1225
1226 exit:
1227         kfree(mc_macs);
1228         return rc;
1229 }
1230
1231 void qede_set_rx_mode(struct net_device *ndev)
1232 {
1233         struct qede_dev *edev = netdev_priv(ndev);
1234
1235         set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
1236         schedule_delayed_work(&edev->sp_task, 0);
1237 }
1238
1239 /* Must be called with qede_lock held */
1240 void qede_config_rx_mode(struct net_device *ndev)
1241 {
1242         enum qed_filter_rx_mode_type accept_flags;
1243         struct qede_dev *edev = netdev_priv(ndev);
1244         struct qed_filter_params rx_mode;
1245         unsigned char *uc_macs, *temp;
1246         struct netdev_hw_addr *ha;
1247         int rc, uc_count;
1248         size_t size;
1249
1250         netif_addr_lock_bh(ndev);
1251
1252         uc_count = netdev_uc_count(ndev);
1253         size = uc_count * ETH_ALEN;
1254
1255         uc_macs = kzalloc(size, GFP_ATOMIC);
1256         if (!uc_macs) {
1257                 DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
1258                 netif_addr_unlock_bh(ndev);
1259                 return;
1260         }
1261
1262         temp = uc_macs;
1263         netdev_for_each_uc_addr(ha, ndev) {
1264                 ether_addr_copy(temp, ha->addr);
1265                 temp += ETH_ALEN;
1266         }
1267
1268         netif_addr_unlock_bh(ndev);
1269
1270         /* Configure the struct for the Rx mode */
1271         memset(&rx_mode, 0, sizeof(struct qed_filter_params));
1272         rx_mode.type = QED_FILTER_TYPE_RX_MODE;
1273
1274         /* Remove all previous unicast secondary macs and multicast macs
1275          * (configure / leave the primary mac)
1276          */
1277         rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
1278                                    edev->ndev->dev_addr);
1279         if (rc)
1280                 goto out;
1281
1282         /* Check for promiscuous */
1283         if (ndev->flags & IFF_PROMISC)
1284                 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1285         else
1286                 accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
1287
1288         /* Configure all filters regardless, in case promisc is rejected */
1289         if (uc_count < edev->dev_info.num_mac_filters) {
1290                 int i;
1291
1292                 temp = uc_macs;
1293                 for (i = 0; i < uc_count; i++) {
1294                         rc = qede_set_ucast_rx_mac(edev,
1295                                                    QED_FILTER_XCAST_TYPE_ADD,
1296                                                    temp);
1297                         if (rc)
1298                                 goto out;
1299
1300                         temp += ETH_ALEN;
1301                 }
1302         } else {
1303                 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1304         }
1305
1306         rc = qede_configure_mcast_filtering(ndev, &accept_flags);
1307         if (rc)
1308                 goto out;
1309
1310         /* take care of VLAN mode */
1311         if (ndev->flags & IFF_PROMISC) {
1312                 qede_config_accept_any_vlan(edev, true);
1313         } else if (!edev->non_configured_vlans) {
1314                 /* It's possible that accept_any_vlan mode is set due to a
1315                  * previous setting of IFF_PROMISC. If vlan credits are
1316                  * sufficient, disable accept_any_vlan.
1317                  */
1318                 qede_config_accept_any_vlan(edev, false);
1319         }
1320
1321         rx_mode.filter.accept_flags = accept_flags;
1322         edev->ops->filter_config(edev->cdev, &rx_mode);
1323 out:
1324         kfree(uc_macs);
1325 }
1326
1327 static struct qede_arfs_fltr_node *
1328 qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
1329 {
1330         struct qede_arfs_fltr_node *fltr;
1331
1332         hlist_for_each_entry(fltr, head, node)
1333                 if (location == fltr->sw_id)
1334                         return fltr;
1335
1336         return NULL;
1337 }
1338
1339 int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
1340                           u32 *rule_locs)
1341 {
1342         struct qede_arfs_fltr_node *fltr;
1343         struct hlist_head *head;
1344         int cnt = 0, rc = 0;
1345
1346         info->data = QEDE_RFS_MAX_FLTR;
1347
1348         __qede_lock(edev);
1349
1350         if (!edev->arfs) {
1351                 rc = -EPERM;
1352                 goto unlock;
1353         }
1354
1355         head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1356
1357         hlist_for_each_entry(fltr, head, node) {
1358                 if (cnt == info->rule_cnt) {
1359                         rc = -EMSGSIZE;
1360                         goto unlock;
1361                 }
1362
1363                 rule_locs[cnt] = fltr->sw_id;
1364                 cnt++;
1365         }
1366
1367         info->rule_cnt = cnt;
1368
1369 unlock:
1370         __qede_unlock(edev);
1371         return rc;
1372 }
1373
1374 int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
1375 {
1376         struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1377         struct qede_arfs_fltr_node *fltr = NULL;
1378         int rc = 0;
1379
1380         cmd->data = QEDE_RFS_MAX_FLTR;
1381
1382         __qede_lock(edev);
1383
1384         if (!edev->arfs) {
1385                 rc = -EPERM;
1386                 goto unlock;
1387         }
1388
1389         fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1390                                          fsp->location);
1391         if (!fltr) {
1392                 DP_NOTICE(edev, "Rule not found - location=0x%x\n",
1393                           fsp->location);
1394                 rc = -EINVAL;
1395                 goto unlock;
1396         }
1397
1398         if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
1399                 if (fltr->tuple.ip_proto == IPPROTO_TCP)
1400                         fsp->flow_type = TCP_V4_FLOW;
1401                 else
1402                         fsp->flow_type = UDP_V4_FLOW;
1403
1404                 fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
1405                 fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
1406                 fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
1407                 fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
1408         } else {
1409                 if (fltr->tuple.ip_proto == IPPROTO_TCP)
1410                         fsp->flow_type = TCP_V6_FLOW;
1411                 else
1412                         fsp->flow_type = UDP_V6_FLOW;
1413                 fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
1414                 fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
1415                 memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
1416                        &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
1417                 memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
1418                        &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
1419         }
1420
1421         fsp->ring_cookie = fltr->rxq_id;
1422
1423         if (fltr->vfid) {
1424                 fsp->ring_cookie |= ((u64)fltr->vfid) <<
1425                                         ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
1426         }
1427
1428         if (fltr->b_is_drop)
1429                 fsp->ring_cookie = RX_CLS_FLOW_DISC;
1430 unlock:
1431         __qede_unlock(edev);
1432         return rc;
1433 }
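
/* Worked example, not part of the driver: the ring_cookie encoding above
 * packs the VF in the upper bits (ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF is 32
 * in ethtool.h):
 *
 *	fsp->ring_cookie = rxq_id | ((u64)vfid << 32);
 *
 * where the VF field is 1-based (zero means the PF itself), matching
 * params.vf_id = n->vfid - 1 in qede_configure_arfs_fltr(), and
 * RX_CLS_FLOW_DISC overrides the cookie entirely for drop rules.
 */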
1434
1435 static int
1436 qede_poll_arfs_filter_config(struct qede_dev *edev,
1437                              struct qede_arfs_fltr_node *fltr)
1438 {
1439         int count = QEDE_ARFS_POLL_COUNT;
1440
1441         while (fltr->used && count) {
1442                 msleep(20);
1443                 count--;
1444         }
1445
1446         if (count == 0 || fltr->fw_rc) {
1447                 DP_NOTICE(edev, "Timeout in polling filter config\n");
1448                 qede_dequeue_fltr_and_config_searcher(edev, fltr);
1449                 return -EIO;
1450         }
1451
1452         return fltr->fw_rc;
1453 }
1454
1455 static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
1456 {
1457         int size = ETH_HLEN;
1458
1459         if (t->eth_proto == htons(ETH_P_IP))
1460                 size += sizeof(struct iphdr);
1461         else
1462                 size += sizeof(struct ipv6hdr);
1463
1464         if (t->ip_proto == IPPROTO_TCP)
1465                 size += sizeof(struct tcphdr);
1466         else
1467                 size += sizeof(struct udphdr);
1468
1469         return size;
1470 }
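
/* Worked example, not part of the driver: for a TCP-over-IPv4 tuple the
 * minimal header buffer computed above is
 *
 *	ETH_HLEN (14) + sizeof(struct iphdr) (20) + sizeof(struct tcphdr) (20)
 *	= 54 bytes,
 *
 * while UDP-over-IPv6 needs 14 + 40 + 8 = 62 bytes.
 */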
1471
1472 static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
1473                                     struct qede_arfs_tuple *b)
1474 {
1475         if (a->eth_proto != htons(ETH_P_IP) ||
1476             b->eth_proto != htons(ETH_P_IP))
1477                 return false;
1478
1479         return (a->src_ipv4 == b->src_ipv4) &&
1480                (a->dst_ipv4 == b->dst_ipv4);
1481 }
1482
1483 static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
1484                                      void *header)
1485 {
1486         __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
1487         struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
1488         struct ethhdr *eth = (struct ethhdr *)header;
1489
1490         eth->h_proto = t->eth_proto;
1491         ip->saddr = t->src_ipv4;
1492         ip->daddr = t->dst_ipv4;
1493         ip->version = 0x4;
1494         ip->ihl = 0x5;
1495         ip->protocol = t->ip_proto;
1496         ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
1497
1498         /* ports is weakly typed to suit both TCP and UDP ports */
1499         ports[0] = t->src_port;
1500         ports[1] = t->dst_port;
1501 }
1502
1503 static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
1504                                          void *buffer)
1505 {
1506         const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
1507
1508         snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
1509                  "%s %pI4 (%04x) -> %pI4 (%04x)",
1510                  prefix, &t->src_ipv4, t->src_port,
1511                  &t->dst_ipv4, t->dst_port);
1512 }
1513
1514 static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
1515                                     struct qede_arfs_tuple *b)
1516 {
1517         if (a->eth_proto != htons(ETH_P_IPV6) ||
1518             b->eth_proto != htons(ETH_P_IPV6))
1519                 return false;
1520
1521         if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
1522                 return false;
1523
1524         if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
1525                 return false;
1526
1527         return true;
1528 }
1529
1530 static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
1531                                      void *header)
1532 {
1533         __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
1534         struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
1535         struct ethhdr *eth = (struct ethhdr *)header;
1536
1537         eth->h_proto = t->eth_proto;
1538         memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
1539         memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
1540         ip6->version = 0x6;
1541
1542         if (t->ip_proto == IPPROTO_TCP) {
1543                 ip6->nexthdr = NEXTHDR_TCP;
1544                 ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
1545         } else {
1546                 ip6->nexthdr = NEXTHDR_UDP;
1547                 ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
1548         }
1549
1550         /* ports is weakly typed to suit both TCP and UDP ports */
1551         ports[0] = t->src_port;
1552         ports[1] = t->dst_port;
1553 }
1554
1555 /* Reject flow-spec fields that are set but not supported by the driver */
1556 static int qede_flow_spec_validate_unused(struct qede_dev *edev,
1557                                           struct ethtool_rx_flow_spec *fs)
1558 {
1559         if (fs->flow_type & FLOW_MAC_EXT) {
1560                 DP_INFO(edev, "Don't support MAC extensions\n");
1561                 return -EOPNOTSUPP;
1562         }
1563
1564         if ((fs->flow_type & FLOW_EXT) &&
1565             (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
1566                 DP_INFO(edev, "Don't support vlan-based classification\n");
1567                 return -EOPNOTSUPP;
1568         }
1569
1570         if ((fs->flow_type & FLOW_EXT) &&
1571             (fs->h_ext.data[0] || fs->h_ext.data[1])) {
1572                 DP_INFO(edev, "Don't support user defined data\n");
1573                 return -EOPNOTSUPP;
1574         }
1575
1576         return 0;
1577 }
1578
static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
                                        struct qede_arfs_tuple *t)
{
        /* The input must be exactly one of: a full 4-tuple (both ports and
         * both IPs), an L4 destination port alone, a source IP alone or a
         * destination IP alone.
         */
        if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
                t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
        } else if (!t->src_port && t->dst_port &&
                   !t->src_ipv4 && !t->dst_ipv4) {
                t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
        } else if (!t->src_port && !t->dst_port &&
                   !t->dst_ipv4 && t->src_ipv4) {
                t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
        } else if (!t->src_port && !t->dst_port &&
                   t->dst_ipv4 && !t->src_ipv4) {
                t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
        } else {
                DP_INFO(edev, "Invalid N-tuple\n");
                return -EOPNOTSUPP;
        }

        t->ip_comp = qede_flow_spec_ipv4_cmp;
        t->build_hdr = qede_flow_build_ipv4_hdr;
        t->stringify = qede_flow_stringify_ipv4_hdr;

        return 0;
}

static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
                                        struct qede_arfs_tuple *t,
                                        struct in6_addr *zaddr)
{
        /* The input must be exactly one of: a full 4-tuple (both ports and
         * both IPs), an L4 destination port alone, a source IP alone or a
         * destination IP alone.
         */
        if (t->src_port && t->dst_port &&
            memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
            memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
                t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
        } else if (!t->src_port && t->dst_port &&
                   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
                   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
                t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
        } else if (!t->src_port && !t->dst_port &&
                   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
                   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
                t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
        } else if (!t->src_port && !t->dst_port &&
                   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
                   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
                t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
        } else {
                DP_INFO(edev, "Invalid N-tuple\n");
                return -EOPNOTSUPP;
        }

        t->ip_comp = qede_flow_spec_ipv6_cmp;
        t->build_hdr = qede_flow_build_ipv6_hdr;

        return 0;
}

/* Must be called while qede lock is held */
static struct qede_arfs_fltr_node *
qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
{
        struct qede_arfs_fltr_node *fltr;
        struct hlist_node *temp;
        struct hlist_head *head;

        head = QEDE_ARFS_BUCKET_HEAD(edev, 0);

        hlist_for_each_entry_safe(fltr, temp, head, node) {
                if (fltr->tuple.ip_proto == t->ip_proto &&
                    fltr->tuple.src_port == t->src_port &&
                    fltr->tuple.dst_port == t->dst_port &&
                    t->ip_comp(&fltr->tuple, t))
                        return fltr;
        }

        return NULL;
}

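/* Decode the ethtool ring cookie into the filter destination: a drop rule,
 * or a VF/Rx-queue pair (vfid is stored biased by one; zero means the PF).
 */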
static void qede_flow_set_destination(struct qede_dev *edev,
                                      struct qede_arfs_fltr_node *n,
                                      struct ethtool_rx_flow_spec *fs)
{
        if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
                n->b_is_drop = true;
                return;
        }

        n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
        n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
        n->next_rxq_id = n->rxq_id;

        if (n->vfid)
                DP_VERBOSE(edev, QED_MSG_SP,
                           "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
}

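/* Remove the flow filter identified by @cookie (the filter's sw_id) and
 * release its resources. Returns -EPERM if aRFS is inactive or no such
 * filter exists.
 */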
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
{
        struct qede_arfs_fltr_node *fltr = NULL;
        int rc = -EPERM;

        __qede_lock(edev);
        if (!edev->arfs)
                goto unlock;

        fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
                                         cookie);
        if (!fltr)
                goto unlock;

        qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);

        rc = qede_poll_arfs_filter_config(edev, fltr);
        if (rc == 0)
                qede_dequeue_fltr_and_config_searcher(edev, fltr);

unlock:
        __qede_unlock(edev);
        return rc;
}

int qede_get_arfs_filter_count(struct qede_dev *edev)
{
        int count = 0;

        __qede_lock(edev);

        if (!edev->arfs)
                goto unlock;

        count = edev->arfs->filter_count;

unlock:
        __qede_unlock(edev);
        return count;
}

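/* Accept only actions the hardware can honour: drop, or redirection to a
 * VF or to an Rx queue within the RSS range.
 */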
static int qede_parse_actions(struct qede_dev *edev,
                              struct flow_action *flow_action,
                              struct netlink_ext_ack *extack)
{
        const struct flow_action_entry *act;
        int i;

        if (!flow_action_has_entries(flow_action)) {
                DP_NOTICE(edev, "No actions received\n");
                return -EINVAL;
        }

        if (!flow_action_basic_hw_stats_check(flow_action, extack))
                return -EOPNOTSUPP;

        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        break;
                case FLOW_ACTION_QUEUE:
                        if (act->queue.vf)
                                break;

                        if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
                                DP_INFO(edev, "Queue out-of-bounds\n");
                                return -EINVAL;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

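/* Extract the L4 source/destination ports from the rule; only exact-match
 * (all-ones mask) ports are supported.
 */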
static int
qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
                      struct qede_arfs_tuple *t)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                if ((match.key->src && match.mask->src != U16_MAX) ||
                    (match.key->dst && match.mask->dst != U16_MAX)) {
                        DP_NOTICE(edev, "Do not support ports masks\n");
                        return -EINVAL;
                }

                t->src_port = match.key->src;
                t->dst_port = match.key->dst;
        }

        return 0;
}

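/* Extract IPv6 addresses and L4 ports from the rule and map the result
 * onto a filtering profile. Address prefixes/masks are not supported.
 */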
static int
qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
                          struct qede_arfs_tuple *t)
{
        struct in6_addr zero_addr, addr;

        memset(&zero_addr, 0, sizeof(addr));
        memset(&addr, 0xff, sizeof(addr));

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
                     memcmp(&match.mask->src, &addr, sizeof(addr))) ||
                    (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
                     memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
                        DP_NOTICE(edev,
                                  "Do not support IPv6 address prefix/mask\n");
                        return -EINVAL;
                }

                memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
                memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
        }

        if (qede_flow_parse_ports(edev, rule, t))
                return -EINVAL;

        return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
}

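/* Extract IPv4 addresses and L4 ports from the rule and map the result
 * onto a filtering profile. Address prefixes/masks are not supported.
 */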
static int
qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
                          struct qede_arfs_tuple *t)
{
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                if ((match.key->src && match.mask->src != U32_MAX) ||
                    (match.key->dst && match.mask->dst != U32_MAX)) {
                        DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
                        return -EINVAL;
                }

                t->src_ipv4 = match.key->src;
                t->dst_ipv4 = match.key->dst;
        }

        if (qede_flow_parse_ports(edev, rule, t))
                return -EINVAL;

        return qede_set_v4_tuple_to_profile(edev, t);
}

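/* The four wrappers below fix the tuple's L3/L4 protocol and defer to the
 * common IPv4/IPv6 parsing above.
 */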
static int
qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
                       struct qede_arfs_tuple *tuple)
{
        tuple->ip_proto = IPPROTO_TCP;
        tuple->eth_proto = htons(ETH_P_IPV6);

        return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
                       struct qede_arfs_tuple *tuple)
{
        tuple->ip_proto = IPPROTO_TCP;
        tuple->eth_proto = htons(ETH_P_IP);

        return qede_flow_parse_v4_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
                       struct qede_arfs_tuple *tuple)
{
        tuple->ip_proto = IPPROTO_UDP;
        tuple->eth_proto = htons(ETH_P_IPV6);

        return qede_flow_parse_v6_common(edev, rule, tuple);
}

static int
qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
                       struct qede_arfs_tuple *tuple)
{
        tuple->ip_proto = IPPROTO_UDP;
        tuple->eth_proto = htons(ETH_P_IP);

        return qede_flow_parse_v4_common(edev, rule, tuple);
}

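/* Translate a flow_rule match into a qede_arfs_tuple. Only TCP/UDP over
 * IPv4/IPv6 with exact-match control/basic/address/port keys is supported.
 */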
static int
qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
                     struct flow_rule *rule, struct qede_arfs_tuple *tuple)
{
        struct flow_dissector *dissector = rule->match.dissector;
        int rc = -EINVAL;
        u8 ip_proto = 0;

        memset(tuple, 0, sizeof(*tuple));

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                DP_NOTICE(edev, "Unsupported key set: 0x%x\n",
                          dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (proto != htons(ETH_P_IP) &&
            proto != htons(ETH_P_IPV6)) {
                DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
                return -EPROTONOSUPPORT;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ip_proto = match.key->ip_proto;
        }

        if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
                rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
        else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
                rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
        else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
                rc = qede_flow_parse_udp_v4(edev, rule, tuple);
        else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
                rc = qede_flow_parse_udp_v6(edev, rule, tuple);
        else
                DP_NOTICE(edev, "Invalid protocol request\n");

        return rc;
}

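/* Offload a tc-flower classifier: parse the rule into a tuple, validate it
 * against the configured filtering mode and filter budget, then push the
 * filter to firmware and poll for completion.
 */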
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                            struct flow_cls_offload *f)
{
        struct qede_arfs_fltr_node *n;
        int min_hlen, rc = -EINVAL;
        struct qede_arfs_tuple t;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        /* parse flower attribute and prepare filter */
        if (qede_parse_flow_attr(edev, proto, f->rule, &t))
                goto unlock;

        /* Validate profile mode and number of filters */
        if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
            edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
                DP_NOTICE(edev,
                          "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
                          t.mode, edev->arfs->mode, edev->arfs->filter_count);
                goto unlock;
        }

        /* parse tc actions and get the vf_id */
        if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
                goto unlock;

        if (qede_flow_find_fltr(edev, &t)) {
                rc = -EEXIST;
                goto unlock;
        }

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n) {
                rc = -ENOMEM;
                goto unlock;
        }

        min_hlen = qede_flow_get_min_header_size(&t);

        n->data = kzalloc(min_hlen, GFP_KERNEL);
        if (!n->data) {
                kfree(n);
                rc = -ENOMEM;
                goto unlock;
        }

        memcpy(&n->tuple, &t, sizeof(n->tuple));

        n->buf_len = min_hlen;
        n->b_is_drop = true;
        n->sw_id = f->cookie;

        n->tuple.build_hdr(&n->tuple, n->data);

        rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
        if (rc)
                goto unlock;

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
        rc = qede_poll_arfs_filter_config(edev, n);

unlock:
        __qede_unlock(edev);
        return rc;
}

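/* Validate an ethtool flow spec against driver state: the location must be
 * in range and unused, the tuple must fit the configured filtering mode and
 * the requested actions must be supported.
 */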
static int qede_flow_spec_validate(struct qede_dev *edev,
                                   struct flow_action *flow_action,
                                   struct qede_arfs_tuple *t,
                                   __u32 location)
{
        if (location >= QEDE_RFS_MAX_FLTR) {
                DP_INFO(edev, "Location out-of-bounds\n");
                return -EINVAL;
        }

        /* Check location isn't already in use */
        if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
                DP_INFO(edev, "Location already in use\n");
                return -EINVAL;
        }

        /* Check if the filtering-mode could support the filter */
        if (edev->arfs->filter_count &&
            edev->arfs->mode != t->mode) {
                DP_INFO(edev,
                        "flow_spec would require filtering mode %08x, but %08x is configured\n",
                        t->mode, edev->arfs->mode);
                return -EINVAL;
        }

        if (qede_parse_actions(edev, flow_action, NULL))
                return -EINVAL;

        return 0;
}

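/* Convert an ethtool_rx_flow_spec into a qede_arfs_tuple by building a
 * transient flow_rule from it and reusing the flower parsing path.
 */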
static int qede_flow_spec_to_rule(struct qede_dev *edev,
                                  struct qede_arfs_tuple *t,
                                  struct ethtool_rx_flow_spec *fs)
{
        struct ethtool_rx_flow_spec_input input = {};
        struct ethtool_rx_flow_rule *flow;
        __be16 proto;
        int err = 0;

        if (qede_flow_spec_validate_unused(edev, fs))
                return -EOPNOTSUPP;

        switch ((fs->flow_type & ~FLOW_EXT)) {
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
                proto = htons(ETH_P_IP);
                break;
        case TCP_V6_FLOW:
        case UDP_V6_FLOW:
                proto = htons(ETH_P_IPV6);
                break;
        default:
                DP_VERBOSE(edev, NETIF_MSG_IFUP,
                           "Can't support flow of type %08x\n", fs->flow_type);
                return -EOPNOTSUPP;
        }

        input.fs = fs;
        flow = ethtool_rx_flow_rule_create(&input);
        if (IS_ERR(flow))
                return PTR_ERR(flow);

        if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
                err = -EINVAL;
                goto err_out;
        }

        /* Make sure location is valid and filter isn't already set */
        err = qede_flow_spec_validate(edev, &flow->rule->action, t,
                                      fs->location);
err_out:
        ethtool_rx_flow_rule_destroy(flow);
        return err;
}

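/* Add an ethtool N-tuple classification rule: translate the flow spec into
 * a tuple, reject duplicates, build the minimal header the hardware matches
 * on, then configure the filter and poll for completion.
 */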
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
        struct ethtool_rx_flow_spec *fsp = &info->fs;
        struct qede_arfs_fltr_node *n;
        struct qede_arfs_tuple t;
        int min_hlen, rc;

        __qede_lock(edev);

        if (!edev->arfs) {
                rc = -EPERM;
                goto unlock;
        }

        /* Translate the flow specification into something fitting our DB */
        rc = qede_flow_spec_to_rule(edev, &t, fsp);
        if (rc)
                goto unlock;

        if (qede_flow_find_fltr(edev, &t)) {
                rc = -EINVAL;
                goto unlock;
        }

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n) {
                rc = -ENOMEM;
                goto unlock;
        }

        min_hlen = qede_flow_get_min_header_size(&t);
        n->data = kzalloc(min_hlen, GFP_KERNEL);
        if (!n->data) {
                kfree(n);
                rc = -ENOMEM;
                goto unlock;
        }

        n->sw_id = fsp->location;
        set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
        n->buf_len = min_hlen;

        memcpy(&n->tuple, &t, sizeof(n->tuple));

        qede_flow_set_destination(edev, n, fsp);

        /* Build a minimal header according to the flow */
        n->tuple.build_hdr(&n->tuple, n->data);

        rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
        if (rc)
                goto unlock;

        qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
        rc = qede_poll_arfs_filter_config(edev, n);
unlock:
        __qede_unlock(edev);

        return rc;
}