// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/hash.h>
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_app.h"

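/* Entry in the mask id table: maps the jhash of a mask blob to the mask id
 * handed out for it, with a reference count of the flows sharing that mask.
 */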
struct nfp_mask_id_table {
        struct hlist_node link;
        u32 hash_key;
        u32 ref_cnt;
        u8 mask_id;
};

struct nfp_fl_flow_table_cmp_arg {
        struct net_device *netdev;
        unsigned long cookie;
        __be32 host_ctx;
};

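/* Stats context ids are recycled through a circular buffer of free entries;
 * each entry occupies NFP_FL_STATS_ELEM_RS bytes of the ring, so the
 * head/tail arithmetic below advances in NFP_FL_STATS_ELEM_RS steps.
 */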
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
        struct nfp_flower_priv *priv = app->priv;
        struct circ_buf *ring;

        ring = &priv->stats_ids.free_list;
        /* Check if buffer is full. */
        if (!CIRC_SPACE(ring->head, ring->tail,
                        priv->stats_ring_size * NFP_FL_STATS_ELEM_RS -
                        NFP_FL_STATS_ELEM_RS + 1))
                return -ENOBUFS;

        memcpy(&ring->buf[ring->head], &stats_context_id, NFP_FL_STATS_ELEM_RS);
        ring->head = (ring->head + NFP_FL_STATS_ELEM_RS) %
                     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

        return 0;
}

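/* Hand out a stats context id: ids that have never been allocated are drawn
 * down first, after which released ids are popped from the free ring.
 */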
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
        struct nfp_flower_priv *priv = app->priv;
        u32 freed_stats_id, temp_stats_id;
        struct circ_buf *ring;

        ring = &priv->stats_ids.free_list;
        freed_stats_id = priv->stats_ring_size;
        /* Check for unallocated entries first. */
        if (priv->stats_ids.init_unalloc > 0) {
                *stats_context_id = priv->stats_ids.init_unalloc - 1;
                priv->stats_ids.init_unalloc--;
                return 0;
        }

        /* Check if buffer is empty. */
        if (ring->head == ring->tail) {
                *stats_context_id = freed_stats_id;
                return -ENOENT;
        }

        memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
        *stats_context_id = temp_stats_id;
        memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
        ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
                     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

        return 0;
}

/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
                           struct net_device *netdev, __be32 host_ctx)
{
        struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
        struct nfp_flower_priv *priv = app->priv;

        flower_cmp_arg.netdev = netdev;
        flower_cmp_arg.cookie = tc_flower_cookie;
        flower_cmp_arg.host_ctx = host_ctx;

        return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
                                      nfp_flower_table_params);
}

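/* Process a flow stats control message from the firmware: each stats frame
 * carries a context id plus packet and byte counts, which are accumulated
 * into the stats array under stats_lock.
 */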
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb)
{
        unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_stats_frame *stats;
        unsigned char *msg;
        u32 ctx_id;
        int i;

        msg = nfp_flower_cmsg_get_data(skb);

        spin_lock(&priv->stats_lock);
        for (i = 0; i < msg_len / sizeof(*stats); i++) {
                stats = (struct nfp_fl_stats_frame *)msg + i;
                ctx_id = be32_to_cpu(stats->stats_con_id);
                priv->stats[ctx_id].pkts += be32_to_cpu(stats->pkt_count);
                priv->stats[ctx_id].bytes += be64_to_cpu(stats->byte_count);
                priv->stats[ctx_id].used = jiffies;
        }
        spin_unlock(&priv->stats_lock);
}

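/* Return a mask id to the free ring and timestamp it, so that
 * nfp_mask_alloc() can hold the id back until the reuse timeout expires.
 */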
static int nfp_release_mask_id(struct nfp_app *app, u8 mask_id)
{
        struct nfp_flower_priv *priv = app->priv;
        struct circ_buf *ring;

        ring = &priv->mask_ids.mask_id_free_list;
        /* Checking if buffer is full. */
        if (CIRC_SPACE(ring->head, ring->tail, NFP_FLOWER_MASK_ENTRY_RS) == 0)
                return -ENOBUFS;

        memcpy(&ring->buf[ring->head], &mask_id, NFP_FLOWER_MASK_ELEMENT_RS);
        ring->head = (ring->head + NFP_FLOWER_MASK_ELEMENT_RS) %
                     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

        priv->mask_ids.last_used[mask_id] = ktime_get();

        return 0;
}

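/* Allocate a mask id. Never-used ids are handed out first; a recycled id is
 * only reused once NFP_FL_MASK_REUSE_TIME_NS has passed since its release,
 * giving any in-flight use of the old mask time to drain.
 */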
static int nfp_mask_alloc(struct nfp_app *app, u8 *mask_id)
{
        struct nfp_flower_priv *priv = app->priv;
        ktime_t reuse_timeout;
        struct circ_buf *ring;
        u8 temp_id, freed_id;

        ring = &priv->mask_ids.mask_id_free_list;
        freed_id = NFP_FLOWER_MASK_ENTRY_RS - 1;
        /* Checking for unallocated entries first. */
        if (priv->mask_ids.init_unallocated > 0) {
                *mask_id = priv->mask_ids.init_unallocated;
                priv->mask_ids.init_unallocated--;
                return 0;
        }

        /* Checking if buffer is empty. */
        if (ring->head == ring->tail)
                goto err_not_found;

        memcpy(&temp_id, &ring->buf[ring->tail], NFP_FLOWER_MASK_ELEMENT_RS);
        *mask_id = temp_id;

        reuse_timeout = ktime_add_ns(priv->mask_ids.last_used[*mask_id],
                                     NFP_FL_MASK_REUSE_TIME_NS);

        if (ktime_before(ktime_get(), reuse_timeout))
                goto err_not_found;

        memcpy(&ring->buf[ring->tail], &freed_id, NFP_FLOWER_MASK_ELEMENT_RS);
        ring->tail = (ring->tail + NFP_FLOWER_MASK_ELEMENT_RS) %
                     (NFP_FLOWER_MASK_ENTRY_RS * NFP_FLOWER_MASK_ELEMENT_RS);

        return 0;

err_not_found:
        *mask_id = freed_id;
        return -ENOENT;
}

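/* Allocate a fresh mask id and add an entry for it to the mask table, keyed
 * by the jhash of the mask data. Returns the new id, or a negative errno.
 */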
static int
nfp_add_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_mask_id_table *mask_entry;
        unsigned long hash_key;
        u8 mask_id;

        if (nfp_mask_alloc(app, &mask_id))
                return -ENOENT;

        mask_entry = kmalloc(sizeof(*mask_entry), GFP_KERNEL);
        if (!mask_entry) {
                nfp_release_mask_id(app, mask_id);
                return -ENOMEM;
        }

        INIT_HLIST_NODE(&mask_entry->link);
        mask_entry->mask_id = mask_id;
        hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);
        mask_entry->hash_key = hash_key;
        mask_entry->ref_cnt = 1;
        hash_add(priv->mask_table, &mask_entry->link, hash_key);

        return mask_id;
}

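/* Look up a mask entry by the jhash of its mask data. Only hash keys are
 * compared, so two distinct masks that collide under jhash would match the
 * same entry.
 */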
static struct nfp_mask_id_table *
nfp_search_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_mask_id_table *mask_entry;
        unsigned long hash_key;

        hash_key = jhash(mask_data, mask_len, priv->mask_id_seed);

        hash_for_each_possible(priv->mask_table, mask_entry, link, hash_key)
                if (mask_entry->hash_key == hash_key)
                        return mask_entry;

        return NULL;
}

static int
nfp_find_in_mask_table(struct nfp_app *app, char *mask_data, u32 mask_len)
{
        struct nfp_mask_id_table *mask_entry;

        mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
        if (!mask_entry)
                return -ENOENT;

        mask_entry->ref_cnt++;

        /* Casting u8 to int for later use. */
        return mask_entry->mask_id;
}

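/* Find or create a mask id for @mask_data. NFP_FL_META_FLAG_MANAGE_MASK is
 * set in @meta_flags when a new table entry had to be created. Returns true
 * on success.
 */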
static bool
nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
                   u8 *meta_flags, u8 *mask_id)
{
        int id;

        id = nfp_find_in_mask_table(app, mask_data, mask_len);
        if (id < 0) {
                id = nfp_add_mask_table(app, mask_data, mask_len);
                if (id < 0)
                        return false;
                *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
        }
        *mask_id = id;

        return true;
}

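/* Drop one reference on the mask entry matching @mask_data. When the last
 * reference goes, the entry is removed, its id released for reuse and
 * NFP_FL_META_FLAG_MANAGE_MASK set again in @meta_flags (when supplied).
 * Returns false if no matching entry exists.
 */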
static bool
nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
                      u8 *meta_flags, u8 *mask_id)
{
        struct nfp_mask_id_table *mask_entry;

        mask_entry = nfp_search_mask_table(app, mask_data, mask_len);
        if (!mask_entry)
                return false;

        if (meta_flags)
                *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;

        *mask_id = mask_entry->mask_id;
        mask_entry->ref_cnt--;
        if (!mask_entry->ref_cnt) {
                hash_del(&mask_entry->link);
                nfp_release_mask_id(app, *mask_id);
                kfree(mask_entry);
                if (meta_flags)
                        *meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
        }

        return true;
}

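/* Assign a stats context, mask id, host cookie and flow version to
 * @nfp_flow. If a flow with the same cookie already exists for @netdev, the
 * allocations are unwound and -EEXIST returned.
 */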
int nfp_compile_flow_metadata(struct nfp_app *app,
                              struct tc_cls_flower_offload *flow,
                              struct nfp_fl_payload *nfp_flow,
                              struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *check_entry;
        u8 new_mask_id;
        u32 stats_cxt;

        if (nfp_get_stats_entry(app, &stats_cxt))
                return -ENOENT;

        nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
        nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
        nfp_flow->ingress_dev = netdev;

        new_mask_id = 0;
        if (!nfp_check_mask_add(app, nfp_flow->mask_data,
                                nfp_flow->meta.mask_len,
                                &nfp_flow->meta.flags, &new_mask_id)) {
                if (nfp_release_stats_entry(app, stats_cxt))
                        return -EINVAL;
                return -ENOENT;
        }

        nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
        priv->flower_version++;

        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
        priv->stats[stats_cxt].pkts = 0;
        priv->stats[stats_cxt].bytes = 0;
        priv->stats[stats_cxt].used = jiffies;

        check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
                                                 NFP_FL_STATS_CTX_DONT_CARE);
        if (check_entry) {
                if (nfp_release_stats_entry(app, stats_cxt))
                        return -EINVAL;

                if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
                                           nfp_flow->meta.mask_len,
                                           NULL, &new_mask_id))
                        return -EINVAL;

                return -EEXIST;
        }

        return 0;
}

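/* Prepare an existing flow for modification: drop its mask reference, bump
 * the flow version and release its stats context id.
 */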
int nfp_modify_flow_metadata(struct nfp_app *app,
                             struct nfp_fl_payload *nfp_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        u8 new_mask_id = 0;
        u32 temp_ctx_id;

        nfp_check_mask_remove(app, nfp_flow->mask_data,
                              nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
                              &new_mask_id);

        nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
        priv->flower_version++;

        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;

        /* Release the stats ctx id. */
        temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

        return nfp_release_stats_entry(app, temp_ctx_id);
}

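/* rhashtable compare callback. A NULL netdev or a host_ctx of
 * NFP_FL_STATS_CTX_DONT_CARE in the lookup key acts as a wildcard for that
 * field; returns zero on a match, as rhashtable expects.
 */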
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
                            const void *obj)
{
        const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
        const struct nfp_fl_payload *flow_entry = obj;

        if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
            (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
             flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
                return flow_entry->tc_flower_cookie != cmp_arg->cookie;

        return 1;
}

static u32 nfp_fl_obj_hashfn(const void *data, u32 len, u32 seed)
{
        const struct nfp_fl_payload *flower_entry = data;

        return jhash2((u32 *)&flower_entry->tc_flower_cookie,
                      sizeof(flower_entry->tc_flower_cookie) / sizeof(u32),
                      seed);
}

static u32 nfp_fl_key_hashfn(const void *data, u32 len, u32 seed)
{
        const struct nfp_fl_flow_table_cmp_arg *cmp_arg = data;

        return jhash2((u32 *)&cmp_arg->cookie,
                      sizeof(cmp_arg->cookie) / sizeof(u32), seed);
}

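/* Key and object hash functions both hash only the flow cookie, so lookups
 * that wildcard netdev or host context still land in the right bucket;
 * nfp_fl_obj_cmpfn() applies the remaining match criteria.
 */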
const struct rhashtable_params nfp_flower_table_params = {
        .head_offset            = offsetof(struct nfp_fl_payload, fl_node),
        .hashfn                 = nfp_fl_key_hashfn,
        .obj_cmpfn              = nfp_fl_obj_cmpfn,
        .obj_hashfn             = nfp_fl_obj_hashfn,
        .automatic_shrinking    = true,
};

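/* Set up the flow table, the mask id and stats id free rings and the stats
 * array. @host_ctx_count is the number of stats contexts available;
 * priv->stats_ring_size is expected to be set by the caller.
 */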
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
{
        struct nfp_flower_priv *priv = app->priv;
        int err;

        hash_init(priv->mask_table);

        err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
        if (err)
                return err;

        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

        /* Init ring buffer and unallocated mask_ids. */
        priv->mask_ids.mask_id_free_list.buf =
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
                goto err_free_flow_table;

        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

        /* Init timestamps for mask id. */
        priv->mask_ids.last_used =
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
        if (!priv->mask_ids.last_used)
                goto err_free_mask_id;

        /* Init ring buffer and unallocated stats_ids. */
        priv->stats_ids.free_list.buf =
                vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
                                   priv->stats_ring_size));
        if (!priv->stats_ids.free_list.buf)
                goto err_free_last_used;

        priv->stats_ids.init_unalloc = host_ctx_count;

        priv->stats = kvmalloc_array(priv->stats_ring_size,
                                     sizeof(struct nfp_fl_stats), GFP_KERNEL);
        if (!priv->stats)
                goto err_free_ring_buf;

        spin_lock_init(&priv->stats_lock);

        return 0;

err_free_ring_buf:
        vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
        kfree(priv->mask_ids.last_used);
err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
        rhashtable_destroy(&priv->flow_table);
        return -ENOMEM;
}

void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!priv)
                return;

        rhashtable_free_and_destroy(&priv->flow_table,
                                    nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
        vfree(priv->stats_ids.free_list.buf);
}