1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2007-2014 Nicira, Inc.
4 */
8 #include "flow_netlink.h"
9 #include <linux/uaccess.h>
10 #include <linux/netdevice.h>
11 #include <linux/etherdevice.h>
12 #include <linux/if_ether.h>
13 #include <linux/if_vlan.h>
14 #include <net/llc_pdu.h>
15 #include <linux/kernel.h>
16 #include <linux/jhash.h>
17 #include <linux/jiffies.h>
18 #include <linux/llc.h>
19 #include <linux/module.h>
21 #include <linux/rcupdate.h>
22 #include <linux/cpumask.h>
23 #include <linux/if_arp.h>
25 #include <linux/ipv6.h>
26 #include <linux/sctp.h>
27 #include <linux/tcp.h>
28 #include <linux/udp.h>
29 #include <linux/icmp.h>
30 #include <linux/icmpv6.h>
31 #include <linux/rculist.h>
32 #include <linux/sort.h>
35 #include <net/ndisc.h>
37 #define TBL_MIN_BUCKETS 1024
38 #define MASK_ARRAY_SIZE_MIN 16
39 #define REHASH_INTERVAL (10 * 60 * HZ)
41 #define MC_DEFAULT_HASH_ENTRIES 256
42 #define MC_HASH_SHIFT 8
43 #define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
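/*
 * Illustrative sketch (not part of the upstream file): with the defaults
 * above, a 32-bit skb hash is consumed in MC_HASH_SEGS = 32 / 8 = 4
 * segments of MC_HASH_SHIFT = 8 bits, each segment indexing one slot of
 * the per-CPU mask cache. The helper name is hypothetical; the real
 * lookup below shifts the hash in place instead.
 */
static inline u32 example_mc_cache_index(u32 skb_hash, int seg, u32 cache_size)
{
	/* cache_size is a power of two, so masking replaces a modulo. */
	return (skb_hash >> (seg * MC_HASH_SHIFT)) & (cache_size - 1);
}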
45 static struct kmem_cache *flow_cache;
46 struct kmem_cache *flow_stats_cache __read_mostly;
48 static u16 range_n_bytes(const struct sw_flow_key_range *range)
50 return range->end - range->start;
53 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
54 bool full, const struct sw_flow_mask *mask)
56 int start = full ? 0 : mask->range.start;
57 int len = full ? sizeof *dst : range_n_bytes(&mask->range);
58 const long *m = (const long *)((const u8 *)&mask->key + start);
59 const long *s = (const long *)((const u8 *)src + start);
60 long *d = (long *)((u8 *)dst + start);
63 /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
64 * only the bytes inside 'mask->range' are initialized and the rest of
65 * 'dst' is left uninitialized. This can be used as an optimization when
66 * further operations on 'dst' only use contents within 'mask->range'.
67 */
68 for (i = 0; i < len; i += sizeof(long))
69 *d++ = *s++ & *m++;
70 }
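/*
 * Illustrative usage sketch (not part of the upstream file): build a
 * masked key for a lookup. With 'full' == false only the bytes inside
 * 'mask->range' of 'masked' are valid afterwards, which is exactly the
 * region the hash and compare helpers below read. The helper name is
 * hypothetical.
 */
static inline void example_build_masked_key(struct sw_flow_key *masked,
					    const struct sw_flow_key *unmasked,
					    const struct sw_flow_mask *mask)
{
	ovs_flow_mask_key(masked, unmasked, false, mask);
}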
72 struct sw_flow *ovs_flow_alloc(void)
75 struct sw_flow_stats *stats;
77 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
78 if (!flow)
79 return ERR_PTR(-ENOMEM);
81 flow->stats_last_writer = -1;
83 /* Initialize the default stat node. */
84 stats = kmem_cache_alloc_node(flow_stats_cache,
85 GFP_KERNEL | __GFP_ZERO,
86 node_online(0) ? 0 : NUMA_NO_NODE);
87 if (!stats)
88 goto err;
90 spin_lock_init(&stats->lock);
92 RCU_INIT_POINTER(flow->stats[0], stats);
94 cpumask_set_cpu(0, &flow->cpu_used_mask);
96 return flow;
97 err:
98 kmem_cache_free(flow_cache, flow);
99 return ERR_PTR(-ENOMEM);
100 }
102 int ovs_flow_tbl_count(const struct flow_table *table)
107 static void flow_free(struct sw_flow *flow)
111 if (ovs_identifier_is_key(&flow->id))
112 kfree(flow->id.unmasked_key);
114 ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
115 /* We open code this to make sure cpu 0 is always considered */
116 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
117 if (flow->stats[cpu])
118 kmem_cache_free(flow_stats_cache,
119 (struct sw_flow_stats __force *)flow->stats[cpu]);
120 kmem_cache_free(flow_cache, flow);
123 static void rcu_free_flow_callback(struct rcu_head *rcu)
125 struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
130 void ovs_flow_free(struct sw_flow *flow, bool deferred)
135 if (deferred)
136 call_rcu(&flow->rcu, rcu_free_flow_callback);
137 else
138 flow_free(flow);
139 }
141 static void __table_instance_destroy(struct table_instance *ti)
142 {
143 kvfree(ti->buckets);
144 kfree(ti);
145 }
147 static struct table_instance *table_instance_alloc(int new_size)
149 struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
150 int i;
152 if (!ti)
153 return NULL;
155 ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
156 GFP_KERNEL);
162 for (i = 0; i < new_size; i++)
163 INIT_HLIST_HEAD(&ti->buckets[i]);
165 ti->n_buckets = new_size;
167 ti->keep_flows = false;
168 get_random_bytes(&ti->hash_seed, sizeof(u32));
173 static void __mask_array_destroy(struct mask_array *ma)
175 free_percpu(ma->masks_usage_cntr);
176 kfree(ma);
177 }
179 static void mask_array_rcu_cb(struct rcu_head *rcu)
181 struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
183 __mask_array_destroy(ma);
186 static void tbl_mask_array_reset_counters(struct mask_array *ma)
190 /* As the per-CPU counters are not atomic we cannot safely reset them
191 * from another CPU. To still provide an approximately zero-based
192 * counter we store the value at reset time and subtract it later
193 * when processing.
194 */
195 for (i = 0; i < ma->max; i++) {
196 ma->masks_usage_zero_cntr[i] = 0;
198 for_each_possible_cpu(cpu) {
199 u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
200 cpu);
201 u64 counter;
202 unsigned int start;
204 do {
205 start = u64_stats_fetch_begin_irq(&ma->syncp);
206 counter = usage_counters[i];
207 } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
209 ma->masks_usage_zero_cntr[i] += counter;
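/*
 * Illustrative sketch (not part of the upstream file): when the counters
 * are processed later (see ovs_flow_masks_rebalance()), the value saved
 * at reset time is subtracted, yielding an approximately zero-based
 * count. 'raw' and 'zero_baseline' are hypothetical names.
 */
static inline u64 example_usage_since_reset(u64 raw, u64 zero_baseline)
{
	return raw - zero_baseline;
}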
214 static struct mask_array *tbl_mask_array_alloc(int size)
216 struct mask_array *new;
218 size = max(MASK_ARRAY_SIZE_MIN, size);
219 new = kzalloc(sizeof(struct mask_array) +
220 sizeof(struct sw_flow_mask *) * size +
221 sizeof(u64) * size, GFP_KERNEL);
225 new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
226 sizeof(struct mask_array) +
227 sizeof(struct sw_flow_mask *) *
228 size);
230 new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
231 __alignof__(u64));
232 if (!new->masks_usage_cntr) {
233 kfree(new);
234 return NULL;
235 }
243 static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
245 struct mask_array *old;
246 struct mask_array *new;
248 new = tbl_mask_array_alloc(size);
252 old = ovsl_dereference(tbl->mask_array);
256 for (i = 0; i < old->max; i++) {
257 if (ovsl_dereference(old->masks[i]))
258 new->masks[new->count++] = old->masks[i];
260 call_rcu(&old->rcu, mask_array_rcu_cb);
263 rcu_assign_pointer(tbl->mask_array, new);
268 static int tbl_mask_array_add_mask(struct flow_table *tbl,
269 struct sw_flow_mask *new)
271 struct mask_array *ma = ovsl_dereference(tbl->mask_array);
272 int err, ma_count = READ_ONCE(ma->count);
274 if (ma_count >= ma->max) {
275 err = tbl_mask_array_realloc(tbl, ma->max +
276 MASK_ARRAY_SIZE_MIN);
280 ma = ovsl_dereference(tbl->mask_array);
282 /* On every add or delete we need to reset the counters so
283 * every new mask gets a fair chance of being prioritized.
284 */
285 tbl_mask_array_reset_counters(ma);
288 BUG_ON(ovsl_dereference(ma->masks[ma_count]));
290 rcu_assign_pointer(ma->masks[ma_count], new);
291 WRITE_ONCE(ma->count, ma_count + 1);
296 static void tbl_mask_array_del_mask(struct flow_table *tbl,
297 struct sw_flow_mask *mask)
299 struct mask_array *ma = ovsl_dereference(tbl->mask_array);
300 int i, ma_count = READ_ONCE(ma->count);
302 /* Remove the deleted mask pointers from the array */
303 for (i = 0; i < ma_count; i++) {
304 if (mask == ovsl_dereference(ma->masks[i]))
312 WRITE_ONCE(ma->count, ma_count - 1);
314 rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
315 RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
317 kfree_rcu(mask, rcu);
319 /* Shrink the mask array if necessary. */
320 if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
321 ma_count <= (ma->max / 3))
322 tbl_mask_array_realloc(tbl, ma->max / 2);
324 tbl_mask_array_reset_counters(ma);
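/*
 * Illustrative sketch (not part of the upstream file): the sizing policy
 * used by the add/del paths above. The array grows by MASK_ARRAY_SIZE_MIN
 * slots when full and is halved once it is at least twice the minimum and
 * no more than a third used. The helper name is hypothetical.
 */
static inline int example_mask_array_next_size(int max, int count)
{
	if (count >= max)
		return max + MASK_ARRAY_SIZE_MIN;	/* grow */
	if (max >= (MASK_ARRAY_SIZE_MIN * 2) && count <= (max / 3))
		return max / 2;				/* shrink */
	return max;					/* unchanged */
}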
328 /* Remove 'mask' from the mask list, if it is not needed any more. */
329 static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
332 /* ovs-lock is required to protect mask-refcount and
333 * the mask list.
334 */
336 BUG_ON(!mask->ref_count);
337 mask->ref_count--;
339 if (!mask->ref_count)
340 tbl_mask_array_del_mask(tbl, mask);
344 static void __mask_cache_destroy(struct mask_cache *mc)
346 free_percpu(mc->mask_cache);
347 kfree(mc);
348 }
350 static void mask_cache_rcu_cb(struct rcu_head *rcu)
352 struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
354 __mask_cache_destroy(mc);
357 static struct mask_cache *tbl_mask_cache_alloc(u32 size)
359 struct mask_cache_entry __percpu *cache = NULL;
360 struct mask_cache *new;
362 /* Only allow size to be 0, or a power of 2, and not to exceed
363 * the per-cpu allocation size.
364 */
365 if ((!is_power_of_2(size) && size != 0) ||
366 (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
369 new = kzalloc(sizeof(*new), GFP_KERNEL);
373 new->cache_size = size;
374 if (new->cache_size > 0) {
375 cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
376 new->cache_size),
377 __alignof__(struct mask_cache_entry));
384 new->mask_cache = cache;
387 int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
389 struct mask_cache *mc = rcu_dereference(table->mask_cache);
390 struct mask_cache *new;
392 if (size == mc->cache_size)
395 if ((!is_power_of_2(size) && size != 0) ||
396 (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
399 new = tbl_mask_cache_alloc(size);
403 rcu_assign_pointer(table->mask_cache, new);
404 call_rcu(&mc->rcu, mask_cache_rcu_cb);
409 int ovs_flow_tbl_init(struct flow_table *table)
411 struct table_instance *ti, *ufid_ti;
412 struct mask_cache *mc;
413 struct mask_array *ma;
415 mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
419 ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
420 if (!ma)
421 goto free_mask_cache;
423 ti = table_instance_alloc(TBL_MIN_BUCKETS);
424 if (!ti)
425 goto free_mask_array;
427 ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
428 if (!ufid_ti)
429 goto free_ti;
431 rcu_assign_pointer(table->ti, ti);
432 rcu_assign_pointer(table->ufid_ti, ufid_ti);
433 rcu_assign_pointer(table->mask_array, ma);
434 rcu_assign_pointer(table->mask_cache, mc);
435 table->last_rehash = jiffies;
437 table->ufid_count = 0;
438 return 0;
440 free_ti:
441 __table_instance_destroy(ti);
442 free_mask_array:
443 __mask_array_destroy(ma);
444 free_mask_cache:
445 __mask_cache_destroy(mc);
446 return -ENOMEM;
447 }
449 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
451 struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
453 __table_instance_destroy(ti);
456 static void table_instance_flow_free(struct flow_table *table,
457 struct table_instance *ti,
458 struct table_instance *ufid_ti,
459 struct sw_flow *flow,
462 hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
466 if (ovs_identifier_is_ufid(&flow->id)) {
467 hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
473 flow_mask_remove(table, flow->mask);
476 static void table_instance_destroy(struct flow_table *table,
477 struct table_instance *ti,
478 struct table_instance *ufid_ti,
479 bool deferred)
490 for (i = 0; i < ti->n_buckets; i++) {
491 struct sw_flow *flow;
492 struct hlist_head *head = &ti->buckets[i];
493 struct hlist_node *n;
495 hlist_for_each_entry_safe(flow, n, head,
496 flow_table.node[ti->node_ver]) {
498 table_instance_flow_free(table, ti, ufid_ti,
500 ovs_flow_free(flow, deferred);
505 if (deferred) {
506 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
507 call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
508 } else {
509 __table_instance_destroy(ti);
510 __table_instance_destroy(ufid_ti);
511 }
512 }
514 /* No need for locking: this function is called from the RCU callback
515 * or from the error path.
516 */
517 void ovs_flow_tbl_destroy(struct flow_table *table)
519 struct table_instance *ti = rcu_dereference_raw(table->ti);
520 struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
521 struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
522 struct mask_array *ma = rcu_dereference_raw(table->mask_array);
524 call_rcu(&mc->rcu, mask_cache_rcu_cb);
525 call_rcu(&ma->rcu, mask_array_rcu_cb);
526 table_instance_destroy(table, ti, ufid_ti, false);
529 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
530 u32 *bucket, u32 *last)
532 struct sw_flow *flow;
533 struct hlist_head *head;
538 while (*bucket < ti->n_buckets) {
540 head = &ti->buckets[*bucket];
541 hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
556 static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
558 hash = jhash_1word(hash, ti->hash_seed);
559 return &ti->buckets[hash & (ti->n_buckets - 1)];
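/*
 * Illustrative sketch (not part of the upstream file): find_bucket()
 * relies on n_buckets being a power of two (TBL_MIN_BUCKETS is, and the
 * table only ever doubles), so masking with (n_buckets - 1) is a cheap
 * modulo. The helper name is hypothetical.
 */
static inline u32 example_bucket_index(u32 rehashed, u32 n_buckets)
{
	return rehashed & (n_buckets - 1);
}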
562 static void table_instance_insert(struct table_instance *ti,
563 struct sw_flow *flow)
565 struct hlist_head *head;
567 head = find_bucket(ti, flow->flow_table.hash);
568 hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
571 static void ufid_table_instance_insert(struct table_instance *ti,
572 struct sw_flow *flow)
574 struct hlist_head *head;
576 head = find_bucket(ti, flow->ufid_table.hash);
577 hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
580 static void flow_table_copy_flows(struct table_instance *old,
581 struct table_instance *new, bool ufid)
586 old_ver = old->node_ver;
587 new->node_ver = !old_ver;
589 /* Insert in new table. */
590 for (i = 0; i < old->n_buckets; i++) {
591 struct sw_flow *flow;
592 struct hlist_head *head = &old->buckets[i];
594 if (ufid)
595 hlist_for_each_entry_rcu(flow, head,
596 ufid_table.node[old_ver],
597 lockdep_ovsl_is_held())
598 ufid_table_instance_insert(new, flow);
599 else
600 hlist_for_each_entry_rcu(flow, head,
601 flow_table.node[old_ver],
602 lockdep_ovsl_is_held())
603 table_instance_insert(new, flow);
606 old->keep_flows = true;
609 static struct table_instance *table_instance_rehash(struct table_instance *ti,
610 int n_buckets, bool ufid)
612 struct table_instance *new_ti;
614 new_ti = table_instance_alloc(n_buckets);
618 flow_table_copy_flows(ti, new_ti, ufid);
623 int ovs_flow_tbl_flush(struct flow_table *flow_table)
625 struct table_instance *old_ti, *new_ti;
626 struct table_instance *old_ufid_ti, *new_ufid_ti;
628 new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
631 new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
635 old_ti = ovsl_dereference(flow_table->ti);
636 old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);
638 rcu_assign_pointer(flow_table->ti, new_ti);
639 rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
640 flow_table->last_rehash = jiffies;
641 flow_table->count = 0;
642 flow_table->ufid_count = 0;
644 table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
645 return 0;
647 err_free_ti:
648 __table_instance_destroy(new_ti);
649 return -ENOMEM;
650 }
652 static u32 flow_hash(const struct sw_flow_key *key,
653 const struct sw_flow_key_range *range)
655 const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);
657 /* Make sure the number of hash bytes is a multiple of u32. */
658 int hash_u32s = range_n_bytes(range) >> 2;
660 return jhash2(hash_key, hash_u32s, 0);
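/*
 * Illustrative sketch (not part of the upstream file): flow_hash() only
 * covers the bytes selected by a mask's range, so keys that agree on the
 * masked bits hash identically no matter what the don't-care fields hold.
 * The helper name is hypothetical; it assumes the range length is a
 * multiple of four, as noted above.
 */
static inline u32 example_hash_masked_range(const struct sw_flow_key *masked,
					    const struct sw_flow_key_range *range)
{
	const u32 *start = (const u32 *)((const u8 *)masked + range->start);

	return jhash2(start, (range->end - range->start) >> 2, 0);
}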
663 static int flow_key_start(const struct sw_flow_key *key)
668 return rounddown(offsetof(struct sw_flow_key, phy),
672 static bool cmp_key(const struct sw_flow_key *key1,
673 const struct sw_flow_key *key2,
674 int key_start, int key_end)
676 const long *cp1 = (const long *)((const u8 *)key1 + key_start);
677 const long *cp2 = (const long *)((const u8 *)key2 + key_start);
678 long diffs = 0;
679 int i;
681 for (i = key_start; i < key_end; i += sizeof(long))
682 diffs |= *cp1++ ^ *cp2++;
684 return diffs == 0;
685 }
687 static bool flow_cmp_masked_key(const struct sw_flow *flow,
688 const struct sw_flow_key *key,
689 const struct sw_flow_key_range *range)
691 return cmp_key(&flow->key, key, range->start, range->end);
694 static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
695 const struct sw_flow_match *match)
697 struct sw_flow_key *key = match->key;
698 int key_start = flow_key_start(key);
699 int key_end = match->range.end;
701 BUG_ON(ovs_identifier_is_ufid(&flow->id));
702 return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
705 static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
706 const struct sw_flow_key *unmasked,
707 const struct sw_flow_mask *mask,
710 struct sw_flow *flow;
711 struct hlist_head *head;
712 u32 hash;
713 struct sw_flow_key masked_key;
715 ovs_flow_mask_key(&masked_key, unmasked, false, mask);
716 hash = flow_hash(&masked_key, &mask->range);
717 head = find_bucket(ti, hash);
720 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
721 lockdep_ovsl_is_held()) {
722 if (flow->mask == mask && flow->flow_table.hash == hash &&
723 flow_cmp_masked_key(flow, &masked_key, &mask->range))
729 /* Flow lookup does a full lookup on the flow table. It starts with the
730 * mask at the index passed in via '*index'.
731 */
732 static struct sw_flow *flow_lookup(struct flow_table *tbl,
733 struct table_instance *ti,
734 struct mask_array *ma,
735 const struct sw_flow_key *key,
736 u32 *n_mask_hit,
737 u32 *n_cache_hit,
738 u32 *index)
740 u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
741 struct sw_flow *flow;
742 struct sw_flow_mask *mask;
745 if (likely(*index < ma->max)) {
746 mask = rcu_dereference_ovsl(ma->masks[*index]);
748 flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
750 u64_stats_update_begin(&ma->syncp);
751 usage_counters[*index]++;
752 u64_stats_update_end(&ma->syncp);
759 for (i = 0; i < ma->max; i++) {
764 mask = rcu_dereference_ovsl(ma->masks[i]);
768 flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
769 if (flow) { /* Found */
770 *index = i;
771 u64_stats_update_begin(&ma->syncp);
772 usage_counters[*index]++;
773 u64_stats_update_end(&ma->syncp);
781 /*
782 * mask_cache maps a flow to its probable mask. This cache is not tightly
783 * coupled to the mask list, so updates to the mask list can leave stale
784 * entries in the mask cache.
785 * This is a per-CPU cache divided into MC_HASH_SEGS segments.
786 * In case of a hash collision the entry is hashed into the next segment.
787 */
788 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
789 const struct sw_flow_key *key,
790 u32 skb_hash,
791 u32 *n_mask_hit,
792 u32 *n_cache_hit)
794 struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
795 struct mask_array *ma = rcu_dereference(tbl->mask_array);
796 struct table_instance *ti = rcu_dereference(tbl->ti);
797 struct mask_cache_entry *entries, *ce;
798 struct sw_flow *flow;
804 if (unlikely(!skb_hash || mc->cache_size == 0)) {
805 u32 mask_index = 0;
806 u32 cache = 0;
808 return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
809 &mask_index);
810 }
812 /* Pre- and post-recirculation flows usually have the same skb_hash
813 * value. To avoid hash collisions, rehash the 'skb_hash' with
814 * 'recirc_id'.
815 */
816 skb_hash = jhash_1word(skb_hash, key->recirc_id);
820 entries = this_cpu_ptr(mc->mask_cache);
822 /* Find the cache entry 'ce' to operate on. */
823 for (seg = 0; seg < MC_HASH_SEGS; seg++) {
824 int index = hash & (mc->cache_size - 1);
825 struct mask_cache_entry *e;
827 e = &entries[index];
828 if (e->skb_hash == skb_hash) {
829 flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
830 n_cache_hit, &e->mask_index);
836 if (!ce || e->skb_hash < ce->skb_hash)
837 ce = e; /* A better replacement cache candidate. */
839 hash >>= MC_HASH_SHIFT;
842 /* Cache miss, do full lookup. */
843 flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
844 &ce->mask_index);
845 if (flow)
846 ce->skb_hash = skb_hash;
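/*
 * Illustrative sketch (not part of the upstream file): of the MC_HASH_SEGS
 * slots probed above, the entry with the smallest skb_hash is kept as the
 * replacement victim, so a cache miss overwrites the heuristically least
 * valuable slot. The helper name is hypothetical.
 */
static inline struct mask_cache_entry *
example_pick_victim(struct mask_cache_entry *cur, struct mask_cache_entry *e)
{
	return (!cur || e->skb_hash < cur->skb_hash) ? e : cur;
}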
852 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
853 const struct sw_flow_key *key)
855 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
856 struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
857 u32 __always_unused n_mask_hit;
858 u32 __always_unused n_cache_hit;
859 u32 index = 0;
861 return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
864 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
865 const struct sw_flow_match *match)
867 struct mask_array *ma = ovsl_dereference(tbl->mask_array);
870 /* Always called under ovs-mutex. */
871 for (i = 0; i < ma->max; i++) {
872 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
873 u32 __always_unused n_mask_hit;
874 struct sw_flow_mask *mask;
875 struct sw_flow *flow;
877 mask = ovsl_dereference(ma->masks[i]);
881 flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
882 if (flow && ovs_identifier_is_key(&flow->id) &&
883 ovs_flow_cmp_unmasked_key(flow, match)) {
891 static u32 ufid_hash(const struct sw_flow_id *sfid)
893 return jhash(sfid->ufid, sfid->ufid_len, 0);
896 static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
897 const struct sw_flow_id *sfid)
899 if (flow->id.ufid_len != sfid->ufid_len)
900 return false;
902 return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
905 bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
907 if (ovs_identifier_is_ufid(&flow->id))
908 return flow_cmp_masked_key(flow, match->key, &match->range);
910 return ovs_flow_cmp_unmasked_key(flow, match);
913 struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
914 const struct sw_flow_id *ufid)
916 struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
917 struct sw_flow *flow;
918 struct hlist_head *head;
919 u32 hash;
921 hash = ufid_hash(ufid);
922 head = find_bucket(ti, hash);
923 hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
924 lockdep_ovsl_is_held()) {
925 if (flow->ufid_table.hash == hash &&
926 ovs_flow_cmp_ufid(flow, ufid))
932 int ovs_flow_tbl_num_masks(const struct flow_table *table)
934 struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
935 return READ_ONCE(ma->count);
938 u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
940 struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
942 return READ_ONCE(mc->cache_size);
945 static struct table_instance *table_instance_expand(struct table_instance *ti,
946 bool ufid)
947 {
948 return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
951 /* Must be called with OVS mutex held. */
952 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
954 struct table_instance *ti = ovsl_dereference(table->ti);
955 struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
957 BUG_ON(table->count == 0);
958 table_instance_flow_free(table, ti, ufid_ti, flow, true);
961 static struct sw_flow_mask *mask_alloc(void)
963 struct sw_flow_mask *mask;
965 mask = kmalloc(sizeof(*mask), GFP_KERNEL);
966 if (mask)
967 mask->ref_count = 1;
969 return mask;
970 }
972 static bool mask_equal(const struct sw_flow_mask *a,
973 const struct sw_flow_mask *b)
975 const u8 *a_ = (const u8 *)&a->key + a->range.start;
976 const u8 *b_ = (const u8 *)&b->key + b->range.start;
978 return (a->range.end == b->range.end)
979 && (a->range.start == b->range.start)
980 && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
983 static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
984 const struct sw_flow_mask *mask)
986 struct mask_array *ma;
989 ma = ovsl_dereference(tbl->mask_array);
990 for (i = 0; i < ma->max; i++) {
991 struct sw_flow_mask *t;
992 t = ovsl_dereference(ma->masks[i]);
994 if (t && mask_equal(mask, t))
995 return t;
1001 /* Add 'mask' into the mask list, if it is not already there. */
1002 static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
1003 const struct sw_flow_mask *new)
1005 struct sw_flow_mask *mask;
1007 mask = flow_mask_find(tbl, new);
1008 if (!mask) {
1009 /* Allocate a new mask if none exists. */
1010 mask = mask_alloc();
1011 if (!mask)
1012 return -ENOMEM;
1013 mask->key = new->key;
1014 mask->range = new->range;
1016 /* Add mask to mask-list. */
1017 if (tbl_mask_array_add_mask(tbl, mask)) {
1022 BUG_ON(!mask->ref_count);
1030 /* Must be called with OVS mutex held. */
1031 static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
1033 struct table_instance *new_ti = NULL;
1034 struct table_instance *ti;
1036 flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
1037 ti = ovsl_dereference(table->ti);
1038 table_instance_insert(ti, flow);
1039 table->count++;
1041 /* Expand table, if necessary, to make room. */
1042 if (table->count > ti->n_buckets)
1043 new_ti = table_instance_expand(ti, false);
1044 else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
1045 new_ti = table_instance_rehash(ti, ti->n_buckets, false);
1047 if (new_ti) {
1048 rcu_assign_pointer(table->ti, new_ti);
1049 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
1050 table->last_rehash = jiffies;
1051 }
1052 }
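/*
 * Illustrative sketch (not part of the upstream file): the policy above
 * keeps the load factor around one flow per bucket; even when no growth
 * is needed the table is rehashed with a fresh seed every REHASH_INTERVAL
 * (10 minutes) to break up pathological collisions. The helper name is
 * hypothetical.
 */
static inline bool example_needs_expand(u32 n_flows, u32 n_buckets)
{
	return n_flows > n_buckets;
}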
1054 /* Must be called with OVS mutex held. */
1055 static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
1057 struct table_instance *ti;
1059 flow->ufid_table.hash = ufid_hash(&flow->id);
1060 ti = ovsl_dereference(table->ufid_ti);
1061 ufid_table_instance_insert(ti, flow);
1062 table->ufid_count++;
1064 /* Expand table, if necessary, to make room. */
1065 if (table->ufid_count > ti->n_buckets) {
1066 struct table_instance *new_ti;
1068 new_ti = table_instance_expand(ti, true);
1069 if (new_ti) {
1070 rcu_assign_pointer(table->ufid_ti, new_ti);
1071 call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
1072 }
1076 /* Must be called with OVS mutex held. */
1077 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
1078 const struct sw_flow_mask *mask)
1082 err = flow_mask_insert(table, flow, mask);
1083 if (err)
1084 return err;
1085 flow_key_insert(table, flow);
1086 if (ovs_identifier_is_ufid(&flow->id))
1087 flow_ufid_insert(table, flow);
1092 static int compare_mask_and_count(const void *a, const void *b)
1094 const struct mask_count *mc_a = a;
1095 const struct mask_count *mc_b = b;
1097 return (s64)mc_b->counter - (s64)mc_a->counter;
1100 /* Must be called with OVS mutex held. */
1101 void ovs_flow_masks_rebalance(struct flow_table *table)
1103 struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
1104 struct mask_count *masks_and_count;
1105 struct mask_array *new;
1106 int masks_entries = 0;
1109 /* Build array of all current entries with use counters. */
1110 masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
1111 GFP_KERNEL);
1112 if (!masks_and_count)
1113 return;
1115 for (i = 0; i < ma->max; i++) {
1116 struct sw_flow_mask *mask;
1117 int cpu;
1120 mask = rcu_dereference_ovsl(ma->masks[i]);
1121 if (unlikely(!mask))
1122 break;
1124 masks_and_count[i].index = i;
1125 masks_and_count[i].counter = 0;
1127 for_each_possible_cpu(cpu) {
1128 u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
1129 cpu);
1130 unsigned int start;
1131 u64 counter;
1132 do {
1133 start = u64_stats_fetch_begin_irq(&ma->syncp);
1134 counter = usage_counters[i];
1135 } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
1137 masks_and_count[i].counter += counter;
1140 /* Subtract the zero count value. */
1141 masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];
1143 /* Rather than calling tbl_mask_array_reset_counters()
1144 * below when no change is needed, do it inline here.
1145 */
1146 ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
1150 goto free_mask_entries;
1152 /* Sort the entries */
1154 sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
1155 compare_mask_and_count, NULL);
1157 /* If the order is the same, nothing to do... */
1158 for (i = 0; i < masks_entries; i++) {
1159 if (i != masks_and_count[i].index)
1160 break;
1161 }
1162 if (i == masks_entries)
1163 goto free_mask_entries;
1165 /* Rebuild the new list in order of usage. */
1166 new = tbl_mask_array_alloc(ma->max);
1167 if (!new)
1168 goto free_mask_entries;
1170 for (i = 0; i < masks_entries; i++) {
1171 int index = masks_and_count[i].index;
1173 if (ovsl_dereference(ma->masks[index]))
1174 new->masks[new->count++] = ma->masks[index];
1177 rcu_assign_pointer(table->mask_array, new);
1178 call_rcu(&ma->rcu, mask_array_rcu_cb);
1180 free_mask_entries:
1181 kfree(masks_and_count);
1182 }
1184 /* Initializes the flow module.
1185 * Returns zero if successful or a negative error code. */
1186 int ovs_flow_init(void)
1188 BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
1189 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
1191 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
1192 + (nr_cpu_ids
1193 * sizeof(struct sw_flow_stats *)),
1194 0, 0, NULL);
1195 if (flow_cache == NULL)
1196 return -ENOMEM;
1198 flow_stats_cache
1199 = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
1200 0, SLAB_HWCACHE_ALIGN, NULL);
1201 if (flow_stats_cache == NULL) {
1202 kmem_cache_destroy(flow_cache);
1210 /* Uninitializes the flow module. */
1211 void ovs_flow_exit(void)
1213 kmem_cache_destroy(flow_stats_cache);
1214 kmem_cache_destroy(flow_cache);