// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)
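
/* The mask cache maps a packet's skb hash to the array index of the
 * mask that matched it last time.  With a 32-bit hash and an 8-bit
 * MC_HASH_SHIFT, MC_HASH_SEGS evaluates to 4, so a lookup probes at
 * most four per-CPU cache slots before falling back to a scan of the
 * full mask array.
 */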
#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
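
/* ovs_flow_mask_key() below ANDs key and mask one 'long' at a time;
 * this relies on struct sw_flow_key being sized and aligned to a
 * multiple of sizeof(long), which ovs_flow_init() enforces with
 * BUILD_BUG_ON checks.
 */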
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
					  flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	}

	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
	free_percpu(ma->masks_usage_cntr);
	kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	__mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
	int i, cpu;

	/* As the per CPU counters are not atomic we can not go ahead and
	 * reset them from another CPU. To be able to still have an approximate
	 * zero based counter we store the value at reset, and subtract it
	 * later when processing.
	 */
	for (i = 0; i < ma->max; i++) {
		ma->masks_usage_zero_cntr[i] = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			ma->masks_usage_zero_cntr[i] += counter;
		}
	}
}

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size +
		      sizeof(u64) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
					     sizeof(struct mask_array) +
					     sizeof(struct sw_flow_mask *) *
					     size);

	new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
					       __alignof__(u64));
	if (!new->masks_usage_cntr) {
		kfree(new);
		return NULL;
	}

	new->count = 0;
	new->max = size;

	return new;
}
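
/* Note the single-allocation layout above: the struct mask_array
 * header, the array of mask pointers and the zero-baseline counters
 * share one kzalloc() region, while the hot per-CPU usage counters
 * come from a separate __alloc_percpu() area.
 */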

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
		call_rcu(&old->rcu, mask_array_rcu_cb);
	}

	rcu_assign_pointer(tbl->mask_array, new);

	return 0;
}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int err, ma_count = READ_ONCE(ma->count);

	if (ma_count >= ma->max) {
		err = tbl_mask_array_realloc(tbl, ma->max +
					     MASK_ARRAY_SIZE_MIN);
		if (err)
			return err;

		ma = ovsl_dereference(tbl->mask_array);
	} else {
		/* On every add or delete we need to reset the counters so
		 * every new mask gets a fair chance of being prioritized.
		 */
		tbl_mask_array_reset_counters(ma);
	}

	BUG_ON(ovsl_dereference(ma->masks[ma_count]));

	rcu_assign_pointer(ma->masks[ma_count], new);
	WRITE_ONCE(ma->count, ma_count + 1);

	return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i, ma_count = READ_ONCE(ma->count);

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma_count; i++) {
		if (mask == ovsl_dereference(ma->masks[i]))
			goto found;
	}

	BUG();
	return;

found:
	WRITE_ONCE(ma->count, ma_count - 1);

	rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
	RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

	kfree_rcu(mask, rcu);

	/* Shrink the mask array if necessary. */
	if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
	    ma_count <= (ma->max / 3))
		tbl_mask_array_realloc(tbl, ma->max / 2);
	else
		tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count)
			tbl_mask_array_del_mask(tbl, mask);
	}
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
	free_percpu(mc->mask_cache);
	kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

	__mask_cache_destroy(mc);
}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
	struct mask_cache_entry __percpu *cache = NULL;
	struct mask_cache *new;

	/* Only allow 'size' to be 0 or a power of 2 that does not exceed
	 * the percpu allocation size.
	 */
	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->cache_size = size;
	if (new->cache_size > 0) {
		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
						  new->cache_size),
				       __alignof__(struct mask_cache_entry));
		if (!cache) {
			kfree(new);
			return NULL;
		}
	}

	new->mask_cache = cache;
	return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
	struct mask_cache *mc = rcu_dereference(table->mask_cache);
	struct mask_cache *new;

	if (size == mc->cache_size)
		return 0;

	if ((!is_power_of_2(size) && size != 0) ||
	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
		return -EINVAL;

	new = tbl_mask_cache_alloc(size);
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(table->mask_cache, new);
	call_rcu(&mc->rcu, mask_cache_rcu_cb);

	return 0;
}
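
/* A flow table holds two hash table instances: 'ti', indexed by the
 * masked flow key, and 'ufid_ti', indexed by the unique flow
 * identifier (UFID) that userspace may assign when installing a flow.
 * Both start out with TBL_MIN_BUCKETS buckets.
 */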
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_cache *mc;
	struct mask_array *ma;

	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
	if (!mc)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	rcu_assign_pointer(table->mask_cache, mc);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	__mask_array_destroy(ma);
free_mask_cache:
	__mask_cache_destroy(mc);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti;

	ti = container_of(rcu, struct table_instance, rcu);
	__table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;

	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{
	int i;

	for (i = 0; i < ti->n_buckets; i++) {
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		struct sw_flow *flow;

		hlist_for_each_entry_safe(flow, n, head,
					  flow_table.node[ti->node_ver]) {
			table_instance_flow_free(table, ti, ufid_ti,
						 flow);
			ovs_flow_free(flow, true);
		}
	}

	if (WARN_ON(table->count != 0 ||
		    table->ufid_count != 0)) {
		table->count = 0;
		table->ufid_count = 0;
	}
}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{
	call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from an RCU callback
 * or the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
	struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
	struct mask_array *ma = rcu_dereference_raw(table->mask_array);

	call_rcu(&mc->rcu, mask_cache_rcu_cb);
	call_rcu(&ma->rcu, mask_array_rcu_cb);
	table_instance_destroy(ti, ufid_ti);
}
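
/* ovs_flow_tbl_dump_next() is a resumable iterator: '*bucket' and
 * '*last' form a cursor into the table so a netlink dump can continue
 * where the previous batch stopped.
 */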
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}
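
/* Bucket counts start at TBL_MIN_BUCKETS and only ever double, so
 * 'n_buckets' stays a power of two and the AND above is a cheap
 * modulo.  The per-instance random 'hash_seed' re-mixes the flow hash
 * so bucket placement differs between table instances.
 */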

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry_rcu(flow, head,
						 ufid_table.node[old_ver],
						 lockdep_ovsl_is_held())
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry_rcu(flow, head,
						 flow_table.node[old_ver],
						 lockdep_ovsl_is_held())
				table_instance_insert(new, flow);
	}
}
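
/* Each flow embeds two hlist nodes per table (node[0] and node[1]);
 * flipping 'node_ver' lets flow_table_copy_flows() link every flow
 * into the new instance while RCU readers still traverse the old one.
 */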

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;

	table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
	table_instance_destroy(old_ti, old_ufid_ti);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	/* Make sure the number of hash bytes is a multiple of u32. */
	int hash_u32s = range_n_bytes(range) >> 2;

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
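
/* The comparison loop above is deliberately branch-free: word-sized
 * XOR differences are OR-ed into 'diffs' and tested once at the end,
 * instead of comparing and branching on every word.
 */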

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

/* Flow lookup does a full lookup of the flow table. It starts with
 * the mask at the index passed in '*index'.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{
	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
	struct sw_flow *flow;
	struct sw_flow_mask *mask;
	int i;

	if (likely(*index < ma->max)) {
		mask = rcu_dereference_ovsl(ma->masks[*index]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) {
				u64_stats_update_begin(&ma->syncp);
				usage_counters[*index]++;
				u64_stats_update_end(&ma->syncp);
				(*n_cache_hit)++;
				return flow;
			}
		}
	}

	for (i = 0; i < ma->max; i++) {
		if (i == *index)
			continue;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) { /* Found */
			*index = i;
			u64_stats_update_begin(&ma->syncp);
			usage_counters[*index]++;
			u64_stats_update_end(&ma->syncp);
			return flow;
		}
	}

	return NULL;
}

/*
 * mask_cache maps a flow to a probable mask.  This cache is not tightly
 * coupled: updates to the mask list can leave stale entries behind in
 * the mask cache.
 * The cache is per CPU and is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{
	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
	struct mask_array *ma = rcu_dereference(tbl->mask_array);
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct mask_cache_entry *entries, *ce;
	struct sw_flow *flow;
	u32 hash;
	int seg;

	*n_mask_hit = 0;
	*n_cache_hit = 0;
	if (unlikely(!skb_hash || mc->cache_size == 0)) {
		u32 mask_index = 0;
		u32 cache = 0;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
				   &mask_index);
	}

	/* Pre and post recirculation flows usually have the same skb_hash
	 * value. To avoid hash collisions, rehash the 'skb_hash' with
	 * 'recirc_id'.
	 */
	if (key->recirc_id)
		skb_hash = jhash_1word(skb_hash, key->recirc_id);

	ce = NULL;
	hash = skb_hash;
	entries = this_cpu_ptr(mc->mask_cache);

	/* Find the cache entry 'ce' to operate on. */
	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index = hash & (mc->cache_size - 1);
		struct mask_cache_entry *e;

		e = &entries[index];
		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   n_cache_hit, &e->mask_index);
			if (!flow)
				e->skb_hash = 0;
			return flow;
		}

		if (!ce || e->skb_hash < ce->skb_hash)
			ce = e;  /* A better replacement cache candidate. */

		hash >>= MC_HASH_SHIFT;
	}

	/* Cache miss, do full lookup. */
	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
			   &ce->mask_index);
	if (flow)
		ce->skb_hash = skb_hash;

	/* Cache miss. */
	*n_cache_hit = 0;
	return flow;
}
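
/* Replacement policy: among the MC_HASH_SEGS probed slots, the one
 * with the numerically smallest skb_hash is recycled on a miss, a
 * cheap approximation of random eviction that needs no extra state.
 */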

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused n_cache_hit;
	u32 index = 0;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
				 lockdep_ovsl_is_held()) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
	struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

	return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
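
/* Masks are deduplicated: flows with identical masks share a single
 * refcounted sw_flow_mask, which keeps the mask array (and therefore
 * the per-packet lookup cost) small.
 */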

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
	const struct mask_count *mc_a = a;
	const struct mask_count *mc_b = b;

	return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
	struct mask_count *masks_and_count;
	struct mask_array *new;
	int masks_entries = 0;
	int i;

	/* Build array of all current entries with use counters. */
	masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
					GFP_KERNEL);
	if (!masks_and_count)
		return;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		int cpu;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (unlikely(!mask))
			break;

		masks_and_count[i].index = i;
		masks_and_count[i].counter = 0;

		for_each_possible_cpu(cpu) {
			u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
							  cpu);
			unsigned int start;
			u64 counter;

			do {
				start = u64_stats_fetch_begin_irq(&ma->syncp);
				counter = usage_counters[i];
			} while (u64_stats_fetch_retry_irq(&ma->syncp, start));

			masks_and_count[i].counter += counter;
		}

		/* Subtract the zero count value. */
		masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

		/* Rather than calling tbl_mask_array_reset_counters()
		 * below when no change is needed, do it inline here.
		 */
		ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
	}

	if (i == 0)
		goto free_mask_entries;

	/* Sort the entries */
	masks_entries = i;
	sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
	     compare_mask_and_count, NULL);

	/* If the order is the same, nothing to do... */
	for (i = 0; i < masks_entries; i++) {
		if (i != masks_and_count[i].index)
			break;
	}
	if (i == masks_entries)
		goto free_mask_entries;

	/* Rebuild the new list in order of usage. */
	new = tbl_mask_array_alloc(ma->max);
	if (!new)
		goto free_mask_entries;

	for (i = 0; i < masks_entries; i++) {
		int index = masks_and_count[i].index;

		if (ovsl_dereference(ma->masks[index]))
			new->masks[new->count++] = ma->masks[index];
	}

	rcu_assign_pointer(table->mask_array, new);
	call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
	kfree(masks_and_count);
}
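
/* Rebalancing sorts the mask array by observed hit counts so that
 * flow_lookup() tries the hottest masks first.  It is expected to run
 * from a periodic work item (see datapath.c), never on the per-packet
 * path.
 */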

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}