// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
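
/* The per-CPU mask cache (see ovs_flow_tbl_lookup_stats()) holds
 * MC_HASH_ENTRIES entries; a lookup probes at most MC_HASH_SEGS of them,
 * consuming MC_HASH_SHIFT bits of the skb hash per probe.
 */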

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}
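
/* ovs_flow_mask_key() copies and masks the key one long word at a time;
 * ovs_flow_init() has BUILD_BUG_ON()s ensuring struct sw_flow_key is
 * long-aligned and a multiple of sizeof(long) in size.
 */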
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{
	int start = full ? 0 : mask->range.start;
	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
	const long *m = (const long *)((const u8 *)&mask->key + start);
	const long *s = (const long *)((const u8 *)src + start);
	long *d = (long *)((u8 *)dst + start);
	int i;

	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
	 * if 'full' is false the memory outside of the 'mask->range' is left
	 * uninitialized. This can be used as an optimization when further
	 * operations on 'dst' only use contents within 'mask->range'.
	 */
	for (i = 0; i < len; i += sizeof(long))
		*d++ = *s++ & *m++;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct sw_flow_stats *stats;

	flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->stats_last_writer = -1;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO,
				      node_online(0) ? 0 : NUMA_NO_NODE);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	cpumask_set_cpu(0, &flow->cpu_used_mask);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{
	int cpu;

	if (ovs_identifier_is_key(&flow->id))
		kfree(flow->id.unmasked_key);
	if (flow->sf_acts)
		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kmem_cache_free(flow_stats_cache,
					(struct sw_flow_stats __force *)flow->stats[cpu]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	kvfree(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
	int i;

	if (!ti)
		return NULL;

	ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
				     GFP_KERNEL);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}

	for (i = 0; i < new_size; i++)
		INIT_HLIST_HEAD(&ti->buckets[i]);

	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	size = max(MASK_ARRAY_SIZE_MIN, size);
	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
	}

	rcu_assign_pointer(tbl->mask_array, new);
	kfree_rcu(old, rcu);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti, *ufid_ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ufid_ti)
		goto free_ti;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->ufid_ti, ufid_ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	table->ufid_count = 0;
	return 0;

free_ti:
	__table_instance_destroy(ti);
free_mask_array:
	kfree(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}
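
/* table_instance_destroy() frees the flows linked into 'ti' unless
 * 'keep_flows' is set (the rehash path sets it after moving the flows to a
 * new instance), then releases both instances either immediately or after
 * an RCU grace period, depending on 'deferred'.
 */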
static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti,
				   bool deferred)
{
	int i;

	if (!ti)
		return;

	BUG_ON(!ufid_ti);
	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &ti->buckets[i];
		struct hlist_node *n;
		int ver = ti->node_ver;
		int ufid_ver = ufid_ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) {
			hlist_del_rcu(&flow->flow_table.node[ver]);
			if (ovs_identifier_is_ufid(&flow->id))
				hlist_del_rcu(&flow->ufid_table.node[ufid_ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred) {
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
	} else {
		__table_instance_destroy(ti);
		__table_instance_destroy(ufid_ti);
	}
}

/* No need for locking: this function is called from an RCU callback or
 * from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = rcu_dereference_raw(table->ti);
	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);

	free_percpu(table->mask_cache);
	kfree_rcu(rcu_dereference_raw(table->mask_array), rcu);
	table_instance_destroy(ti, ufid_ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = &ti->buckets[*bucket];
		hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->flow_table.hash);
	hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->ufid_table.hash);
	hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
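
/* Each flow carries two hlist_node slots per table (node[0]/node[1]).
 * flow_table_copy_flows() links the flows into the new instance under the
 * flipped node_ver, so the old and new instances can coexist while RCU
 * readers are still walking the old one, and marks the old instance with
 * 'keep_flows' so destroying it does not free the flows themselves.
 */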
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = &old->buckets[i];

		if (ufid)
			hlist_for_each_entry(flow, head,
					     ufid_table.node[old_ver])
				ufid_table_instance_insert(new, flow);
		else
			hlist_for_each_entry(flow, head,
					     flow_table.node[old_ver])
				table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti, ufid);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti, *new_ti;
	struct table_instance *old_ufid_ti, *new_ufid_ti;

	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;
	new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ufid_ti)
		goto err_free_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

	rcu_assign_pointer(flow_table->ti, new_ti);
	rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;
	flow_table->ufid_count = 0;

	table_instance_destroy(old_ti, old_ufid_ti, true);
	return 0;

err_free_ti:
	__table_instance_destroy(new_ti);
	return -ENOMEM;
}
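
/* Only the bytes inside 'range' (i.e. the part of the key covered by a
 * mask) contribute to the hash, so a stored flow and a masked lookup key
 * hash to the same value.
 */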
static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	int key_start = range->start;
	int key_end = range->end;
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_proto)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{
	return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	BUG_ON(ovs_identifier_is_ufid(&flow->id));
	return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
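
/* Lookup with one specific mask: mask the key, hash the masked range and
 * scan a single bucket. '*n_mask_hit' counts how many masks were tried.
 */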
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
	hash = flow_hash(&masked_key, &mask->range);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;

	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
		if (flow->mask == mask && flow->flow_table.hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key, &mask->range))
			return flow;
	}
	return NULL;
}

static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *index)
{
	struct sw_flow *flow;
	int i;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) { /* Found */
				*index = i;
				return flow;
			}
		}
	}

	return NULL;
}
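
/* flow_lookup() tries the masks in the mask array in order until one of
 * the per-mask lookups matches; '*index' returns the position of the
 * matching mask so callers can cache it.
 */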

/* mask_cache maps a flow to its probable mask. The cache is not tightly
 * coupled to the mask list, so updates to the mask list can leave stale
 * entries in the cache.
 * This is a per-CPU cache and is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit)
{
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_cache_entry *entries, *ce, *del;
	struct sw_flow *flow;
	u32 hash = skb_hash;
	int seg;

	*n_mask_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 __always_unused mask_index;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
	}

	del = NULL;
	entries = this_cpu_ptr(tbl->mask_cache);

	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index;

		index = hash & (MC_HASH_ENTRIES - 1);
		ce = &entries[index];

		if (ce->skb_hash == skb_hash) {
			struct sw_flow_mask *mask;
			struct sw_flow *flow;

			mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
			if (mask) {
				flow = masked_flow_lookup(ti, key, mask,
							  n_mask_hit);
				if (flow)	/* Found */
					return flow;
			}

			del = ce;
			break;
		}

		if (!del || (del->skb_hash && !ce->skb_hash) ||
		    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
		     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
			del = ce;
		}

		hash >>= MC_HASH_SHIFT;
	}

	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);

	if (flow)	/* Found */
		del->skb_hash = skb_hash;

	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused index;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{
	struct mask_array *ma = ovsl_dereference(tbl->mask_array);
	int i;

	/* Always called under ovs-mutex. */
	for (i = 0; i < ma->max; i++) {
		struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
		u32 __always_unused n_mask_hit;
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = ovsl_dereference(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
		if (flow && ovs_identifier_is_key(&flow->id) &&
		    ovs_flow_cmp_unmasked_key(flow, match)) {
			return flow;
		}
	}

	return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{
	if (flow->id.ufid_len != sfid->ufid_len)
		return false;

	return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return flow_cmp_masked_key(flow, match->key, &match->range);

	return ovs_flow_cmp_unmasked_key(flow, match);
}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
	struct sw_flow *flow;
	struct hlist_head *head;
	u32 hash;

	hash = ufid_hash(ufid);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
		if (flow->ufid_table.hash == hash &&
		    ovs_flow_cmp_ufid(flow, ufid))
			return flow;
	}
	return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

	return ma->count;
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{
	return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}
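
/* Deleting a mask leaves a NULL hole in the mask array; flow_mask_insert()
 * reuses the first free slot and tbl_mask_array_realloc() compacts the
 * remaining masks when the array is resized.
 */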
static void tbl_mask_array_delete_mask(struct mask_array *ma,
				       struct sw_flow_mask *mask)
{
	int i;

	/* Remove the deleted mask pointers from the array */
	for (i = 0; i < ma->max; i++) {
		if (mask == ovsl_dereference(ma->masks[i])) {
			RCU_INIT_POINTER(ma->masks[i], NULL);
			ma->count--;
			kfree_rcu(mask, rcu);
			return;
		}
	}
	BUG();
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			struct mask_array *ma;

			ma = ovsl_dereference(tbl->mask_array);
			tbl_mask_array_delete_mask(ma, mask);

			/* Shrink the mask array if necessary. */
			if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
			    ma->count <= (ma->max / 3))
				tbl_mask_array_realloc(tbl, ma->max / 2);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);
	struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
	table->count--;
	if (ovs_identifier_is_ufid(&flow->id)) {
		hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
		table->ufid_count--;
	}

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}
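
/* Masks are reference counted and shared between flows: mask_alloc()
 * starts at a ref_count of 1, flow_mask_insert() bumps it when an equal
 * mask already exists, and flow_mask_remove() drops it, deleting the mask
 * from the array once the count reaches zero.
 */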
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		struct mask_array *ma;
		int i;

		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		ma = ovsl_dereference(tbl->mask_array);
		if (ma->count >= ma->max) {
			int err;

			err = tbl_mask_array_realloc(tbl, ma->max +
						     MASK_ARRAY_SIZE_MIN);
			if (err) {
				kfree(mask);
				return err;
			}
			ma = ovsl_dereference(tbl->mask_array);
		}

		for (i = 0; i < ma->max; i++) {
			const struct sw_flow_mask *t;

			t = ovsl_dereference(ma->masks[i]);
			if (!t) {
				rcu_assign_pointer(ma->masks[i], mask);
				break;
			}
		}
		ma->count++;
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
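
/* flow_key_insert() doubles the bucket count once the number of flows
 * exceeds the number of buckets; otherwise it rehashes into a same-sized
 * instance with a fresh random seed every REHASH_INTERVAL.
 */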
/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;

	flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti, false);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets, false);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		table->last_rehash = jiffies;
	}
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti;

	flow->ufid_table.hash = ufid_hash(&flow->id);
	ti = ovsl_dereference(table->ufid_ti);
	ufid_table_instance_insert(ti, flow);
	table->ufid_count++;

	/* Expand table, if necessary, to make room. */
	if (table->ufid_count > ti->n_buckets) {
		struct table_instance *new_ti;

		new_ti = table_instance_expand(ti, true);
		if (new_ti) {
			rcu_assign_pointer(table->ufid_ti, new_ti);
			call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;
	flow_key_insert(table, flow);
	if (ovs_identifier_is_ufid(&flow->id))
		flow_ufid_insert(table, flow);

	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (nr_cpu_ids
					  * sizeof(struct sw_flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}