diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index e223584..87c286a 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -111,12 +111,16 @@ static void flow_free(struct sw_flow *flow)
        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        if (flow->sf_acts)
-               ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
+               ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
+                                         flow->sf_acts);
        /* We open code this to make sure cpu 0 is always considered */
-       for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
+       for (cpu = 0; cpu < nr_cpu_ids;
+            cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
                if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
                                        (struct sw_flow_stats __force *)flow->stats[cpu]);
+       }
+
        kmem_cache_free(flow_cache, flow);
 }
 
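Aside, not part of the patch: the re-wrapped loop above keeps the behaviour the comment describes. Iteration starts at cpu 0 unconditionally and only then follows cpu_used_mask, so cpu 0's stats are freed even when bit 0 is never set in the mask. A minimal user-space sketch of that open-coded walk, with a plain bitmask standing in for struct cpumask and a hypothetical next_set_bit() helper in place of cpumask_next():

#include <stdio.h>

#define NR_IDS 8

/* Hypothetical helper: index of the next set bit strictly after 'n',
 * or NR_IDS when there is none (mirrors what cpumask_next() does). */
static int next_set_bit(unsigned int mask, int n)
{
        for (int i = n + 1; i < NR_IDS; i++)
                if (mask & (1u << i))
                        return i;
        return NR_IDS;
}

int main(void)
{
        unsigned int used_mask = 0x24;  /* bits 2 and 5; bit 0 deliberately clear */
        int cpu;

        /* Open coded so that slot 0 is always visited, mask or no mask. */
        for (cpu = 0; cpu < NR_IDS; cpu = next_set_bit(used_mask, cpu))
                printf("freeing per-cpu slot %d\n", cpu);       /* prints 0, 2, 5 */

        return 0;
}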
@@ -164,7 +168,6 @@ static struct table_instance *table_instance_alloc(int new_size)
 
        ti->n_buckets = new_size;
        ti->node_ver = 0;
-       ti->keep_flows = false;
        get_random_bytes(&ti->hash_seed, sizeof(u32));
 
        return ti;
@@ -192,7 +195,7 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
         * zero based counter we store the value at reset, and subtract it
         * later when processing.
         */
-       for (i = 0; i < ma->max; i++)  {
+       for (i = 0; i < ma->max; i++) {
                ma->masks_usage_zero_cntr[i] = 0;
 
                for_each_possible_cpu(cpu) {
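Side note, not part of the diff: the comment in this hunk is the whole trick. The per-CPU usage counters are never written back to zero; a snapshot is stored at reset time and subtracted when the stats are processed. A stand-alone sketch of that scheme, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical counter pair: 'raw' only ever grows, 'zero_cntr' remembers
 * where "zero" was moved to at the last logical reset. */
struct usage_counter {
        uint64_t raw;
        uint64_t zero_cntr;
};

static void counter_reset(struct usage_counter *c)
{
        c->zero_cntr = c->raw;          /* store the value at reset ...      */
}

static uint64_t counter_read(const struct usage_counter *c)
{
        return c->raw - c->zero_cntr;   /* ... and subtract it when reading  */
}

int main(void)
{
        struct usage_counter c = { .raw = 0, .zero_cntr = 0 };

        c.raw += 40;                    /* some usage accumulates            */
        counter_reset(&c);              /* logical reset, raw value is kept  */
        c.raw += 2;                     /* more usage after the reset        */
        printf("usage since reset: %llu\n",
               (unsigned long long)counter_read(&c));   /* prints 2 */
        return 0;
}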
@@ -273,7 +276,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
 
        if (ma_count >= ma->max) {
                err = tbl_mask_array_realloc(tbl, ma->max +
-                                             MASK_ARRAY_SIZE_MIN);
+                                                 MASK_ARRAY_SIZE_MIN);
                if (err)
                        return err;
 
@@ -288,7 +291,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
        BUG_ON(ovsl_dereference(ma->masks[ma_count]));
 
        rcu_assign_pointer(ma->masks[ma_count], new);
-       WRITE_ONCE(ma->count, ma_count +1);
+       WRITE_ONCE(ma->count, ma_count + 1);
 
        return 0;
 }
@@ -309,10 +312,10 @@ static void tbl_mask_array_del_mask(struct flow_table *tbl,
        return;
 
 found:
-       WRITE_ONCE(ma->count, ma_count -1);
+       WRITE_ONCE(ma->count, ma_count - 1);
 
-       rcu_assign_pointer(ma->masks[i], ma->masks[ma_count -1]);
-       RCU_INIT_POINTER(ma->masks[ma_count -1], NULL);
+       rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
+       RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
 
        kfree_rcu(mask, rcu);
 
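For context (the hunk itself only fixes spacing), the removal scheme works by swapping with the last entry: the published count shrinks first, the vacated slot is overwritten with the last live mask, and the old tail slot is cleared, so the array stays dense without shifting entries. A plain-pointer sketch of that swap-with-last delete, without the RCU publication primitives and with hypothetical names:

#include <stddef.h>

/* Hypothetical dense pointer array; mirrors the mask-array removal order. */
struct ptr_array {
        size_t count;
        void *slots[16];
};

static void ptr_array_del(struct ptr_array *pa, size_t i)
{
        if (i >= pa->count)
                return;

        pa->count--;                            /* shrink the visible count first    */
        pa->slots[i] = pa->slots[pa->count];    /* move the last entry into the hole */
        pa->slots[pa->count] = NULL;            /* clear the now-unused tail slot    */
}

int main(void)
{
        int a = 1, b = 2, c = 3;
        struct ptr_array pa = { .count = 3, .slots = { &a, &b, &c } };

        ptr_array_del(&pa, 0);          /* &c moves into slot 0, count drops to 2 */
        return pa.count == 2 ? 0 : 1;
}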
@@ -448,26 +451,23 @@ free_mask_cache:
 
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 {
-       struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
+       struct table_instance *ti;
 
+       ti = container_of(rcu, struct table_instance, rcu);
        __table_instance_destroy(ti);
 }
 
 static void table_instance_flow_free(struct flow_table *table,
-                                 struct table_instance *ti,
-                                 struct table_instance *ufid_ti,
-                                 struct sw_flow *flow,
-                                 bool count)
+                                    struct table_instance *ti,
+                                    struct table_instance *ufid_ti,
+                                    struct sw_flow *flow)
 {
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
-       if (count)
-               table->count--;
+       table->count--;
 
        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
-
-               if (count)
-                       table->ufid_count--;
+               table->ufid_count--;
        }
 
        flow_mask_remove(table, flow->mask);
@@ -480,22 +480,25 @@ void table_instance_flow_flush(struct flow_table *table,
 {
        int i;
 
-       if (ti->keep_flows)
-               return;
-
        for (i = 0; i < ti->n_buckets; i++) {
-               struct sw_flow *flow;
                struct hlist_head *head = &ti->buckets[i];
                struct hlist_node *n;
+               struct sw_flow *flow;
 
                hlist_for_each_entry_safe(flow, n, head,
                                          flow_table.node[ti->node_ver]) {
 
                        table_instance_flow_free(table, ti, ufid_ti,
-                                                flow, false);
+                                                flow);
                        ovs_flow_free(flow, true);
                }
        }
+
+       if (WARN_ON(table->count != 0 ||
+                   table->ufid_count != 0)) {
+               table->count = 0;
+               table->ufid_count = 0;
+       }
 }
 
 static void table_instance_destroy(struct table_instance *ti,
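Annotation, not part of the patch: with the per-flow accounting now unconditional, a full flush must leave both counters at zero, and the added WARN_ON makes any imbalance loud while still forcing the counters back to a sane value so the table keeps working. A user-space sketch of that warn-then-repair pattern, with a hypothetical warn_on() standing in for the kernel macro:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's WARN_ON(): report the condition,
 * then return its truth value so the caller can repair state and go on. */
#define warn_on(cond) \
        ((cond) ? (fprintf(stderr, "warning: %s:%d: %s\n", \
                           __FILE__, __LINE__, #cond), 1) : 0)

struct flow_counters {
        int count;
        int ufid_count;
};

static void counters_check_after_flush(struct flow_counters *c)
{
        /* After a flush both counters must be zero; if not, report the
         * imbalance once and repair it so later accounting stays sane. */
        if (warn_on(c->count != 0 || c->ufid_count != 0)) {
                c->count = 0;
                c->ufid_count = 0;
        }
}

int main(void)
{
        struct flow_counters c = { .count = 3, .ufid_count = 0 };

        counters_check_after_flush(&c); /* warns, then resets both counters */
        printf("count=%d ufid_count=%d\n", c.count, c.ufid_count);
        return 0;
}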
@@ -596,8 +599,6 @@ static void flow_table_copy_flows(struct table_instance *old,
                                                 lockdep_ovsl_is_held())
                                table_instance_insert(new, flow);
        }
-
-       old->keep_flows = true;
 }
 
 static struct table_instance *table_instance_rehash(struct table_instance *ti,
@@ -632,8 +633,6 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;
-       flow_table->count = 0;
-       flow_table->ufid_count = 0;
 
        table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
        table_instance_destroy(old_ti, old_ufid_ti);
@@ -661,7 +660,7 @@ static int flow_key_start(const struct sw_flow_key *key)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
-                                         sizeof(long));
+                                sizeof(long));
 }
 
 static bool cmp_key(const struct sw_flow_key *key1,
@@ -673,7 +672,7 @@ static bool cmp_key(const struct sw_flow_key *key1,
        long diffs = 0;
        int i;
 
-       for (i = key_start; i < key_end;  i += sizeof(long))
+       for (i = key_start; i < key_end; i += sizeof(long))
                diffs |= *cp1++ ^ *cp2++;
 
        return diffs == 0;
@@ -713,7 +712,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
        (*n_mask_hit)++;
 
        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
-                               lockdep_ovsl_is_held()) {
+                                lockdep_ovsl_is_held()) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
@@ -897,7 +896,8 @@ static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
 }
 
-bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
+bool ovs_flow_cmp(const struct sw_flow *flow,
+                 const struct sw_flow_match *match)
 {
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);
@@ -916,7 +916,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
-                               lockdep_ovsl_is_held()) {
+                                lockdep_ovsl_is_held()) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
@@ -950,7 +950,7 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
 
        BUG_ON(table->count == 0);
-       table_instance_flow_free(table, ti, ufid_ti, flow, true);
+       table_instance_flow_free(table, ti, ufid_ti, flow);
 }
 
 static struct sw_flow_mask *mask_alloc(void)
@@ -1107,7 +1107,7 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
        if (!masks_and_count)
                return;
 
-       for (i = 0; i < ma->max; i++)  {
+       for (i = 0; i < ma->max; i++) {
                struct sw_flow_mask *mask;
                unsigned int start;
                int cpu;