diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 0ba686b..e59eda0 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <linux/percpu-refcount.h>
 
 /*
@@ -64,18 +65,25 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));
        unsigned long start_count = 0;
+       struct percpu_ref_data *data;
 
        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;
 
-       ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
-       ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
+       data = kzalloc(sizeof(*ref->data), gfp);
+       if (!data) {
+               free_percpu((void __percpu *)ref->percpu_count_ptr);
+               return -ENOMEM;
+       }
+
+       data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+       data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
 
        if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-               ref->allow_reinit = true;
+               data->allow_reinit = true;
        } else {
                start_count += PERCPU_COUNT_BIAS;
        }
@@ -85,14 +93,28 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
        else
                start_count++;
 
-       atomic_long_set(&ref->count, start_count);
+       atomic_long_set(&data->count, start_count);
 
-       ref->release = release;
-       ref->confirm_switch = NULL;
+       data->release = release;
+       data->confirm_switch = NULL;
+       data->ref = ref;
+       ref->data = data;
        return 0;
 }
 EXPORT_SYMBOL_GPL(percpu_ref_init);
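
For orientation, a minimal lifecycle sketch of the API being reworked here; the my_dev structure, its fields, and the helper names are hypothetical. percpu_ref_init() now allocates two objects, the per-cpu counter and the split-out percpu_ref_data, and both stay owned by the ref until percpu_ref_exit():

	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>

	struct my_dev {
		struct percpu_ref ref;
		struct completion freed;
	};

	static void my_dev_release(struct percpu_ref *ref)
	{
		struct my_dev *dev = container_of(ref, struct my_dev, ref);

		complete(&dev->freed);		/* last reference dropped */
	}

	static int my_dev_setup(struct my_dev *dev)
	{
		init_completion(&dev->freed);
		/* allocates the per-cpu counter and, with this patch, ref->data */
		return percpu_ref_init(&dev->ref, my_dev_release, 0, GFP_KERNEL);
	}

	static void my_dev_teardown(struct my_dev *dev)
	{
		percpu_ref_kill(&dev->ref);	/* drop the initial reference */
		wait_for_completion(&dev->freed);
		percpu_ref_exit(&dev->ref);	/* frees counter and ref->data */
	}

Note that the new kzalloc() adds one more way for init to fail, but the error contract (-ENOMEM) is unchanged.
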
 
+static void __percpu_ref_exit(struct percpu_ref *ref)
+{
+       unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+
+       if (percpu_count) {
+               /* non-NULL confirm_switch indicates switching in progress */
+               WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
+               free_percpu(percpu_count);
+               ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
+       }
+}
+
 /**
  * percpu_ref_exit - undo percpu_ref_init()
  * @ref: percpu_ref to exit
@@ -105,27 +127,36 @@ EXPORT_SYMBOL_GPL(percpu_ref_init);
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-       unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+       struct percpu_ref_data *data = ref->data;
+       unsigned long flags;
 
-       if (percpu_count) {
-               /* non-NULL confirm_switch indicates switching in progress */
-               WARN_ON_ONCE(ref->confirm_switch);
-               free_percpu(percpu_count);
-               ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
-       }
+       __percpu_ref_exit(ref);
+
+       if (!data)
+               return;
+
+       spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+       ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
+               __PERCPU_REF_FLAG_BITS;
+       ref->data = NULL;
+       spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+       kfree(data);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
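
The |= above is the subtle part of this hunk: before ->data is freed, the final value of data->count is parked in the bits of percpu_count_ptr above the two flag bits, which is what lets percpu_ref_is_zero() (added further down) keep answering after exit. An illustrative sketch of the encoding, not verbatim kernel code, assuming the count fits in BITS_PER_LONG - 2 bits:

	/*
	 * Layout of percpu_count_ptr after percpu_ref_exit():
	 *   bit 0                    __PERCPU_REF_ATOMIC
	 *   bit 1                    __PERCPU_REF_DEAD
	 *   bits 2..BITS_PER_LONG-1  final value of data->count
	 */
	unsigned long encoded = __PERCPU_REF_ATOMIC_DEAD |
				(count << __PERCPU_REF_FLAG_BITS);
	unsigned long final_count = encoded >> __PERCPU_REF_FLAG_BITS; /* == count */
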
 
 static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
 {
-       struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+       struct percpu_ref_data *data = container_of(rcu,
+                       struct percpu_ref_data, rcu);
+       struct percpu_ref *ref = data->ref;
 
-       ref->confirm_switch(ref);
-       ref->confirm_switch = NULL;
+       data->confirm_switch(ref);
+       data->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);
 
-       if (!ref->allow_reinit)
-               percpu_ref_exit(ref);
+       if (!data->allow_reinit)
+               __percpu_ref_exit(ref);
 
        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
@@ -133,7 +164,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
 
 static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
 {
-       struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+       struct percpu_ref_data *data = container_of(rcu,
+                       struct percpu_ref_data, rcu);
+       struct percpu_ref *ref = data->ref;
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
        int cpu;
@@ -142,7 +175,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
                count += *per_cpu_ptr(percpu_count, cpu);
 
        pr_debug("global %lu percpu %lu\n",
-                atomic_long_read(&ref->count), count);
+                atomic_long_read(&data->count), count);
 
        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
@@ -156,11 +189,11 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
         * reaching 0 before we add the percpu counts. But doing it at the same
         * time is equivalent and saves us atomic operations:
         */
-       atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
+       atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
 
-       WARN_ONCE(atomic_long_read(&ref->count) <= 0,
+       WARN_ONCE(atomic_long_read(&data->count) <= 0,
                  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
-                 ref->release, atomic_long_read(&ref->count));
+                 data->release, atomic_long_read(&data->count));
 
        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
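
A worked example of the bias arithmetic described in the comment above; the per-cpu values are invented, and PERCPU_COUNT_BIAS is 1UL << (BITS_PER_LONG - 1) in this file:

	/*
	 * One reference outstanding; in percpu mode data->count still
	 * holds PERCPU_COUNT_BIAS + 1 from percpu_ref_init().
	 */
	unsigned long cpu0 = 3;			/* CPU0 saw three gets */
	unsigned long cpu1 = -3UL;		/* CPU1 saw three puts, wrapped */
	unsigned long sum = cpu0 + cpu1;	/* wraparound cancels: sum == 0 */

	/* the atomic_long_add() above then computes: */
	long live = (long)(PERCPU_COUNT_BIAS + 1 + sum - PERCPU_COUNT_BIAS);
	/* live == 1, the single outstanding reference */
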
@@ -186,10 +219,11 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
         * Non-NULL ->confirm_switch is used to indicate that switching is
         * in progress.  Use the noop callback if unspecified.
         */
-       ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+       ref->data->confirm_switch = confirm_switch ?:
+               percpu_ref_noop_confirm_switch;
 
        percpu_ref_get(ref);    /* put after confirmation */
-       call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+       call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
 }
 
 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -202,10 +236,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;
 
-       if (WARN_ON_ONCE(!ref->allow_reinit))
+       if (WARN_ON_ONCE(!ref->data->allow_reinit))
                return;
 
-       atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
+       atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
 
        /*
         * Restore per-cpu operation.  smp_store_release() is paired
@@ -223,6 +257,8 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                                     percpu_ref_func_t *confirm_switch)
 {
+       struct percpu_ref_data *data = ref->data;
+
        lockdep_assert_held(&percpu_ref_switch_lock);
 
        /*
@@ -230,10 +266,10 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref,
         * its completion.  If the caller ensures that ATOMIC switching
         * isn't in progress, this function can be called from any context.
         */
-       wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+       wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
                            percpu_ref_switch_lock);
 
-       if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+       if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
@@ -266,7 +302,7 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       ref->force_atomic = true;
+       ref->data->force_atomic = true;
        __percpu_ref_switch_mode(ref, confirm_switch);
 
        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -284,7 +320,7 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
 void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
 {
        percpu_ref_switch_to_atomic(ref, NULL);
-       wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+       wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
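
Typical use of the synchronous variant, for example quiescing a ref before poking at the object it protects; dev and reconfigure() are hypothetical:

	percpu_ref_switch_to_atomic_sync(&dev->ref);
	/* the switch has completed; gets/puts now hit ref->data->count */
	reconfigure(dev);
	percpu_ref_switch_to_percpu(&dev->ref);	/* restore the fast path */
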
 
@@ -312,7 +348,7 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-       ref->force_atomic = false;
+       ref->data->force_atomic = false;
        __percpu_ref_switch_mode(ref, NULL);
 
        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -344,7 +380,8 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
-                 "%s called more than once on %ps!", __func__, ref->release);
+                 "%s called more than once on %ps!", __func__,
+                 ref->data->release);
 
        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_mode(ref, confirm_kill);
@@ -354,6 +391,34 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
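
The confirm_kill callback fires from percpu_ref_call_confirm_rcu() once a grace period guarantees no CPU can still take the per-cpu fast path. A sketch, reusing the hypothetical my_dev from earlier plus an invented confirm_done completion:

	static void my_confirm_kill(struct percpu_ref *ref)
	{
		struct my_dev *dev = container_of(ref, struct my_dev, ref);

		/* all CPUs now see the ref as dead; tryget_live() fails */
		complete(&dev->confirm_done);
	}

	/* in the teardown path: */
	percpu_ref_kill_and_confirm(&dev->ref, my_confirm_kill);
	wait_for_completion(&dev->confirm_done);
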
 
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+       unsigned long __percpu *percpu_count;
+       unsigned long count, flags;
+
+       if (__ref_is_percpu(ref, &percpu_count))
+               return false;
+
+       /* protect us from being destroyed */
+       spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+       if (ref->data)
+               count = atomic_long_read(&ref->data->count);
+       else
+               count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
+       spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+       return count == 0;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
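
A common drain pattern built on this helper; dev->waitq is hypothetical, and the wake_up() is assumed to come from the ref's release callback. Because percpu_ref_exit() now stashes the final count in percpu_count_ptr, the check stays meaningful right up to (and, per the code above, even after) ->data being torn down:

	percpu_ref_kill(&dev->ref);	/* no new tryget_live() succeeds */
	wait_event(dev->waitq, percpu_ref_is_zero(&dev->ref));
	percpu_ref_exit(&dev->ref);
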
+
 /**
  * percpu_ref_reinit - re-initialize a percpu refcount
  * @ref: percpu_ref to re-initialize