kernel/fork.c: convert task, stack, sighand and signal refcounts from atomic_t to refcount_t
[linux-2.6-microblaze.git] / kernel / fork.c
index b69248e..77059b2 100644 (file)
@@ -429,7 +429,7 @@ static void release_task_stack(struct task_struct *tsk)
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 void put_task_stack(struct task_struct *tsk)
 {
-       if (atomic_dec_and_test(&tsk->stack_refcount))
+       if (refcount_dec_and_test(&tsk->stack_refcount))
                release_task_stack(tsk);
 }
 #endif
@@ -447,7 +447,7 @@ void free_task(struct task_struct *tsk)
         * If the task had a separate stack allocation, it should be gone
         * by now.
         */
-       WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
+       WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
 #endif
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
@@ -710,14 +710,14 @@ static inline void free_signal_struct(struct signal_struct *sig)
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-       if (atomic_dec_and_test(&sig->sigcnt))
+       if (refcount_dec_and_test(&sig->sigcnt))
                free_signal_struct(sig);
 }
 
 void __put_task_struct(struct task_struct *tsk)
 {
        WARN_ON(!tsk->exit_state);
-       WARN_ON(atomic_read(&tsk->usage));
+       WARN_ON(refcount_read(&tsk->usage));
        WARN_ON(tsk == current);
 
        cgroup_free(tsk);
@@ -867,7 +867,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->stack_vm_area = stack_vm_area;
 #endif
 #ifdef CONFIG_THREAD_INFO_IN_TASK
-       atomic_set(&tsk->stack_refcount, 1);
+       refcount_set(&tsk->stack_refcount, 1);
 #endif
 
        if (err)
@@ -896,7 +896,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
         * One for us, one for whoever does the "release_task()" (usually
         * parent)
         */
-       atomic_set(&tsk->usage, 2);
+       refcount_set(&tsk->usage, 2);
 #ifdef CONFIG_BLK_DEV_IO_TRACE
        tsk->btrace_seq = 0;
 #endif
@@ -1463,7 +1463,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
        struct sighand_struct *sig;
 
        if (clone_flags & CLONE_SIGHAND) {
-               atomic_inc(&current->sighand->count);
+               refcount_inc(&current->sighand->count);
                return 0;
        }
        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
@@ -1471,7 +1471,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
        if (!sig)
                return -ENOMEM;
 
-       atomic_set(&sig->count, 1);
+       refcount_set(&sig->count, 1);
        spin_lock_irq(&current->sighand->siglock);
        memcpy(sig->action, current->sighand->action, sizeof(sig->action));
        spin_unlock_irq(&current->sighand->siglock);
@@ -1480,7 +1480,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 
 void __cleanup_sighand(struct sighand_struct *sighand)
 {
-       if (atomic_dec_and_test(&sighand->count)) {
+       if (refcount_dec_and_test(&sighand->count)) {
                signalfd_cleanup(sighand);
                /*
                 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
@@ -1527,7 +1527,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
        sig->nr_threads = 1;
        atomic_set(&sig->live, 1);
-       atomic_set(&sig->sigcnt, 1);
+       refcount_set(&sig->sigcnt, 1);
 
        /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
        sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
@@ -2082,7 +2082,7 @@ static __latent_entropy struct task_struct *copy_process(
                } else {
                        current->signal->nr_threads++;
                        atomic_inc(&current->signal->live);
-                       atomic_inc(&current->signal->sigcnt);
+                       refcount_inc(&current->signal->sigcnt);
                        task_join_group_stop(p);
                        list_add_tail_rcu(&p->thread_group,
                                          &p->group_leader->thread_group);
@@ -2439,7 +2439,7 @@ static int check_unshare_flags(unsigned long unshare_flags)
                        return -EINVAL;
        }
        if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
-               if (atomic_read(&current->sighand->count) > 1)
+               if (refcount_read(&current->sighand->count) > 1)
                        return -EINVAL;
        }
        if (unshare_flags & CLONE_VM) {