cpufreq: remove redundant CPUFREQ_INCOMPATIBLE notifier event
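A policy notifier that used to rely on the second (CPUFREQ_INCOMPATIBLE) pass can apply the same constraints from the CPUFREQ_ADJUST pass, which the core still runs right before re-verifying the limits. Below is a minimal sketch of such a notifier; the MAX_SAFE_KHZ cap and the function names are made up for illustration:

#include <linux/cpufreq.h>
#include <linux/notifier.h>

#define MAX_SAFE_KHZ	1200000		/* hypothetical platform limit */

static int example_policy_notifier(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	/* Clamp the limits; the core re-verifies them after this pass. */
	cpufreq_verify_within_limits(policy, 0, MAX_SAFE_KHZ);
	return NOTIFY_OK;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_notifier,
};

/* Registered once from platform init code:
 * cpufreq_register_notifier(&example_policy_nb, CPUFREQ_POLICY_NOTIFIER);
 */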
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3199b8d..293f47b 100644
@@ -999,8 +999,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
        }
 }
 
-static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
-                                    struct device *dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
 {
        struct freq_attr **drv_attr;
        int ret = 0;
@@ -1057,8 +1056,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
        return cpufreq_set_policy(policy, &new_policy);
 }
 
-static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-                                 unsigned int cpu, struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
        int ret = 0;
 
@@ -1092,11 +1090,15 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
        return 0;
 }
 
-static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
+       struct device *dev = get_cpu_device(cpu);
        struct cpufreq_policy *policy;
        int ret;
 
+       if (WARN_ON(!dev))
+               return NULL;
+
        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;
@@ -1124,10 +1126,10 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);
 
-       policy->cpu = dev->id;
+       policy->cpu = cpu;
 
        /* Set this once on allocation */
-       policy->kobj_cpu = dev->id;
+       policy->kobj_cpu = cpu;
 
        return policy;
 
@@ -1189,55 +1191,34 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
        kfree(policy);
 }
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int cpufreq_online(unsigned int cpu)
 {
-       unsigned int j, cpu = dev->id;
-       int ret = -ENOMEM;
        struct cpufreq_policy *policy;
+       bool new_policy;
        unsigned long flags;
-       bool recover_policy;
-
-       pr_debug("adding CPU %u\n", cpu);
+       unsigned int j;
+       int ret;
 
-       if (cpu_is_offline(cpu)) {
-               /*
-                * Only possible if we are here from the subsys_interface add
-                * callback.  A hotplug notifier will follow and we will handle
-                * it as CPU online then.  For now, just create the sysfs link,
-                * unless there is no policy or the link is already present.
-                */
-               policy = per_cpu(cpufreq_cpu_data, cpu);
-               return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-                       ? add_cpu_dev_symlink(policy, cpu) : 0;
-       }
+       pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
-                       return cpufreq_add_policy_cpu(policy, cpu, dev);
+                       return cpufreq_add_policy_cpu(policy, cpu);
 
                /* This is the only online CPU for the policy.  Start over. */
-               recover_policy = true;
+               new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
-               recover_policy = false;
-               policy = cpufreq_policy_alloc(dev);
+               new_policy = true;
+               policy = cpufreq_policy_alloc(cpu);
                if (!policy)
-                       goto out_release_rwsem;
+                       return -ENOMEM;
        }
 
        cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1253,12 +1234,12 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        down_write(&policy->rwsem);
 
-       /* related cpus should atleast have policy->cpus */
-       cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-       /* Remember which CPUs have been present at the policy creation time. */
-       if (!recover_policy)
+       if (new_policy) {
+               /* related_cpus should at least include policy->cpus. */
+               cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+               /* Remember CPUs present at the policy creation time. */
                cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+       }
 
        /*
         * affected cpus must always be the one, which are online. We aren't
@@ -1266,7 +1247,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-       if (!recover_policy) {
+       if (new_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
 
@@ -1327,8 +1308,8 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
 
-       if (!recover_policy) {
-               ret = cpufreq_add_dev_interface(policy, dev);
+       if (new_policy) {
+               ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_exit_policy;
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -1343,10 +1324,12 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
-               goto out_remove_policy_notify;
+               /* cpufreq_policy_free() will notify based on this */
+               new_policy = false;
+               goto out_exit_policy;
        }
 
-       if (!recover_policy) {
+       if (new_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
@@ -1362,17 +1345,42 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        return 0;
 
-out_remove_policy_notify:
-       /* cpufreq_policy_free() will notify based on this */
-       recover_policy = true;
 out_exit_policy:
        up_write(&policy->rwsem);
 
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 out_free_policy:
-       cpufreq_policy_free(policy, recover_policy);
-out_release_rwsem:
+       cpufreq_policy_free(policy, !new_policy);
+       return ret;
+}
+
+/**
+ * cpufreq_add_dev - add the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+       unsigned cpu = dev->id;
+       int ret;
+
+       dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+       if (cpu_online(cpu)) {
+               ret = cpufreq_online(cpu);
+       } else {
+               /*
+                * A hotplug notifier will follow and we will handle it as CPU
+                * online then.  For now, just create the sysfs link, unless
+                * there is no policy or the link is already present.
+                */
+               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+               ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+                       ? add_cpu_dev_symlink(policy, cpu) : 0;
+       }
+
        return ret;
 }
 
@@ -2182,7 +2190,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 
        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-       if (new_policy->min > policy->max || new_policy->max < policy->min)
+       /*
+        * This check works well when we store new min/max freq attributes,
+        * because new_policy is a copy of policy with one field updated.
+        */
+       if (new_policy->min > new_policy->max)
                return -EINVAL;
 
        /* verify the cpu speed can be set within this limit */
@@ -2194,10 +2206,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_ADJUST, new_policy);
 
-       /* adjust if necessary - hardware incompatibility*/
-       blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-                       CPUFREQ_INCOMPATIBLE, new_policy);
-
        /*
         * verify the cpu speed can be set within this limit, which might be
         * different to the first one
@@ -2339,27 +2347,23 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
-       struct device *dev;
 
-       dev = get_cpu_device(cpu);
-       if (dev) {
-               switch (action & ~CPU_TASKS_FROZEN) {
-               case CPU_ONLINE:
-                       cpufreq_add_dev(dev, NULL);
-                       break;
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_ONLINE:
+               cpufreq_online(cpu);
+               break;
 
-               case CPU_DOWN_PREPARE:
-                       cpufreq_offline_prepare(cpu);
-                       break;
+       case CPU_DOWN_PREPARE:
+               cpufreq_offline_prepare(cpu);
+               break;
 
-               case CPU_POST_DEAD:
-                       cpufreq_offline_finish(cpu);
-                       break;
+       case CPU_POST_DEAD:
+               cpufreq_offline_finish(cpu);
+               break;
 
-               case CPU_DOWN_FAILED:
-                       cpufreq_add_dev(dev, NULL);
-                       break;
-               }
+       case CPU_DOWN_FAILED:
+               cpufreq_online(cpu);
+               break;
        }
        return NOTIFY_OK;
 }
@@ -2467,10 +2471,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        pr_debug("trying to register driver %s\n", driver_data->name);
 
+       /* Protect against concurrent CPU online/offline. */
+       get_online_cpus();
+
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        if (cpufreq_driver) {
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-               return -EEXIST;
+               ret = -EEXIST;
+               goto out;
        }
        cpufreq_driver = driver_data;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2509,7 +2517,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        register_hotcpu_notifier(&cpufreq_cpu_notifier);
        pr_debug("driver %s up and running\n", driver_data->name);
 
-       return 0;
+out:
+       put_online_cpus();
+       return ret;
+
 err_if_unreg:
        subsys_interface_unregister(&cpufreq_interface);
 err_boost_unreg:
@@ -2519,7 +2530,7 @@ err_null_driver:
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        cpufreq_driver = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-       return ret;
+       goto out;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
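
For context on the simplified min > max check in cpufreq_set_policy() above: the sysfs store path builds new_policy as a full copy of the current policy with a single field updated before handing it back to cpufreq_set_policy(), so comparing new_policy->min against new_policy->max already catches an out-of-range write. A simplified sketch of that pattern, not the verbatim store_one() macro from cpufreq.c (locking omitted):

/*
 * Simplified illustration of a min-frequency store; the real code in
 * cpufreq.c is generated by the store_one() macro.
 */
static ssize_t store_scaling_min_freq_example(struct cpufreq_policy *policy,
					      const char *buf, size_t count)
{
	struct cpufreq_policy new_policy;
	int ret;

	/* new_policy starts as an exact copy of the current policy... */
	memcpy(&new_policy, policy, sizeof(*policy));

	/* ...with only the "min" field overwritten by the user's value. */
	ret = sscanf(buf, "%u", &new_policy.min);
	if (ret != 1)
		return -EINVAL;

	/* cpufreq_set_policy() only needs new_policy.min > new_policy.max */
	ret = cpufreq_set_policy(policy, &new_policy);

	return ret ? ret : count;
}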