author     Linus Torvalds <torvalds@g5.osdl.org>    2006-01-31 15:09:20 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-01-31 15:09:20 -0800
commit     28e0cf22c1221650b4bfba48808d966160c42320
tree       79e530ac09f62000c1d0ec998a1bfa5404a2577a
parent     9aef3b7c208b216b54a2e6614c6287ca8a09cf6f
parent     c0672860199ac009af7cf198a134ee7a4c3a9bb3
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig        |  1
 -rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c  |  9
 -rw-r--r--  drivers/cpufreq/cpufreq.c                   | 70
 -rw-r--r--  drivers/cpufreq/cpufreq_conservative.c      | 52
 -rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c          | 41
 -rw-r--r--  drivers/cpufreq/cpufreq_userspace.c         | 78
 -rw-r--r--  include/linux/cpufreq.h                     |  3
 7 files changed, 141 insertions(+), 113 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 0f1eb507233..26892d2099b 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+	depends on PCI
 	help
 	  This add the CPUFreq driver for NatSemi Geode processors which
 	  support suspend modulation.
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 270f2188d68..cc73a7ae34b 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -52,6 +52,7 @@ enum {
 
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	case 0x0f12:
 		has_N44_O17_errata[policy->cpu] = 1;
 		dprintk("has errata -- disabling low frequencies\n");
+		break;
+
+	case 0x0f29:
+		has_N60_errata[policy->cpu] = 1;
+		dprintk("has errata -- disabling frequencies lower than 2ghz\n");
+		break;
 	}
 
 	/* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if ((i<2) && (has_N44_O17_errata[policy->cpu]))
 			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		else
 			p4clockmod_table[i].frequency = (stock_freq * i)/8;
 	}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 277a843a87a..7a511479ae2 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
@@ -55,7 +56,7 @@ static DECLARE_RWSEM (cpufreq_notifier_rwsem);
 static LIST_HEAD(cpufreq_governor_list);
-static DECLARE_MUTEX (cpufreq_governor_sem);
+static DEFINE_MUTEX (cpufreq_governor_mutex);
 
 struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
 {
@@ -297,18 +298,18 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
 		return -EINVAL;
 	} else {
 		struct cpufreq_governor *t;
-		down(&cpufreq_governor_sem);
+		mutex_lock(&cpufreq_governor_mutex);
 		if (!cpufreq_driver || !cpufreq_driver->target)
 			goto out;
 		list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 			if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
 				*governor = t;
-				up(&cpufreq_governor_sem);
+				mutex_unlock(&cpufreq_governor_mutex);
 				return 0;
 			}
 		}
 out:
-		up(&cpufreq_governor_sem);
+		mutex_unlock(&cpufreq_governor_mutex);
 	}
 	return -EINVAL;
 }
@@ -600,7 +601,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
 
-	init_MUTEX_LOCKED(&policy->lock);
+	mutex_init(&policy->lock);
+	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
@@ -610,6 +612,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
+		mutex_unlock(&policy->lock);
 		goto err_out;
 	}
@@ -621,9 +624,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
 
 	ret = kobject_register(&policy->kobj);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&policy->lock);
 		goto err_out_driver_exit;
-
+	}
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
@@ -641,7 +645,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	/* set default policy */
@@ -762,10 +766,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	down(&data->lock);
+	mutex_lock(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	kobject_unregister(&data->kobj);
@@ -834,9 +838,9 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 	unsigned int ret = 0;
 
 	if (policy) {
-		down(&policy->lock);
+		mutex_lock(&policy->lock);
 		ret = policy->cur;
-		up(&policy->lock);
+		mutex_unlock(&policy->lock);
 		cpufreq_cpu_put(policy);
 	}
@@ -862,7 +866,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 	if (!cpufreq_driver->get)
 		goto out;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 
 	ret = cpufreq_driver->get(cpu);
@@ -875,7 +879,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 		}
 	}
 
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 out:
 	cpufreq_cpu_put(policy);
@@ -1158,11 +1162,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	cpufreq_cpu_put(policy);
@@ -1199,9 +1203,9 @@ int cpufreq_governor(unsigned int cpu, unsigned int event)
 	if (!policy)
 		return -EINVAL;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 	ret = __cpufreq_governor(policy, event);
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	cpufreq_cpu_put(policy);
@@ -1217,17 +1221,17 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return -EINVAL;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 		if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
-			up(&cpufreq_governor_sem);
+			mutex_unlock(&cpufreq_governor_mutex);
 			return -EBUSY;
 		}
 	}
 	list_add(&governor->governor_list, &cpufreq_governor_list);
 
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 	return 0;
 }
@@ -1239,9 +1243,9 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 	return;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
@@ -1268,9 +1272,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 	if (!cpu_policy)
 		return -EINVAL;
 
-	down(&cpu_policy->lock);
+	mutex_lock(&cpu_policy->lock);
 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-	up(&cpu_policy->lock);
+	mutex_unlock(&cpu_policy->lock);
 
 	cpufreq_cpu_put(cpu_policy);
@@ -1382,7 +1386,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 		return -EINVAL;
 
 	/* lock this CPU */
-	down(&data->lock);
+	mutex_lock(&data->lock);
 
 	ret = __cpufreq_set_policy(data, policy);
 	data->user_policy.min = data->min;
@@ -1390,7 +1394,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.policy = data->policy;
 	data->user_policy.governor = data->governor;
 
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	cpufreq_cpu_put(data);
 	return ret;
@@ -1414,7 +1418,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	down(&data->lock);
+	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
 	memcpy(&policy,
@@ -1425,9 +1429,17 @@ int cpufreq_update_policy(unsigned int cpu)
 	policy.policy = data->user_policy.policy;
 	policy.governor = data->user_policy.governor;
 
+	/* BIOS might change freq behind our back
+	  -> ask driver for current freq and notify governors about a change */
+	if (cpufreq_driver->get) {
+		policy.cur = cpufreq_driver->get(cpu);
+		if (data->cur != policy.cur)
+			cpufreq_out_of_sync(cpu, data->cur, policy.cur);
+	}
+
 	ret = __cpufreq_set_policy(data, &policy);
 
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	cpufreq_cpu_put(data);
 	return ret;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 39543a2bed0..ac38766b258 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -28,7 +28,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
-
+#include <linux/mutex.h>
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -71,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -139,9 +139,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 )
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -153,14 +153,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -172,16 +172,16 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD ||
 			input <= dbs_tuners_ins.down_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -193,16 +193,16 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
 			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
 			input >= dbs_tuners_ins.up_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.down_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -222,9 +222,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -236,7 +236,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -257,9 +257,9 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -444,12 +444,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -487,7 +487,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -521,11 +521,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -536,12 +536,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -550,7 +550,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e69fd8dd1f1..9ee9411f186 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -27,6 +27,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
+#include <linux/mutex.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -70,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -136,9 +137,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -150,14 +151,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -169,15 +170,15 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -197,9 +198,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -211,7 +212,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -356,12 +357,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -399,7 +400,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -435,11 +436,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -450,12 +451,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -464,7 +465,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index d32bf3593cd..92a0be22a2a 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -1,3 +1,4 @@
+
 /*
  *  linux/drivers/cpufreq/cpufreq_userspace.c
  *
@@ -21,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
@@ -33,9 +35,8 @@ static unsigned int cpu_min_freq[NR_CPUS];
 static unsigned int	cpu_cur_freq[NR_CPUS]; /* current CPU freq */
 static unsigned int	cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
 static unsigned int	cpu_is_managed[NR_CPUS];
-static struct cpufreq_policy current_policy[NR_CPUS];
 
-static DECLARE_MUTEX (userspace_sem);
+static DEFINE_MUTEX (userspace_mutex);
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
@@ -64,35 +65,34 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
  *
  * Sets the CPU frequency to freq.
  */
-static int cpufreq_set(unsigned int freq, unsigned int cpu)
+static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 {
 	int ret = -EINVAL;
 
-	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", cpu, freq);
+	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-	down(&userspace_sem);
-	if (!cpu_is_managed[cpu])
+	mutex_lock(&userspace_mutex);
+	if (!cpu_is_managed[policy->cpu])
 		goto err;
 
-	cpu_set_freq[cpu] = freq;
+	cpu_set_freq[policy->cpu] = freq;
 
-	if (freq < cpu_min_freq[cpu])
-		freq = cpu_min_freq[cpu];
-	if (freq > cpu_max_freq[cpu])
-		freq = cpu_max_freq[cpu];
+	if (freq < cpu_min_freq[policy->cpu])
+		freq = cpu_min_freq[policy->cpu];
+	if (freq > cpu_max_freq[policy->cpu])
+		freq = cpu_max_freq[policy->cpu];
 
 	/*
 	 * We're safe from concurrent calls to ->target() here
-	 * as we hold the userspace_sem lock. If we were calling
+	 * as we hold the userspace_mutex lock. If we were calling
 	 * cpufreq_driver_target, a deadlock situation might occur:
-	 * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock)
-	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem)
+	 * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock)
+	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex)
 	 */
-	ret = __cpufreq_driver_target(&current_policy[cpu], freq,
-			CPUFREQ_RELATION_L);
+	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 
  err:
-	up(&userspace_sem);
+	mutex_unlock(&userspace_mutex);
 	return ret;
 }
@@ -113,7 +113,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
 	if (ret != 1)
 		return -EINVAL;
 
-	cpufreq_set(freq, policy->cpu);
+	cpufreq_set(freq, policy);
 
 	return count;
 }
@@ -134,44 +134,48 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		if (!cpu_online(cpu))
 			return -EINVAL;
 		BUG_ON(!policy->cur);
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_is_managed[cpu] = 1;
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
 		cpu_cur_freq[cpu] = policy->cur;
 		cpu_set_freq[cpu] = policy->cur;
 		sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
-		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
 		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_STOP:
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_is_managed[cpu] = 0;
 		cpu_min_freq[cpu] = 0;
 		cpu_max_freq[cpu] = 0;
 		cpu_set_freq[cpu] = 0;
 		sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
 		dprintk("managing cpu %u stopped\n", cpu);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_LIMITS:
-		down(&userspace_sem);
-		cpu_min_freq[cpu] = policy->min;
-		cpu_max_freq[cpu] = policy->max;
-		dprintk("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu], cpu_set_freq[cpu]);
+		mutex_lock(&userspace_mutex);
+		dprintk("limit event for cpu %u: %u - %u kHz,"
+			"currently %u kHz, last set to %u kHz\n",
+			cpu, policy->min, policy->max,
+			cpu_cur_freq[cpu], cpu_set_freq[cpu]);
 		if (policy->max < cpu_set_freq[cpu]) {
-			__cpufreq_driver_target(&current_policy[cpu], policy->max,
-						CPUFREQ_RELATION_H);
-		} else if (policy->min > cpu_set_freq[cpu]) {
-			__cpufreq_driver_target(&current_policy[cpu], policy->min,
-						CPUFREQ_RELATION_L);
-		} else {
-			__cpufreq_driver_target(&current_policy[cpu], cpu_set_freq[cpu],
-						CPUFREQ_RELATION_L);
+			__cpufreq_driver_target(policy, policy->max,
+						CPUFREQ_RELATION_H);
+		}
+		else if (policy->min > cpu_set_freq[cpu]) {
+			__cpufreq_driver_target(policy, policy->min,
+						CPUFREQ_RELATION_L);
 		}
-		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
-		up(&userspace_sem);
+		else {
+			__cpufreq_driver_target(policy, cpu_set_freq[cpu],
+						CPUFREQ_RELATION_L);
+		}
+		cpu_min_freq[cpu] = policy->min;
+		cpu_max_freq[cpu] = policy->max;
+		cpu_cur_freq[cpu] = policy->cur;
+		mutex_unlock(&userspace_mutex);
 		break;
 	}
 	return 0;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c31650df924..17866d7e2b7 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -14,6 +14,7 @@
 #ifndef _LINUX_CPUFREQ_H
 #define _LINUX_CPUFREQ_H
 
+#include <linux/mutex.h>
 #include <linux/config.h>
 #include <linux/notifier.h>
 #include <linux/threads.h>
@@ -82,7 +83,7 @@ struct cpufreq_policy {
 	unsigned int		policy; /* see above */
 	struct cpufreq_governor	*governor; /* see below */
 
-	struct semaphore	lock;   /* CPU ->setpolicy or ->target may
+	struct mutex		lock;   /* CPU ->setpolicy or ->target may
 					   only be called once a time */
 
 	struct work_struct	update; /* if update_policy() needs to be
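
The bulk of this merge is a mechanical conversion of cpufreq's semaphores-used-as-mutexes (DECLARE_MUTEX plus down()/up()) to real mutexes (DEFINE_MUTEX plus mutex_lock()/mutex_unlock()). A minimal kernel-style sketch of the resulting pattern is below; the tunable, mutex, and handler names (example_sampling_rate, example_mutex, store_example_rate) are hypothetical stand-ins for the dbs_tuners_ins code in the governors, not part of the patch:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    /* Hypothetical tunable guarded the same way the governors' tuners are. */
    static unsigned int example_sampling_rate = 10000;
    static DEFINE_MUTEX(example_mutex);     /* replaces DECLARE_MUTEX + down()/up() */

    static ssize_t store_example_rate(const char *buf, size_t count)
    {
            unsigned int input;
            int ret = sscanf(buf, "%u", &input);

            mutex_lock(&example_mutex);     /* was: down(&example_sem) */
            if (ret != 1) {
                    mutex_unlock(&example_mutex);
                    return -EINVAL;
            }
            example_sampling_rate = input;
            mutex_unlock(&example_mutex);   /* was: up(&example_sem) */

            return count;
    }

The exclusion is the same as before; the mutex API simply enforces sleeping-lock semantics (the owner must unlock) and gains debug checking, which is why the tree-wide conversion was underway at the time.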
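The p4-clockmod change adds a second erratum (N60 on model 0x0f29) alongside N44/O17 and marks clock-modulation steps below 2 GHz as invalid when it is present. A simplified sketch of that filtering follows; the helper name (filter_low_steps) and parameters are hypothetical, and it computes each step up front rather than re-reading the previous table contents the way the driver does:

    #include <linux/cpufreq.h>

    /* Invalidate modulation steps below 2 GHz (values in kHz) when the
     * N60-style erratum applies; otherwise derive the step from stock_freq. */
    static void filter_low_steps(struct cpufreq_frequency_table *table,
                                 unsigned int stock_freq, int has_n60)
    {
            unsigned int i;

            for (i = 1; table[i].frequency != CPUFREQ_TABLE_END; i++) {
                    unsigned int step = (stock_freq * i) / 8;   /* kHz */

                    if (has_n60 && step < 2000000)
                            table[i].frequency = CPUFREQ_ENTRY_INVALID;
                    else
                            table[i].frequency = step;
            }
    }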
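The comment added in cpufreq_set() explains why the userspace governor calls __cpufreq_driver_target() rather than cpufreq_driver_target(): the latter takes policy->lock, and taking it while holding userspace_mutex inverts the order used by cpufreq_set_policy(). A standalone pthreads sketch of that AB/BA hazard is below (plain C, not kernel code; the lock names only mirror the ones in the comment):

    /* Thread A mimics a cpufreq_set() that also took policy_lock; thread B
     * mimics cpufreq_set_policy() reaching the governor. Opposite acquisition
     * order means the pair can deadlock under unlucky scheduling. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t userspace_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t policy_lock     = PTHREAD_MUTEX_INITIALIZER;

    static void *thread_a(void *unused)
    {
            pthread_mutex_lock(&userspace_mutex);
            pthread_mutex_lock(&policy_lock);       /* waits on B */
            pthread_mutex_unlock(&policy_lock);
            pthread_mutex_unlock(&userspace_mutex);
            return NULL;
    }

    static void *thread_b(void *unused)
    {
            pthread_mutex_lock(&policy_lock);
            pthread_mutex_lock(&userspace_mutex);   /* waits on A: AB/BA deadlock */
            pthread_mutex_unlock(&userspace_mutex);
            pthread_mutex_unlock(&policy_lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;

            pthread_create(&a, NULL, thread_a, NULL);
            pthread_create(&b, NULL, thread_b, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            puts("finished (with unlucky scheduling this never prints)");
            return 0;
    }

By calling __cpufreq_driver_target(), which assumes the caller's context already covers policy->lock, cpufreq_set() only ever holds userspace_mutex and the inverted ordering never arises.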