author    Josh Boyer <jwboyer@redhat.com>  2011-08-30 09:00:08 -0400
committer Josh Boyer <jwboyer@redhat.com>  2011-08-30 09:00:40 -0400
commit    5e97a201ac969e408daf84e2c5407d8a4876b519 (patch)
tree      4c4c757f007c77440160017ae965819f394fd980 /acpi-ensure-thermal-limits-match-cpu-freq.patch
parent    fe5d1cd9ddbfd3e99210c4951f2894693ad81b93 (diff)
Add patch to fix rhbz 606017
Diffstat (limited to 'acpi-ensure-thermal-limits-match-cpu-freq.patch')
-rw-r--r--  acpi-ensure-thermal-limits-match-cpu-freq.patch  128
1 file changed, 128 insertions(+), 0 deletions(-)
diff --git a/acpi-ensure-thermal-limits-match-cpu-freq.patch b/acpi-ensure-thermal-limits-match-cpu-freq.patch
new file mode 100644
index 000000000..3945af197
--- /dev/null
+++ b/acpi-ensure-thermal-limits-match-cpu-freq.patch
@@ -0,0 +1,128 @@
+The ACPI thermal management code supports slowing down a CPU when it's
+overheating. Right now that's done by choosing to run it at 100%, 75%, 50%
+or 25% of full speed. However, most CPUs do not allow an arbitrary
+frequency to be set and so will run at the first frequency below that value.
+This doesn't match the intent of the specification, which is to drop the
+frequency state by state until the temperature stabilises. Fix this up
+so it uses actual frequencies rather than percentages.
+
+Reported-by: Gene Snider <snider6982@comcast.net>
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+---
+ drivers/acpi/processor_thermal.c | 45 +++++++++++++++++++++----------------
+ 1 files changed, 25 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
+index 870550d..1c4eb60 100644
+--- a/drivers/acpi/processor_thermal.c
++++ b/drivers/acpi/processor_thermal.c
+@@ -52,10 +52,8 @@ ACPI_MODULE_NAME("processor_thermal");
+ * _any_ cpufreq driver and not only the acpi-cpufreq driver.
+ */
+
+-#define CPUFREQ_THERMAL_MIN_STEP 0
+-#define CPUFREQ_THERMAL_MAX_STEP 3
+
+-static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
++static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_limit_state);
+ static unsigned int acpi_thermal_cpufreq_is_init = 0;
+
+ static int cpu_has_cpufreq(unsigned int cpu)
+@@ -70,19 +68,19 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+ {
+ struct cpufreq_policy *policy = data;
+- unsigned long max_freq = 0;
++ int state = per_cpu(cpufreq_thermal_limit_state, policy->cpu);
++ struct cpufreq_frequency_table *table;
+
+ if (event != CPUFREQ_ADJUST)
+- goto out;
++ return 0;
++
++ table = cpufreq_frequency_get_table(policy->cpu);
+
+- max_freq = (
+- policy->cpuinfo.max_freq *
+- (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+- ) / 100;
++ if (!table)
++ return 0;
+
+- cpufreq_verify_within_limits(policy, 0, max_freq);
++ cpufreq_verify_within_limits(policy, 0, table[state].frequency);
+
+- out:
+ return 0;
+ }
+
+@@ -92,10 +90,21 @@ static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
+
+ static int cpufreq_get_max_state(unsigned int cpu)
+ {
++ int count = 0;
++ struct cpufreq_frequency_table *table;
++
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+- return CPUFREQ_THERMAL_MAX_STEP;
++ table = cpufreq_frequency_get_table(cpu);
++
++ if (!table)
++ return 0;
++
++ while (table[count].frequency != CPUFREQ_TABLE_END)
++ count++;
++
++ return count;
+ }
+
+ static int cpufreq_get_cur_state(unsigned int cpu)
+@@ -103,7 +112,7 @@ static int cpufreq_get_cur_state(unsigned int cpu)
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+- return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
++ return per_cpu(cpufreq_thermal_limit_state, cpu);
+ }
+
+ static int cpufreq_set_cur_state(unsigned int cpu, int state)
+@@ -111,7 +120,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+- per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
++ per_cpu(cpufreq_thermal_limit_state, cpu) = state;
+ cpufreq_update_policy(cpu);
+ return 0;
+ }
+@@ -122,7 +131,7 @@ void acpi_thermal_cpufreq_init(void)
+
+ for (i = 0; i < nr_cpu_ids; i++)
+ if (cpu_present(i))
+- per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
++ per_cpu(cpufreq_thermal_limit_state, i) = 0;
+
+ i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
+ CPUFREQ_POLICY_NOTIFIER);
+@@ -170,15 +179,11 @@ int acpi_processor_get_limit_info(struct acpi_processor *pr)
+ return 0;
+ }
+
+-/* thermal coolign device callbacks */
++/* thermal cooling device callbacks */
+ static int acpi_processor_max_state(struct acpi_processor *pr)
+ {
+ int max_state = 0;
+
+- /*
+- * There exists four states according to
+- * cpufreq_thermal_reduction_ptg. 0, 1, 2, 3
+- */
+ max_state += cpufreq_get_max_state(pr->id);
+ if (pr->flags.throttling)
+ max_state += (pr->throttling.state_count -1);
+--
+1.7.6.1
+
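For illustration only, here is a minimal user-space C sketch (not kernel code, and not part of the patch above) contrasting the two limit calculations: the removed percentage math, which scales cpuinfo.max_freq by (100 - state * 20) percent, and the new approach, which reads the frequency for the requested state straight out of the cpufreq frequency table. The table values below are invented for the example; in the kernel they come from cpufreq_frequency_get_table().

/* limit_sketch.c - illustrative only; frequency table values are made up. */
#include <stdio.h>

#define CPUFREQ_TABLE_END  (~0u)

/* Hypothetical per-CPU frequency table in kHz, highest frequency first. */
static const unsigned int freq_table[] = {
	2400000, 1800000, 1200000, 800000, CPUFREQ_TABLE_END
};

/* Old behaviour: scale the maximum frequency by a per-state percentage. */
static unsigned int limit_by_percentage(unsigned int max_freq, int state)
{
	return max_freq * (100 - state * 20) / 100;
}

/* New behaviour: step down the real frequency table, one state at a time. */
static unsigned int limit_by_table(int state)
{
	return freq_table[state];
}

/* Count the usable states, as the reworked cpufreq_get_max_state() does. */
static int max_state(void)
{
	int count = 0;

	while (freq_table[count] != CPUFREQ_TABLE_END)
		count++;
	return count;
}

int main(void)
{
	unsigned int max_freq = freq_table[0];
	int state;

	for (state = 0; state < max_state(); state++)
		printf("state %d: percentage limit %7u kHz, table limit %7u kHz\n",
		       state, limit_by_percentage(max_freq, state),
		       limit_by_table(state));
	return 0;
}

With the made-up table above, the percentage method asks for limits (2400000, 1920000, 1440000, 960000 kHz) that mostly fall between real P-states, so the CPU snaps to the next frequency below; the table method instead requests frequencies the hardware can actually run at, one state per cooling step.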