author     Glauber Costa <gcosta@redhat.com>   2008-03-03 14:13:02 -0300
committer  Ingo Molnar <mingo@elte.hu>         2008-04-17 17:40:56 +0200
commit     768d95051bdaf60b4eb89b42c133b14627f478f2 (patch)
tree       805873c963721c6a52c3b0737118f6f18d2a02d8 /arch/x86
parent     1dbb4726faebe9e64a1e9cf40e3b39fffa065a65 (diff)
x86: move sibling functions to common file
set_cpu_sibling_map() and remove_siblinginfo() are identical in the
32-bit and 64-bit code, so move them to the common file.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
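
For context, a minimal sketch of the interface the two boot paths now share. The header placement below is an assumption for illustration; the declaration side is not part of this diff.

    /* Sketch only: declarations assumed to live in a shared x86 SMP header */
    extern void set_cpu_sibling_map(int cpu);   /* fill sibling/core maps as a CPU comes up */
    #ifdef CONFIG_HOTPLUG_CPU
    extern void remove_siblinginfo(int cpu);    /* drop the CPU from those maps on unplug */
    #endif

With a single definition in arch/x86/kernel/smpboot.c, smpboot_32.c and smpboot_64.c link against the same symbols instead of carrying duplicate copies.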
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/smpboot.c    |  88
-rw-r--r--  arch/x86/kernel/smpboot_32.c |  88
-rw-r--r--  arch/x86/kernel/smpboot_64.c |  89
3 files changed, 88 insertions(+), 177 deletions(-)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 40a3b56952e..d774520a6b4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -29,7 +29,95 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 /* Per CPU bogomips and other parameters */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+void __cpuinit set_cpu_sibling_map(int cpu)
+{
+        int i;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+        cpu_set(cpu, cpu_sibling_setup_map);
+
+        if (smp_num_siblings > 1) {
+                for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
+                                cpu_set(i, per_cpu(cpu_core_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_core_map, i));
+                                cpu_set(i, c->llc_shared_map);
+                                cpu_set(cpu, cpu_data(i).llc_shared_map);
+                        }
+                }
+        } else {
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
+        }
+
+        cpu_set(cpu, c->llc_shared_map);
+
+        if (current_cpu_data.x86_max_cores == 1) {
+                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
+                c->booted_cores = 1;
+                return;
+        }
+
+        for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
+                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
+                        cpu_set(i, c->llc_shared_map);
+                        cpu_set(cpu, cpu_data(i).llc_shared_map);
+                }
+                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
+                        cpu_set(i, per_cpu(cpu_core_map, cpu));
+                        cpu_set(cpu, per_cpu(cpu_core_map, i));
+                        /*
+                         * Does this new cpu bringup a new core?
+                         */
+                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
+                                /*
+                                 * for each core in package, increment
+                                 * the booted_cores for this new cpu
+                                 */
+                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
+                                        c->booted_cores++;
+                                /*
+                                 * increment the core count for all
+                                 * the other cpus in this package
+                                 */
+                                if (i != cpu)
+                                        cpu_data(i).booted_cores++;
+                        } else if (i != cpu && !c->booted_cores)
+                                c->booted_cores = cpu_data(i).booted_cores;
+                }
+        }
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
+void remove_siblinginfo(int cpu)
+{
+        int sibling;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
+                /*/
+                 * last thread sibling in this cpu core going down
+                 */
+                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
+                        cpu_data(sibling).booted_cores--;
+        }
+
+        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
+        cpus_clear(per_cpu(cpu_core_map, cpu));
+        c->phys_proc_id = 0;
+        c->cpu_core_id = 0;
+        cpu_clear(cpu, cpu_sibling_setup_map);
+}
 
 int additional_cpus __initdata = -1;
 
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 0fbc98163b4..322f46674d4 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -274,71 +274,6 @@ cpumask_t cpu_coregroup_map(int cpu)
         return c->llc_shared_map;
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-        int i;
-        struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-        cpu_set(cpu, cpu_sibling_setup_map);
-
-        if (smp_num_siblings > 1) {
-                for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                                cpu_set(i, per_cpu(cpu_core_map, cpu));
-                                cpu_set(cpu, per_cpu(cpu_core_map, i));
-                                cpu_set(i, c->llc_shared_map);
-                                cpu_set(cpu, cpu_data(i).llc_shared_map);
-                        }
-                }
-        } else {
-                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-        }
-
-        cpu_set(cpu, c->llc_shared_map);
-
-        if (current_cpu_data.x86_max_cores == 1) {
-                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-                c->booted_cores = 1;
-                return;
-        }
-
-        for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                        cpu_set(i, c->llc_shared_map);
-                        cpu_set(cpu, cpu_data(i).llc_shared_map);
-                }
-                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                        cpu_set(i, per_cpu(cpu_core_map, cpu));
-                        cpu_set(cpu, per_cpu(cpu_core_map, i));
-                        /*
-                         * Does this new cpu bringup a new core?
-                         */
-                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-                                /*
-                                 * for each core in package, increment
-                                 * the booted_cores for this new cpu
-                                 */
-                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                        c->booted_cores++;
-                                /*
-                                 * increment the core count for all
-                                 * the other cpus in this package
-                                 */
-                                if (i != cpu)
-                                        cpu_data(i).booted_cores++;
-                        } else if (i != cpu && !c->booted_cores)
-                                c->booted_cores = cpu_data(i).booted_cores;
-                }
-        }
-}
-
 /*
  * Activate a secondary processor.
  */
@@ -1120,29 +1055,6 @@ void __init native_smp_prepare_boot_cpu(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void remove_siblinginfo(int cpu)
-{
-        int sibling;
-        struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-                /*/
-                 * last thread sibling in this cpu core going down
-                 */
-                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                        cpu_data(sibling).booted_cores--;
-        }
-
-        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-        cpus_clear(per_cpu(cpu_sibling_map, cpu));
-        cpus_clear(per_cpu(cpu_core_map, cpu));
-        c->phys_proc_id = 0;
-        c->cpu_core_id = 0;
-        cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
 int __cpu_disable(void)
 {
         cpumask_t map = cpu_online_map;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 20f1c7df86a..329f9c53a33 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -225,71 +225,6 @@ cpumask_t cpu_coregroup_map(int cpu)
         return c->llc_shared_map;
 }
 
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-        int i;
-        struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-        cpu_set(cpu, cpu_sibling_setup_map);
-
-        if (smp_num_siblings > 1) {
-                for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                                cpu_set(i, per_cpu(cpu_core_map, cpu));
-                                cpu_set(cpu, per_cpu(cpu_core_map, i));
-                                cpu_set(i, c->llc_shared_map);
-                                cpu_set(cpu, cpu_data(i).llc_shared_map);
-                        }
-                }
-        } else {
-                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-        }
-
-        cpu_set(cpu, c->llc_shared_map);
-
-        if (current_cpu_data.x86_max_cores == 1) {
-                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-                c->booted_cores = 1;
-                return;
-        }
-
-        for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                        cpu_set(i, c->llc_shared_map);
-                        cpu_set(cpu, cpu_data(i).llc_shared_map);
-                }
-                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                        cpu_set(i, per_cpu(cpu_core_map, cpu));
-                        cpu_set(cpu, per_cpu(cpu_core_map, i));
-                        /*
-                         * Does this new cpu bringup a new core?
-                         */
-                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-                                /*
-                                 * for each core in package, increment
-                                 * the booted_cores for this new cpu
-                                 */
-                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                        c->booted_cores++;
-                                /*
-                                 * increment the core count for all
-                                 * the other cpus in this package
-                                 */
-                                if (i != cpu)
-                                        cpu_data(i).booted_cores++;
-                        } else if (i != cpu && !c->booted_cores)
-                                c->booted_cores = cpu_data(i).booted_cores;
-                }
-        }
-}
-
 /*
  * Setup code on secondary processor (after comming out of the trampoline)
  */
@@ -917,30 +852,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-
-void remove_siblinginfo(int cpu)
-{
-        int sibling;
-        struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-                /*
-                 * last thread sibling in this cpu core going down
-                 */
-                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                        cpu_data(sibling).booted_cores--;
-        }
-
-        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-        cpus_clear(per_cpu(cpu_sibling_map, cpu));
-        cpus_clear(per_cpu(cpu_core_map, cpu));
-        c->phys_proc_id = 0;
-        c->cpu_core_id = 0;
-        cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
 static void __ref remove_cpu_from_maps(void)
 {
         int cpu = smp_processor_id();
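
As a usage sketch, the two moved helpers are paired around CPU bring-up and tear-down. The wrapper functions below are hypothetical names for illustration only; they are not the real call sites in the smpboot code.

    /* Hypothetical illustration of how the moved helpers pair up */
    static void example_bringup_path(int cpu)
    {
            set_cpu_sibling_map(cpu);       /* record thread/core/LLC siblings */
    }

    #ifdef CONFIG_HOTPLUG_CPU
    static void example_teardown_path(int cpu)
    {
            remove_siblinginfo(cpu);        /* drop the CPU from the maps before it goes away */
    }
    #endif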