author		Rusty Russell <rusty@rustcorp.com.au>	2007-05-02 19:27:13 +0200
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 19:27:13 +0200
commit		a75c54f933bd8db9f4a609bd128663c179b3e6a1 (patch)
tree		8b7dd866185bec34146eb537f057b6b496c78443
parent		82d1bb725e128c97b362a4b33fcbfff08fdaaa5a (diff)
[PATCH] i386: separate hardware-defined TSS from Linux additions
On Thu, 2007-03-29 at 13:16 +0200, Andi Kleen wrote:
> Please clean it up properly with two structs.
Not sure about this now that I've done it. It's running here.
If you like it, I can do x86-64 as well.
==
lguest defines its own TSS struct because the "struct tss_struct"
contains Linux-specific additions. Andi asked me to split the struct
in processor.h.
Unfortunately it makes usage a little awkward; the sketch below shows the new shape.
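
To make the shape of the change concrete, here is a condensed sketch of the split (field lists abbreviated; the full definitions are in the processor.h hunk below):

	/* Hardware-mandated TSS layout: the CPU dictates every field here. */
	struct i386_hw_tss {
		unsigned short	back_link, __blh;
		unsigned long	esp0;
		unsigned short	ss0, __ss0h;
		/* ... remaining hardware-defined fields elided ... */
		unsigned short	trace, io_bitmap_base;
	} __attribute__((packed));

	/* Linux's per-CPU TSS: the hardware part plus our own additions. */
	struct tss_struct {
		struct i386_hw_tss x86_tss;
		unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
		/* ... further Linux-only members elided ... */
	};

The awkwardness is the extra member hop at every use site: what used to be tss->esp0 is now tss->x86_tss.esp0.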
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--	arch/i386/kernel/asm-offsets.c	|  2
-rw-r--r--	arch/i386/kernel/doublefault.c	| 29
-rw-r--r--	arch/i386/kernel/ioport.c	|  2
-rw-r--r--	arch/i386/kernel/process.c	|  8
-rw-r--r--	arch/i386/kernel/sysenter.c	|  6
-rw-r--r--	arch/i386/kernel/traps.c	|  4
-rw-r--r--	arch/i386/kernel/vmi.c		|  8
-rw-r--r--	include/asm-i386/processor.h	| 24
8 files changed, 47 insertions(+), 36 deletions(-)
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 655cc8d4c74..d558adfc293 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -93,7 +93,7 @@ void foo(void)
 	OFFSET(pbe_next, pbe, next);
 
 	/* Offset from the sysenter stack to tss.esp0 */
-	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
+	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
 		 sizeof(struct tss_struct));
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
diff --git a/arch/i386/kernel/doublefault.c b/arch/i386/kernel/doublefault.c
index b4d14c2eb34..265c5597efb 100644
--- a/arch/i386/kernel/doublefault.c
+++ b/arch/i386/kernel/doublefault.c
@@ -33,7 +33,7 @@ static void doublefault_fn(void)
 	printk("double fault, tss at %08lx\n", tss);
 
 	if (ptr_ok(tss)) {
-		struct tss_struct *t = (struct tss_struct *)tss;
+		struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
 
 		printk("eip = %08lx, esp = %08lx\n", t->eip, t->esp);
 
@@ -49,18 +49,21 @@ static void doublefault_fn(void)
 }
 
 struct tss_struct doublefault_tss __cacheline_aligned = {
-	.esp0		= STACK_START,
-	.ss0		= __KERNEL_DS,
-	.ldt		= 0,
-	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
+	.x86_tss = {
+		.esp0		= STACK_START,
+		.ss0		= __KERNEL_DS,
+		.ldt		= 0,
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
 
-	.eip		= (unsigned long) doublefault_fn,
-	.eflags		= X86_EFLAGS_SF | 0x2,	/* 0x2 bit is always set */
-	.esp		= STACK_START,
-	.es		= __USER_DS,
-	.cs		= __KERNEL_CS,
-	.ss		= __KERNEL_DS,
-	.ds		= __USER_DS,
+		.eip		= (unsigned long) doublefault_fn,
+		/* 0x2 bit is always set */
+		.eflags		= X86_EFLAGS_SF | 0x2,
+		.esp		= STACK_START,
+		.es		= __USER_DS,
+		.cs		= __KERNEL_CS,
+		.ss		= __KERNEL_DS,
+		.ds		= __USER_DS,
 
-	.__cr3		= __pa(swapper_pg_dir)
+		.__cr3		= __pa(swapper_pg_dir)
+	}
 };
diff --git a/arch/i386/kernel/ioport.c b/arch/i386/kernel/ioport.c
index 1b4530e6cd8..d1e42e0dbe6 100644
--- a/arch/i386/kernel/ioport.c
+++ b/arch/i386/kernel/ioport.c
@@ -114,7 +114,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 		 * Reset the owner so that a process switch will not set
 		 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
 		 */
-		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 		tss->io_bitmap_owner = NULL;
 
 		put_cpu();
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 7e8e129b3d7..5fb9524c6f4 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -375,7 +375,7 @@ void exit_thread(void)
 		t->io_bitmap_max = 0;
 		tss->io_bitmap_owner = NULL;
 		tss->io_bitmap_max = 0;
-		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		put_cpu();
 	}
 }
@@ -554,7 +554,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 		 * Disable the bitmap via an invalid offset. We still cache
 		 * the previous bitmap owner and the IO bitmap contents:
 		 */
-		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
 		return;
 	}
 
@@ -564,7 +564,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 	 * matches the next task, we dont have to do anything but
 	 * to set a valid offset in the TSS:
 	 */
-		tss->io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		return;
 	}
 	/*
@@ -576,7 +576,7 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 	 * redundant copies when the currently switched task does not
 	 * perform any I/O during its timeslice.
 	 */
-	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
 }
 
 /*
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0b9768ee1e8..94defac6fc3 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -183,10 +183,10 @@ void enable_sep_cpu(void)
 		return;
 	}
 
-	tss->ss1 = __KERNEL_CS;
-	tss->esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	tss->x86_tss.ss1 = __KERNEL_CS;
+	tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-	wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
 	put_cpu();
 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 8722444caca..e0a23bee696 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -596,7 +596,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 	 * and we set the offset field correctly. Then we let the CPU to
 	 * restart the faulting instruction.
 	 */
-	if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
+	if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
 	    thread->io_bitmap_ptr) {
 		memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
 		       thread->io_bitmap_max);
@@ -609,7 +609,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
 			       thread->io_bitmap_max, 0xff,
 			       tss->io_bitmap_max - thread->io_bitmap_max);
 		tss->io_bitmap_max = thread->io_bitmap_max;
-		tss->io_bitmap_base = IO_BITMAP_OFFSET;
+		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 		tss->io_bitmap_owner = thread;
 		put_cpu();
 		return;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 626c82063d1..8f3bac47345 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -230,14 +230,14 @@ static void vmi_set_tr(void)
 static void vmi_load_esp0(struct tss_struct *tss,
 			  struct thread_struct *thread)
 {
-	tss->esp0 = thread->esp0;
+	tss->x86_tss.esp0 = thread->esp0;
 
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
+	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
 
-	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->esp0);
+	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
 }
 
 static void vmi_flush_tlb_user(void)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 77e263267aa..92226047464 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -291,7 +291,8 @@ typedef struct {
 
 struct thread_struct;
 
-struct tss_struct {
+/* This is the TSS defined by the hardware. */
+struct i386_hw_tss {
 	unsigned short	back_link,__blh;
 	unsigned long	esp0;
 	unsigned short	ss0,__ss0h;
@@ -315,6 +316,11 @@ struct tss_struct {
 	unsigned short gs, __gsh;
 	unsigned short ldt, __ldth;
 	unsigned short	trace, io_bitmap_base;
+} __attribute__((packed));
+
+struct tss_struct {
+	struct i386_hw_tss x86_tss;
+
 	/*
 	 * The extra 1 is there because the CPU will access an
 	 * additional byte beyond the end of the IO permission
@@ -381,10 +387,12 @@ struct thread_struct {
  * be within the limit.
  */
 #define INIT_TSS  {							\
-	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
-	.ss0		= __KERNEL_DS,					\
-	.ss1		= __KERNEL_CS,					\
-	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
+	.x86_tss = {							\
+		.esp0		= sizeof(init_stack) + (long)&init_stack, \
+		.ss0		= __KERNEL_DS,				\
+		.ss1		= __KERNEL_CS,				\
+		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
+	},								\
 	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
 }
@@ -493,10 +501,10 @@ static inline void rep_nop(void)
 
 static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
-	tss->esp0 = thread->esp0;
+	tss->x86_tss.esp0 = thread->esp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
+	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
 }
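
An aside on why the asm-offsets.c and doublefault.c hunks stay correct (an editor's observation, not part of the patch): x86_tss is the first member of struct tss_struct, so it sits at offset 0. The hardware therefore still finds the expected layout at the start of the struct, the pointer cast in doublefault.c remains valid, and offsetof() accepts a nested member designator directly. A minimal stand-alone check using hypothetical placeholder structs:

	#include <assert.h>
	#include <stddef.h>

	struct hw_part {			/* stands in for i386_hw_tss */
		unsigned long esp0;
	};

	struct whole {				/* stands in for tss_struct */
		struct hw_part x86_tss;
		unsigned long linux_extra;	/* a Linux-only addition */
	};

	int main(void)
	{
		/* The embedded hardware struct starts at offset 0... */
		assert(offsetof(struct whole, x86_tss) == 0);
		/* ...so the nested designator resolves to the same offset. */
		assert(offsetof(struct whole, x86_tss.esp0) ==
		       offsetof(struct hw_part, esp0));
		return 0;
	}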