From 7c17e70613d2c70f9d5d0b5d37126fd5e8e9b728 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 2 May 2007 19:27:07 +0200
Subject: [PATCH] x86-64: Get rid of dead code in suspend resume

o Get rid of dead code in wakeup.S

o We never restore from saved_gdt, saved_idt, saved_ldt, saved_tss,
  saved_cr3, saved_cr4, saved_cr0, real_save_gdt, saved_efer, saved_efer2.
  Get rid of the associated code.

o Get rid of bogus_magic, bogus_31_magic and bogus_magic2. No longer being
  used.

Signed-off-by: Eric W. Biederman
Signed-off-by: Vivek Goyal
Signed-off-by: Andi Kleen
---
 arch/x86_64/kernel/acpi/wakeup.S | 57 +---------------------------------------
 1 file changed, 1 insertion(+), 56 deletions(-)

(limited to 'arch/x86_64/kernel/acpi/wakeup.S')

diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S
index 185faa911db..6ece70e91a3 100644
--- a/arch/x86_64/kernel/acpi/wakeup.S
+++ b/arch/x86_64/kernel/acpi/wakeup.S
@@ -258,8 +258,6 @@ gdt_48a:
 	.word	0, 0				# gdt base (filled in later)
 
-real_save_gdt:	.word 0
-		.quad 0
 real_magic:	.quad 0
 video_mode:	.quad 0
 video_flags:	.quad 0
@@ -272,10 +270,6 @@ bogus_32_magic:
 	movb	$0xb3,%al	;  outb %al,$0x80
 	jmp bogus_32_magic
 
-bogus_31_magic:
-	movb	$0xb1,%al	;  outb %al,$0x80
-	jmp bogus_31_magic
-
 bogus_cpu:
 	movb	$0xbc,%al	;  outb %al,$0x80
 	jmp bogus_cpu
@@ -346,16 +340,6 @@ check_vesaa:
 _setbada: jmp setbada
 
-	.code64
-bogus_magic:
-	movw	$0x0e00 + 'B', %ds:(0xb8018)
-	jmp bogus_magic
-
-bogus_magic2:
-	movw	$0x0e00 + '2', %ds:(0xb8018)
-	jmp bogus_magic2
-
-
 wakeup_stack_begin:	# Stack grows down
 
 .org	0xff0
@@ -373,28 +357,11 @@ ENTRY(wakeup_end)
 #
 # Returned address is location of code in low memory (past data and stack)
 #
+	.code64
 ENTRY(acpi_copy_wakeup_routine)
 	pushq	%rax
-	pushq	%rcx
 	pushq	%rdx
 
-	sgdt	saved_gdt
-	sidt	saved_idt
-	sldt	saved_ldt
-	str	saved_tss
-
-	movq	%cr3, %rdx
-	movq	%rdx, saved_cr3
-	movq	%cr4, %rdx
-	movq	%rdx, saved_cr4
-	movq	%cr0, %rdx
-	movq	%rdx, saved_cr0
-	sgdt	real_save_gdt - wakeup_start (,%rdi)
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	movl	%eax, saved_efer
-	movl	%edx, saved_efer2
-
 	movl	saved_video_mode, %edx
 	movl	%edx, video_mode - wakeup_start (,%rdi)
 	movl	acpi_video_flags, %edx
@@ -407,17 +374,8 @@ ENTRY(acpi_copy_wakeup_routine)
 	cmpl	$0x9abcdef0, %eax
 	jne	bogus_32_magic
 
-	# make sure %cr4 is set correctly (features, etc)
-	movl	saved_cr4 - __START_KERNEL_map, %eax
-	movq	%rax, %cr4
-
-	movl	saved_cr0 - __START_KERNEL_map, %eax
-	movq	%rax, %cr0
-	jmp	1f		# Flush pipelines
-1:
 	# restore the regs we used
 	popq	%rdx
-	popq	%rcx
 	popq	%rax
 ENTRY(do_suspend_lowlevel_s4bios)
 	ret
@@ -512,16 +470,3 @@ ENTRY(saved_eip)	.quad	0
 ENTRY(saved_esp)	.quad	0
 
 ENTRY(saved_magic)	.quad	0
-
-ALIGN
-# saved registers
-saved_gdt:	.quad	0,0
-saved_idt:	.quad	0,0
-saved_ldt:	.quad	0
-saved_tss:	.quad	0
-
-saved_cr0:	.quad 0
-saved_cr3:	.quad 0
-saved_cr4:	.quad 0
-saved_efer:	.quad 0
-saved_efer2:	.quad 0
-- cgit
From 7db681d7e4038ad205b5face5cf7f7815633e1b5 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 2 May 2007 19:27:07 +0200
Subject: [PATCH] x86-64: wakeup.S rename registers to reflect right names

o Use appropriate names for 64bit registers.

Signed-off-by: Eric W. Biederman
Signed-off-by: Vivek Goyal
Signed-off-by: Andi Kleen
---
 arch/x86_64/kernel/acpi/wakeup.S | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

(limited to 'arch/x86_64/kernel/acpi/wakeup.S')

diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S
index 6ece70e91a3..17dbeff64ee 100644
--- a/arch/x86_64/kernel/acpi/wakeup.S
+++ b/arch/x86_64/kernel/acpi/wakeup.S
@@ -211,16 +211,16 @@ wakeup_long64:
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_esp, %rsp
+	movq	saved_rsp, %rsp
 
 	movw	$0x0e00 + 'x', %ds:(0xb8018)
-	movq	saved_ebx, %rbx
-	movq	saved_edi, %rdi
-	movq	saved_esi, %rsi
-	movq	saved_ebp, %rbp
+	movq	saved_rbx, %rbx
+	movq	saved_rdi, %rdi
+	movq	saved_rsi, %rsi
+	movq	saved_rbp, %rbp
 
 	movw	$0x0e00 + '!', %ds:(0xb801a)
-	movq	saved_eip, %rax
+	movq	saved_rip, %rax
 	jmp	*%rax
 
 .code32
@@ -408,13 +408,13 @@ do_suspend_lowlevel:
 	movq %r15, saved_context_r15(%rip)
 	pushfq ; popq saved_context_eflags(%rip)
 
-	movq	$.L97, saved_eip(%rip)
+	movq	$.L97, saved_rip(%rip)
 
-	movq %rsp,saved_esp
-	movq %rbp,saved_ebp
-	movq %rbx,saved_ebx
-	movq %rdi,saved_edi
-	movq %rsi,saved_esi
+	movq %rsp,saved_rsp
+	movq %rbp,saved_rbp
+	movq %rbx,saved_rbx
+	movq %rdi,saved_rdi
+	movq %rsi,saved_rsi
 
 	addq	$8, %rsp
 	movl	$3, %edi
@@ -461,12 +461,12 @@ do_suspend_lowlevel:
 .data
 ALIGN
-ENTRY(saved_ebp)	.quad	0
-ENTRY(saved_esi)	.quad	0
-ENTRY(saved_edi)	.quad	0
-ENTRY(saved_ebx)	.quad	0
+ENTRY(saved_rbp)	.quad	0
+ENTRY(saved_rsi)	.quad	0
+ENTRY(saved_rdi)	.quad	0
+ENTRY(saved_rbx)	.quad	0
 
-ENTRY(saved_eip)	.quad	0
-ENTRY(saved_esp)	.quad	0
+ENTRY(saved_rip)	.quad	0
+ENTRY(saved_rsp)	.quad	0
 
 ENTRY(saved_magic)	.quad	0
-- cgit
From 275f55170ec2b5d777b070cb8ab9e5d58e65a2a8 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 2 May 2007 19:27:07 +0200
Subject: [PATCH] x86-64: wakeup.S misc cleanups

o Various cleanups. One of the main purposes of the cleanups is to make
  wakeup.S as close as possible to trampoline.S.

o Following are the changes:
  - Indentation fixes for comments.
  - Changed the gdt table to compact form, to resemble the one in
    trampoline.S.
  - Take the jump to 32bit from real mode using ljmpl. Makes the code more
    readable.
  - After enabling long mode, directly take a long jump to 64bit mode. No
    need to take an extra jump to "reach_compatibility_mode".
  - Stack is not used after real mode, so don't load the stack in 32 bit
    mode.
  - No need to enable PGE here.
  - No need to do an extra EFER read; we trash the read contents anyway.
  - No need to enable the system call bit (EFER_SCE). It will be enabled
    when the original EFER is restored.
  - No need to set the MP, ET, NE, WP and AM bits in cr0. Very soon we will
    reload the original cr0 while restoring the processor state.

Signed-off-by: Eric W. Biederman
Signed-off-by: Vivek Goyal
Signed-off-by: Andi Kleen
---
 arch/x86_64/kernel/acpi/wakeup.S | 112 ++++++++++++++-------------------------
 1 file changed, 40 insertions(+), 72 deletions(-)

(limited to 'arch/x86_64/kernel/acpi/wakeup.S')

diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S
index 17dbeff64ee..bd4c6f1a6f3 100644
--- a/arch/x86_64/kernel/acpi/wakeup.S
+++ b/arch/x86_64/kernel/acpi/wakeup.S
@@ -30,11 +30,12 @@ wakeup_code:
 	cld
 	# setup data segment
 	movw	%cs, %ax
-	movw	%ax, %ds		# Make ds:0 point to wakeup_start
+	movw	%ax, %ds			# Make ds:0 point to wakeup_start
 	movw	%ax, %ss
-	mov	$(wakeup_stack - wakeup_code), %sp	# Private stack is needed for ASUS board
+	# Private stack is needed for ASUS board
+	mov $(wakeup_stack - wakeup_code), %sp
 
-	pushl	$0			# Kill any dangerous flags
+	pushl	$0				# Kill any dangerous flags
 	popfl
 
 	movl	real_magic - wakeup_code, %eax
@@ -45,7 +46,7 @@ wakeup_code:
 	jz	1f
 	lcall   $0xc000,$3
 	movw	%cs, %ax
-	movw	%ax, %ds		# Bios might have played with that
+	movw	%ax, %ds			# Bios might have played with that
 	movw	%ax, %ss
 1:
 
@@ -75,9 +76,12 @@ wakeup_code:
 	jmp	1f
 1:
-	.byte 0x66, 0xea		# prefix + jmpi-opcode
-	.long	wakeup_32 - __START_KERNEL_map
-	.word	__KERNEL_CS
+	ljmpl   *(wakeup_32_vector - wakeup_code)
+
+	.balign 4
+wakeup_32_vector:
+	.long   wakeup_32 - __START_KERNEL_map
+	.word   __KERNEL32_CS, 0
 
 .code32
 wakeup_32:
@@ -96,65 +100,50 @@ wakeup_32:
 	jnc	bogus_cpu
 	movl	%edx,%edi
 
-	movw	$__KERNEL_DS, %ax
-	movw	%ax, %ds
-	movw	%ax, %es
-	movw	%ax, %fs
-	movw	%ax, %gs
+	movl	$__KERNEL_DS, %eax
+	movl	%eax, %ds
 
-	movw	$__KERNEL_DS, %ax
-	movw	%ax, %ss
-
-	mov	$(wakeup_stack - __START_KERNEL_map), %esp
 	movl	saved_magic - __START_KERNEL_map, %eax
 	cmpl	$0x9abcdef0, %eax
 	jne	bogus_32_magic
 
+	movw	$0x0e00 + 'i', %ds:(0xb8012)
+	movb	$0xa8, %al	;  outb %al, $0x80;
+
 	/*
	 * Prepare for entering 64bits mode
	 */
 
-	/* Enable PAE mode and PGE */
+	/* Enable PAE */
 	xorl	%eax, %eax
 	btsl	$5, %eax
-	btsl	$7, %eax
 	movl	%eax, %cr4
 
 	/* Setup early boot stage 4 level pagetables */
 	movl	$(wakeup_level4_pgt - __START_KERNEL_map), %eax
 	movl	%eax, %cr3
 
-	/* Setup EFER (Extended Feature Enable Register) */
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	/* Fool rdmsr and reset %eax to avoid dependences */
-	xorl	%eax, %eax
 	/* Enable Long Mode */
+	xorl	%eax, %eax
 	btsl	$_EFER_LME, %eax
-	/* Enable System Call */
-	btsl	$_EFER_SCE, %eax
 
-	/* No Execute supported? */
+	/* No Execute supported? */
 	btl	$20,%edi
 	jnc     1f
 	btsl	$_EFER_NX, %eax
 
-1:	/* Make changes effective */
+1:	movl	$MSR_EFER, %ecx
+	xorl	%edx, %edx
 	wrmsr
-	wbinvd
 
 	xorl	%eax, %eax
 	btsl	$31, %eax		/* Enable paging and in turn activate Long Mode */
 	btsl	$0, %eax		/* Enable protected mode */
-	btsl	$1, %eax		/* Enable MP */
-	btsl	$4, %eax		/* Enable ET */
-	btsl	$5, %eax		/* Enable NE */
-	btsl	$16, %eax		/* Enable WP */
-	btsl	$18, %eax		/* Enable AM */
 
 	/* Make changes effective */
 	movl	%eax, %cr0
+
 	/* At this point:
		CR4.PAE must be 1
		CS.L must be 0
@@ -162,11 +151,6 @@ wakeup_32:
		Next instruction must be a branch
		This must be on identity-mapped page
	*/
-	jmp	reach_compatibility_mode
-reach_compatibility_mode:
-	movw	$0x0e00 + 'i', %ds:(0xb8012)
-	movb	$0xa8, %al	;  outb %al, $0x80;
-
 	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */
-	movw	$0x0e00 + 'n', %ds:(0xb8014)
-	movb	$0xa9, %al	;  outb %al, $0x80
-
-	/* Load new GDT with the 64bit segment using 32bit descriptor */
-	movl	$(pGDT32 - __START_KERNEL_map), %eax
-	lgdt	(%eax)
-
-	movl	$(wakeup_jumpvector - __START_KERNEL_map), %eax
 	/* Finally jump in 64bit mode */
-	ljmp	*(%eax)
+	ljmp	*(wakeup_long64_vector - __START_KERNEL_map)
 
-wakeup_jumpvector:
-	.long	wakeup_long64 - __START_KERNEL_map
-	.word	__KERNEL_CS
+	.balign	4
+wakeup_long64_vector:
+	.long	wakeup_long64 - __START_KERNEL_map
+	.word	__KERNEL_CS, 0
 
 .code64
 
-	/* Hooray, we are in Long 64-bit mode (but still running in low memory) */
+	/* Hooray, we are in Long 64-bit mode (but still running in
+	 * low memory)
+	 */
 wakeup_long64:
 	/*
	 * We must switch to a new descriptor in kernel space for the GDT
@@ -201,6 +180,9 @@ wakeup_long64:
	 */
 	lgdt	cpu_gdt_descr - __START_KERNEL_map
 
+	movw	$0x0e00 + 'n', %ds:(0xb8014)
+	movb	$0xa9, %al	;  outb %al, $0x80
+
 	movw	$0x0e00 + 'u', %ds:(0xb8016)
 	nop
@@ -227,33 +209,19 @@ wakeup_long64:
 	.align	64
 gdta:
+	/* Its good to keep gdt in sync with one in trampoline.S */
 	.word	0, 0, 0, 0		# dummy
-
-	.word	0, 0, 0, 0		# unused
-
-	.word	0xFFFF			# 4Gb - (0x100000*0x1000 = 4Gb)
-	.word	0			# base address = 0
-	.word	0x9B00			# code read/exec. ??? Why I need 0x9B00 (as opposed to 0x9A00 in order for this to work?)
-	.word	0x00CF			# granularity = 4096, 386
-					#  (+5th nibble of limit)
-
-	.word	0xFFFF			# 4Gb - (0x100000*0x1000 = 4Gb)
-	.word	0			# base address = 0
-	.word	0x9200			# data read/write
-	.word	0x00CF			# granularity = 4096, 386
-					#  (+5th nibble of limit)
-# this is 64bit descriptor for code
-	.word	0xFFFF
-	.word	0
-	.word	0x9A00			# code read/exec
-	.word	0x00AF			# as above, but it is long mode and with D=0
+	/* ??? Why I need the accessed bit set in order for this to work? */
+	.quad   0x00cf9b000000ffff	# __KERNEL32_CS
+	.quad   0x00af9b000000ffff	# __KERNEL_CS
+	.quad   0x00cf93000000ffff	# __KERNEL_DS
 
 idt_48a:
 	.word	0			# idt limit = 0
 	.word	0, 0			# idt base = 0L
 
 gdt_48a:
-	.word	0x8000			# gdt limit=2048,
+	.word	0x800			# gdt limit=2048,
					#  256 GDT entries
 	.word	0, 0			# gdt base (filled in later)
 
@@ -263,7 +231,7 @@ real_magic:	.quad 0
 video_mode:	.quad 0
 video_flags:	.quad 0
 
 bogus_real_magic:
-	movb	$0xba,%al	;  outb %al,$0x80
+	movb	$0xba,%al	;  outb %al,$0x80
 	jmp bogus_real_magic
 
 bogus_32_magic:
-- cgit
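Aside: the compact .quad descriptors introduced above pack exactly the fields the old .word layout spelled out, and the "accessed bit" question in the comment is just the difference between type byte 0x9b and 0x9a. A small host-side C sketch (not part of the patch, and not kernel code) that decodes the three constants according to the standard x86 segment-descriptor layout; the __KERNEL32_CS/__KERNEL_CS/__KERNEL_DS names are taken from the patch:

    /* Decode the 8-byte GDT descriptors used in gdta above. */
    #include <stdint.h>
    #include <stdio.h>

    static void decode(const char *name, uint64_t d)
    {
        uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
        uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) | (uint32_t)((d >> 56) << 24);
        uint8_t  access = (uint8_t)(d >> 40);          /* P, DPL, S, type, accessed */
        unsigned flags  = (unsigned)((d >> 52) & 0xf); /* G, D/B, L, AVL */

        printf("%-14s base=%#010x limit=%#07x access=%#04x G=%u D=%u L=%u\n",
               name, base, limit, access,
               (flags >> 3) & 1, (flags >> 2) & 1, (flags >> 1) & 1);
    }

    int main(void)
    {
        decode("__KERNEL32_CS", 0x00cf9b000000ffffULL); /* 4G 32-bit code, accessed */
        decode("__KERNEL_CS",   0x00af9b000000ffffULL); /* 4G code with L=1 (long mode) */
        decode("__KERNEL_DS",   0x00cf93000000ffffULL); /* 4G data read/write */
        return 0;
    }

The only bit that differs between __KERNEL32_CS and __KERNEL_CS is the L flag (and D cleared), which is why the same 0x9b access byte can serve both the compatibility-mode and the 64-bit code segment.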
From d8e1baf10d62c06fc52e89137357e54da3d92672 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 2 May 2007 19:27:07 +0200
Subject: [PATCH] x86-64: 64bit ACPI wakeup trampoline

o Moved wakeup_level4_pgt into the wakeup routine so we can run the kernel
  above 4G.

o Now we first go to 64bit mode and continue to run from the trampoline,
  and only then start accessing kernel symbols and restoring processor
  context. This enables us to resume even in a relocatable kernel context,
  when the kernel might not be loaded at the physical addr it has been
  compiled for.

o Removed the need for modifying any existing kernel page table.

o Increased the size of the wakeup routine to 8K. This is required because
  the wakeup page tables are on the trampoline itself and have to be at a
  4K boundary, hence one page is not sufficient.

Signed-off-by: Eric W. Biederman
Signed-off-by: Vivek Goyal
Signed-off-by: Andi Kleen
---
 arch/x86_64/kernel/acpi/wakeup.S | 59 +++++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 22 deletions(-)

(limited to 'arch/x86_64/kernel/acpi/wakeup.S')

diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S
index bd4c6f1a6f3..766cfbcac1d 100644
--- a/arch/x86_64/kernel/acpi/wakeup.S
+++ b/arch/x86_64/kernel/acpi/wakeup.S
@@ -1,6 +1,7 @@
 .text
 #include <linux/linkage.h>
 #include <asm/segment.h>
+#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/msr.h>
@@ -62,12 +63,15 @@ wakeup_code:
 
 	movb	$0xa2, %al	;  outb %al, $0x80
 
-	lidt	%ds:idt_48a - wakeup_code
-	xorl	%eax, %eax
-	movw	%ds, %ax			# (Convert %ds:gdt to a linear ptr)
-	shll	$4, %eax
-	addl	$(gdta - wakeup_code), %eax
-	movl	%eax, gdt_48a +2 - wakeup_code
+	mov	%ds, %ax			# Find 32bit wakeup_code addr
+	movzx   %ax, %esi			# (Convert %ds:gdt to a liner ptr)
+	shll    $4, %esi
+						# Fix up the vectors
+	addl    %esi, wakeup_32_vector - wakeup_code
+	addl    %esi, wakeup_long64_vector - wakeup_code
+	addl    %esi, gdt_48a + 2 - wakeup_code	# Fixup the gdt pointer
+
+	lidtl	%ds:idt_48a - wakeup_code
 	lgdtl	%ds:gdt_48a - wakeup_code	# load gdt with whatever is
 						# appropriate
@@ -80,7 +84,7 @@ wakeup_code:
 
 	.balign 4
 wakeup_32_vector:
-	.long   wakeup_32 - __START_KERNEL_map
+	.long   wakeup_32 - wakeup_code
 	.word   __KERNEL32_CS, 0
 
 .code32
@@ -103,10 +107,6 @@ wakeup_32:
 	movl	$__KERNEL_DS, %eax
 	movl	%eax, %ds
 
-	movl	saved_magic - __START_KERNEL_map, %eax
-	cmpl	$0x9abcdef0, %eax
-	jne	bogus_32_magic
-
 	movw	$0x0e00 + 'i', %ds:(0xb8012)
 	movb	$0xa8, %al	;  outb %al, $0x80;
@@ -120,7 +120,7 @@ wakeup_32:
 	movl	%eax, %cr4
 
 	/* Setup early boot stage 4 level pagetables */
-	movl	$(wakeup_level4_pgt - __START_KERNEL_map), %eax
+	leal    (wakeup_level4_pgt - wakeup_code)(%esi), %eax
 	movl	%eax, %cr3
@@ -159,11 +159,11 @@ wakeup_32:
	 */
 
 	/* Finally jump in 64bit mode */
-	ljmp	*(wakeup_long64_vector - __START_KERNEL_map)
+	ljmp	*(wakeup_long64_vector - wakeup_code)(%esi)
 
 	.balign 4
 wakeup_long64_vector:
-	.long	wakeup_long64 - __START_KERNEL_map
+	.long	wakeup_long64 - wakeup_code
 	.word	__KERNEL_CS, 0
 
 .code64
@@ -178,11 +178,16 @@ wakeup_long64:
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
-	lgdt	cpu_gdt_descr - __START_KERNEL_map
+	lgdt	cpu_gdt_descr
 
 	movw	$0x0e00 + 'n', %ds:(0xb8014)
 	movb	$0xa9, %al	;  outb %al, $0x80
 
+	movq	saved_magic, %rax
+	movq	$0x123456789abcdef0, %rdx
+	cmpq	%rdx, %rax
+	jne	bogus_64_magic
+
 	movw	$0x0e00 + 'u', %ds:(0xb8016)
 	nop
@@ -223,20 +228,21 @@ idt_48a:
 gdt_48a:
 	.word	0x800			# gdt limit=2048,
					#  256 GDT entries
-	.word	0, 0			# gdt base (filled in later)
-
+	.long   gdta - wakeup_code	# gdt base (relocated in later)
 real_magic:	.quad 0
 video_mode:	.quad 0
 video_flags:	.quad 0
 
+.code16
 bogus_real_magic:
 	movb	$0xba,%al	;  outb %al,$0x80
 	jmp bogus_real_magic
 
-bogus_32_magic:
+.code64
+bogus_64_magic:
 	movb	$0xb3,%al	;  outb %al,$0x80
-	jmp bogus_32_magic
+	jmp bogus_64_magic
 
 bogus_cpu:
 	movb	$0xbc,%al	;  outb %al,$0x80
@@ -263,6 +269,7 @@ bogus_cpu:
 #define VIDEO_FIRST_V7 0x0900
 
 # Setting of user mode (AX=mode ID) => CF=success
+.code16
 mode_seta:
 	movw	%ax, %bx
 #if 0
@@ -313,6 +320,13 @@ wakeup_stack_begin:	# Stack grows down
 .org	0xff0
 wakeup_stack:		# Just below end of page
 
+.org	0x1000
+ENTRY(wakeup_level4_pgt)
+	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+	.fill	510,8,0
+	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+	.quad	level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
 ENTRY(wakeup_end)
 
 ##
@@ -338,9 +352,10 @@ ENTRY(acpi_copy_wakeup_routine)
 	movq	$0x123456789abcdef0, %rdx
 	movq	%rdx, saved_magic
 
-	movl	saved_magic - __START_KERNEL_map, %eax
-	cmpl	$0x9abcdef0, %eax
-	jne	bogus_32_magic
+	movq	saved_magic, %rax
+	movq	$0x123456789abcdef0, %rdx
+	cmpq	%rdx, %rax
+	jne	bogus_64_magic
 
 	# restore the regs we used
 	popq	%rdx
-- cgit
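Aside: the wakeup_level4_pgt added above fills only PML4 entries 0 (identity mapping, via level3_ident_pgt) and 511 (the kernel text mapping, via level3_kernel_pgt), which is what the "(2^48-(2*1024*1024*1024))/(2^39) = 511" comment is computing. A host-side C sketch, assuming the usual x86-64 value of __START_KERNEL_map (0xffffffff80000000), showing where those two slot numbers come from; this is an illustration, not code from the patch:

    /* Which PML4 slots the wakeup trampoline's page table needs. */
    #include <stdint.h>
    #include <stdio.h>

    #define START_KERNEL_MAP 0xffffffff80000000ULL  /* assumed __START_KERNEL_map */

    static unsigned pml4_index(uint64_t vaddr)
    {
        return (unsigned)((vaddr >> 39) & 0x1ff);   /* bits 47:39 select the slot */
    }

    int main(void)
    {
        /* Identity mapping used while we still execute from low memory */
        printf("identity (0x0)     -> PML4[%u]\n", pml4_index(0));
        /* Kernel mapping used once wakeup_long64 jumps to kernel addresses */
        printf("__START_KERNEL_map -> PML4[%u]\n", pml4_index(START_KERNEL_MAP));
        return 0;   /* prints slots 0 and 511 */
    }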
From a4831e08b7f3ed51623c9eb25e8c945b76b3eda3 Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 2 May 2007 19:27:08 +0200
Subject: [PATCH] x86-64: Move cpu verification code to common file

o This patch moves the code to verify long mode and SSE to a common file.
  This code is now shared by trampoline.S, wakeup.S, boot/setup.S and
  boot/compressed/head.S.

o So far we used to do a very limited check in trampoline.S, wakeup.S and
  in the 32bit entry point. Now all the entry paths are forced to do the
  exhaustive check, including SSE, because verify_cpu is shared.

o I am keeping this patch as the last one in the x86 relocatable series
  because the previous patches have already got a fair amount of testing
  and I don't want to disturb that. If a problem is introduced by this
  patch, it can at least be easily isolated.

Signed-off-by: Eric W. Biederman
Signed-off-by: Vivek Goyal
Signed-off-by: Andi Kleen
---
 arch/x86_64/kernel/acpi/wakeup.S | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

(limited to 'arch/x86_64/kernel/acpi/wakeup.S')

diff --git a/arch/x86_64/kernel/acpi/wakeup.S b/arch/x86_64/kernel/acpi/wakeup.S
index 766cfbcac1d..8550a6ffa27 100644
--- a/arch/x86_64/kernel/acpi/wakeup.S
+++ b/arch/x86_64/kernel/acpi/wakeup.S
@@ -43,6 +43,11 @@ wakeup_code:
 	cmpl	$0x12345678, %eax
 	jne	bogus_real_magic
 
+	call	verify_cpu			# Verify the cpu supports long
+						# mode
+	testl   %eax, %eax
+	jnz	no_longmode
+
 	testl	$1, video_flags - wakeup_code
 	jz	1f
 	lcall   $0xc000,$3
 	movw	%cs, %ax
 	movw	%ax, %ds			# Bios might have played with that
 	movw	%ax, %ss
 1:
@@ -92,18 +97,13 @@ wakeup_32:
 # Running in this code, but at low address; paging is not yet turned on.
 	movb	$0xa5, %al	;  outb %al, $0x80
 
-	/* Check if extended functions are implemented */
-	movl	$0x80000000, %eax
-	cpuid
-	cmpl	$0x80000000, %eax
-	jbe	bogus_cpu
-	wbinvd
-	mov	$0x80000001, %eax
-	cpuid
-	btl	$29, %edx
-	jnc	bogus_cpu
-	movl	%edx,%edi
-
 	movl	$__KERNEL_DS, %eax
 	movl	%eax, %ds
@@ -123,6 +116,11 @@ wakeup_32:
 	leal    (wakeup_level4_pgt - wakeup_code)(%esi), %eax
 	movl	%eax, %cr3
 
+	/* Check if nx is implemented */
+	movl	$0x80000001, %eax
+	cpuid
+	movl	%edx,%edi
+
 	/* Enable Long Mode */
 	xorl	%eax, %eax
 	btsl	$_EFER_LME, %eax
@@ -244,10 +242,12 @@ bogus_64_magic:
 	movb	$0xb3,%al	;  outb %al,$0x80
 	jmp bogus_64_magic
 
-bogus_cpu:
-	movb	$0xbc,%al	;  outb %al,$0x80
-	jmp bogus_cpu
+.code16
+no_longmode:
+	movb	$0xbc,%al	;  outb %al,$0x80
+	jmp no_longmode
 
+#include "../verify_cpu.S"
 
 /* This code uses an extended set of video mode numbers. These include:
  * Aliases for standard modes
-- cgit
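Aside: the long-mode test that used to be open-coded in wakeup_32 (and is now centralized in verify_cpu) boils down to CPUID leaf 0x80000001, EDX bit 29. A hedged host-side C equivalent using the compiler's <cpuid.h> helper, for illustration only; the kernel performs this check in real-mode assembly, not via this API:

    /* Mirror of the removed "btl $29, %edx" long-mode check. */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Highest extended leaf; mirrors the old "cmpl $0x80000000; jbe" test */
        if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x80000001) {
            puts("no extended CPUID leaves -> no long mode");
            return 1;
        }

        __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
        if (edx & (1u << 29))            /* EDX bit 29 = LM flag */
            puts("long mode supported");
        else
            puts("no long mode");        /* wakeup.S would jump to no_longmode */
        return 0;
    }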