Changeset 1283 in vbox
- Timestamp: Mar 7, 2007 12:02:11 AM (18 years ago)
- svn:sync-xref-src-repo-rev: 19236
- Location: trunk
- Files: 18 edited
trunk/Config.kmk  (r1190 → r1283)
    DEFS.amd64      = __AMD64__
    DEFS.darwin     = __DARWIN__
+   DEFS.darwin.x86 = VBOX_WITH_HYBIRD_32BIT_KERNEL
    DEFS.freebsd    = __FREEBSD__
    DEFS.l4         = __L4__ __L4ENV__ L4API_l4v2 __NO_CTYPE _FILE_OFFSET_BITS=64
trunk/include/VBox/vm.h  (r686 → r1283)
            struct CPUM     s;
    #endif
+   #ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+           char            padding[3584];      /* multiple of 32 */
+   #else
            char            padding[HC_ARCH_BITS == 32 ? 3424 : 3552];      /* multiple of 32 */
+   #endif
        } cpum;
trunk/include/VBox/vm.mac  (r166 → r1283)

        alignb 32
-   %if HC_ARCH_BITS == 32
+   %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+       .cpum               resb 3584
+   %elif HC_ARCH_BITS == 32
        .cpum               resb 3424
    %else
trunk/include/VBox/x86.h  (r1 → r1283)
    /** @} */

+   #pragma pack(1)
+   /**
+    * 32-bit IDTR/GDTR.
+    */
+   typedef struct X86XDTR32
+   {
+       /** Size of the descriptor table. */
+       uint16_t    cb;
+       /** Address of the descriptor table. */
+       uint32_t    uAddr;
+   } X86XDTR32, *PX86XDTR32;
+   #pragma pack()
+
+   #pragma pack(1)
+   /**
+    * 64-bit IDTR/GDTR.
+    */
+   typedef struct X86XDTR64
+   {
+       /** Size of the descriptor table. */
+       uint16_t    cb;
+       /** Address of the descriptor table. */
+       uint64_t    uAddr;
+   } X86XDTR64, *PX86XDTR64;
+   #pragma pack()

    /** @} */
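Aside, not part of the changeset: the two packed structures added above mirror the memory operand written by SGDT/SIDT, a 16-bit table limit immediately followed by the linear base address, so their sizes must come out as 6 and 10 bytes. A minimal C sketch that checks that layout assumption (the struct names mirror the changeset, the checks themselves are an assumption about their intended use):

    /* Layout check for the packed descriptor-table register structs. */
    #include <stdint.h>
    #include <assert.h>

    #pragma pack(1)
    typedef struct { uint16_t cb; uint32_t uAddr; } X86XDTR32;  /* 16-bit limit + 32-bit base */
    typedef struct { uint16_t cb; uint64_t uAddr; } X86XDTR64;  /* 16-bit limit + 64-bit base */
    #pragma pack()

    int main(void)
    {
        assert(sizeof(X86XDTR32) == 6);    /* matches the 32-bit SGDT/SIDT operand */
        assert(sizeof(X86XDTR64) == 10);   /* matches the 64-bit SGDT/SIDT operand */
        return 0;
    }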
trunk/src/VBox/VMM/CPUM.cpp  (r1212 → r1283)
         */
    #if HC_ARCH_BITS == 32
-       pHlp->pfnPrintf(pHlp,
-           "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
-           "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
-           "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
-           "cr0=%08x cr2=xxxxxxxx cr3=%08x cr4=%08x gdtr=%08x:%04x ldtr=%04x\n"
-           "dr0=%08x dr1=%08x dr2=%08x dr3=%08x dr6=%08x dr7=%08x\n"
-           "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
-           ,
-           /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
-           /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
-           (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
-           pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
-           pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
-           pCtx->gdtr.pGdt, pCtx->gdtr.cbGdt, (RTSEL)pCtx->ldtr,
-           pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
-   #else /* 64-bit */
-       pHlp->pfnPrintf(pHlp,
-           "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
-           "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
-           "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
-           " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
-           "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
-           "r14=%016RX64 r15=%016RX64\n"
-           "iopl=%d %31s\n"
-           "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
-           "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
-           "cr4=%016RX64 cr8=%016RX64 ldtr=%04x tr=%04x\n"
-           "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64\n"
-           "dr3=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
-           "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
-           "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
-           "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
-           ,
-           /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
-           pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
-           /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
-           /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
-           pCtx->r11, pCtx->r12, pCtx->r13,
-           pCtx->r14, pCtx->r15,
-           X86_EFL_GET_IOPL(efl), szEFlags,
-           (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
-           pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
-           pCtx->cr4, pCtx->cr8, pCtx->ldtr, pCtx->tr,
-           pCtx->dr0, pCtx->dr1, pCtx->dr2,
-           pCtx->dr3, pCtx->dr6, pCtx->dr7,
-           *(uint64_t *)&pCtx->gdtr[2], *(uint16_t *)&pCtx->gdtr[0], *(uint64_t *)&pCtx->idtr[2], *(uint16_t *)&pCtx->idtr[0],
-           pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
-           pCtx->FSbase, pCtx->GSbase, pCtx->efer);
+   # ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+       if (!(pCtx->efer & MSR_K6_EFER_LMA))
+   # endif
+       {
+           pHlp->pfnPrintf(pHlp,
+               "eax=xxxxxxxx ebx=%08x ecx=xxxxxxxx edx=xxxxxxxx esi=%08x edi=%08x\n"
+               "eip=xxxxxxxx esp=%08x ebp=%08x iopl=%d %31s\n"
+               "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08x\n"
+               "cr0=%08x cr2=xxxxxxxx cr3=%08x cr4=%08x gdtr=%08x:%04x ldtr=%04x\n"
+               "dr0=%08x dr1=%08x dr2=%08x dr3=%08x dr6=%08x dr7=%08x\n"
+               "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
+               ,
+               /*pCtx->eax,*/ pCtx->ebx, /*pCtx->ecx, pCtx->edx,*/ pCtx->esi, pCtx->edi,
+               /*pCtx->eip,*/ pCtx->esp, pCtx->ebp, X86_EFL_GET_IOPL(efl), szEFlags,
+               (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
+               pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3, pCtx->cr4,
+               pCtx->dr0, pCtx->dr1, pCtx->dr2, pCtx->dr3, pCtx->dr6, pCtx->dr7,
+               (uint32_t)pCtx->gdtr.uAddr, pCtx->gdtr.cb, (RTSEL)pCtx->ldtr,
+               pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp);
+       }
+   # ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+       else
+   # endif
+   #endif
+   #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
+       {
+           pHlp->pfnPrintf(pHlp,
+               "rax=xxxxxxxxxxxxxxxx rbx=%016RX64 rcx=xxxxxxxxxxxxxxxx\n"
+               "rdx=xxxxxxxxxxxxxxxx rsi=%016RX64 rdi=%016RX64\n"
+               "rip=xxxxxxxxxxxxxxxx rsp=%016RX64 rbp=%016RX64\n"
+               " r8=xxxxxxxxxxxxxxxx r9=xxxxxxxxxxxxxxxx r10=%016RX64\n"
+               "r11=%016RX64 r12=%016RX64 r13=%016RX64\n"
+               "r14=%016RX64 r15=%016RX64\n"
+               "iopl=%d %31s\n"
+               "cs=%04x ds=%04x es=%04x fs=%04x gs=%04x eflags=%08RX64\n"
+               "cr0=%016RX64 cr2=xxxxxxxxxxxxxxxx cr3=%016RX64\n"
+               "cr4=%016RX64 cr8=%016RX64 ldtr=%04x tr=%04x\n"
+               "dr0=%016RX64 dr1=%016RX64 dr2=%016RX64\n"
+               "dr3=%016RX64 dr6=%016RX64 dr7=%016RX64\n"
+               "gdtr=%016RX64:%04x idtr=%016RX64:%04x\n"
+               "SysEnter={cs=%04x eip=%08x esp=%08x}\n"
+               "FSbase=%016RX64 GSbase=%016RX64 efer=%08RX64\n"
+               ,
+               /*pCtx->rax,*/ pCtx->rbx, /*pCtx->rcx,
+               pCtx->rdx,*/ pCtx->rsi, pCtx->rdi,
+               /*pCtx->rip,*/ pCtx->rsp, pCtx->rbp,
+               /*pCtx->r8, pCtx->r9,*/ pCtx->r10,
+               pCtx->r11, pCtx->r12, pCtx->r13,
+               pCtx->r14, pCtx->r15,
+               X86_EFL_GET_IOPL(efl), szEFlags,
+               (RTSEL)pCtx->cs, (RTSEL)pCtx->ds, (RTSEL)pCtx->es, (RTSEL)pCtx->fs, (RTSEL)pCtx->gs, efl,
+               pCtx->cr0, /*pCtx->cr2,*/ pCtx->cr3,
+               pCtx->cr4, pCtx->cr8, pCtx->ldtr, pCtx->tr,
+               pCtx->dr0, pCtx->dr1, pCtx->dr2,
+               pCtx->dr3, pCtx->dr6, pCtx->dr7,
+               pCtx->gdtr.uAddr, pCtx->gdtr.cb, pCtx->idtr.uAddr, pCtx->idtr.cb,
+               pCtx->SysEnter.cs, pCtx->SysEnter.eip, pCtx->SysEnter.esp,
+               pCtx->FSbase, pCtx->GSbase, pCtx->efer);
+       }
    #endif
    }
trunk/src/VBox/VMM/CPUMInternal.h  (r464 → r1283)
    /**
     * The save host CPU state.
+    *
+    * @remark  The special VBOX_WITH_HYBIRD_32BIT_KERNEL checks here are for the 10.4.x series
+    *          of Mac OS X where the OS is essentially 32-bit but the cpu mode can be 64-bit.
     */
    typedef struct CPUMHOSTCTX
    …
        X86FXSTATE      fpu;

-   #if HC_ARCH_BITS == 32
        /** General purpose register, selectors, flags and more
         * @{ */
-       //uint32_t        eax; - scratch
-       uint32_t        ebx;
-       //uint32_t        ecx; - scratch
-       //uint32_t        edx; - scratch
-       uint32_t        edi;
-       uint32_t        esi;
-       uint32_t        ebp;
-       /* lss pair */
-       uint32_t        esp;
-       RTSEL           ss;
-       RTSEL           ssPadding;
-       RTSEL           gs;
-       RTSEL           gsPadding;
-       RTSEL           fs;
-       RTSEL           fsPadding;
-       RTSEL           es;
-       RTSEL           esPadding;
-       RTSEL           ds;
-       RTSEL           dsPadding;
-       RTSEL           cs;
-       RTSEL           csPadding;
-       X86EFLAGS       eflags;
-       //uint32_t        eip; - scratch
-       /** @} */
-
-       /** Control registers.
-        * @{ */
-       uint32_t        cr0;
-       //uint32_t        cr2; - scratch
-       uint32_t        cr3;
-       uint32_t        cr4;
-       /** @} */
-
-       /** Debug registers.
-        * @{ */
-       uint32_t        dr0;
-       uint32_t        dr1;
-       uint32_t        dr2;
-       uint32_t        dr3;
-       uint32_t        dr6;
-       uint32_t        dr7;
-       /** @} */
-
-       /** Global Descriptor Table register. */
-       VBOXGDTR        gdtr;
-       uint16_t        gdtrPadding;
-       /** Interrupt Descriptor Table register. */
-       VBOXIDTR        idtr;
-       uint16_t        idtrPadding;
-       /** The task register. */
-       RTSEL           ldtr;
-       RTSEL           ldtrPadding;
-       /** The task register. */
-       RTSEL           tr;
-       RTSEL           trPadding;
-       uint32_t        SysEnterPadding;
-
-       /** The sysenter msr registers.
-        * This member is not used by the hypervisor context. */
-       CPUMSYSENTER    SysEnter;
-
-       /* padding to get 32byte aligned size */
-       uint8_t         auPadding[24];
-
-   #elif HC_ARCH_BITS == 64
+   #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
        /** General purpose register ++
         * { */
    …
        //uint64_t        rip; - scratch
        uint64_t        rflags;
+   #endif
+
+   #if HC_ARCH_BITS == 32
+       //uint32_t        eax; - scratch
+       uint32_t        ebx;
+       //uint32_t        ecx; - scratch
+       //uint32_t        edx; - scratch
+       uint32_t        edi;
+       uint32_t        esi;
+       uint32_t        ebp;
+       X86EFLAGS       eflags;
+       //uint32_t        eip; - scratch
+       /* lss pair! */
+       uint32_t        esp;
+   #endif
        /** @} */

    …
        /** @} */

+   #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
        /** Control registers.
         * @{ */
-       uint64_t        cr0;
-       //uint64_t        cr2; - scratch
-       uint64_t        cr3;
-       uint64_t        cr4;
-       uint64_t        cr8;
+       uint32_t        cr0;
+       //uint32_t        cr2; - scratch
+       uint32_t        cr3;
+       uint32_t        cr4;
        /** @} */

        /** Debug registers.
         * @{ */
-       uint64_t        dr0;
-       uint64_t        dr1;
-       uint64_t        dr2;
-       uint64_t        dr3;
-       uint64_t        dr6;
-       uint64_t        dr7;
+       uint32_t        dr0;
+       uint32_t        dr1;
+       uint32_t        dr2;
+       uint32_t        dr3;
+       uint32_t        dr6;
+       uint32_t        dr7;
        /** @} */

        /** Global Descriptor Table register. */
-       uint8_t         gdtr[10]; //X86GDTR64
+       X86XDTR32       gdtr;
        uint16_t        gdtrPadding;
        /** Interrupt Descriptor Table register. */
-       uint8_t         idtr[10]; //X86IDTR64
+       X86XDTR32       idtr;
        uint16_t        idtrPadding;
        /** The task register. */
    …
        RTSEL           tr;
        RTSEL           trPadding;
+       uint32_t        SysEnterPadding;
+
+       /** The sysenter msr registers.
+        * This member is not used by the hypervisor context. */
+       CPUMSYSENTER    SysEnter;
+
+       /* padding to get 32byte aligned size */
+       uint8_t         auPadding[24];
+
+   #elif HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
+
+       /** Control registers.
+        * @{ */
+       uint64_t        cr0;
+       //uint64_t        cr2; - scratch
+       uint64_t        cr3;
+       uint64_t        cr4;
+       uint64_t        cr8;
+       /** @} */
+
+       /** Debug registers.
+        * @{ */
+       uint64_t        dr0;
+       uint64_t        dr1;
+       uint64_t        dr2;
+       uint64_t        dr3;
+       uint64_t        dr6;
+       uint64_t        dr7;
+       /** @} */
+
+       /** Global Descriptor Table register. */
+       X86XDTR64       gdtr;
+       uint16_t        gdtrPadding;
+       /** Interrupt Descriptor Table register. */
+       X86XDTR64       idtr;
+       uint16_t        idtrPadding;
+       /** The task register. */
+       RTSEL           ldtr;
+       RTSEL           ldtrPadding;
+       /** The task register. */
+       RTSEL           tr;
+       RTSEL           trPadding;

        /** MSRs
    …

        /* padding to get 32byte aligned size */
-       uint8_t         auPadding[8];
+   # ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+       uint8_t         auPadding[16];
+   # else
+       uint8_t         auPadding[24];
+   # endif
+
    #else
    # error HC_ARCH_BITS not defined
trunk/src/VBox/VMM/CPUMInternal.mac  (r464 → r1283)
    %define FPUSTATE_SIZE   512

+   ;; if anyone figures how to do %if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL) in
+   ; nasm please tell / fix this hack.
+   %ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+    %define fVBOX_WITH_HYBIRD_32BIT_KERNEL 1
+   %else
+    %define fVBOX_WITH_HYBIRD_32BIT_KERNEL 0
+   %endif
+
    struc CPUM
        .offVM                  resd    1
    …
        .Host.fpu               resb    512

-   %if HC_ARCH_BITS == 32
-       ;.Host.eax              resd    1 - scratch
-       .Host.ebx               resd    1
-       ;.Host.edx              resd    1 - scratch
-       ;.Host.ecx              resd    1 - scratch
-       .Host.edi               resd    1
-       .Host.esi               resd    1
-       .Host.ebp               resd    1
-       .Host.esp               resd    1
-       .Host.ss                resw    1
-       .Host.ssPadding         resw    1
-       .Host.gs                resw    1
-       .Host.gsPadding         resw    1
-       .Host.fs                resw    1
-       .Host.fsPadding         resw    1
-       .Host.es                resw    1
-       .Host.esPadding         resw    1
-       .Host.ds                resw    1
-       .Host.dsPadding         resw    1
-       .Host.cs                resw    1
-       .Host.csPadding         resw    1
-       .Host.eflags            resd    1
-       ;.Host.eip              resd    1 - scratch
-
-       .Host.cr0               resd    1
-       ;.Host.cr2              resd    1 - scratch
-       .Host.cr3               resd    1
-       .Host.cr4               resd    1
-
-       .Host.dr0               resd    1
-       .Host.dr1               resd    1
-       .Host.dr2               resd    1
-       .Host.dr3               resd    1
-       .Host.dr6               resd    1
-       .Host.dr7               resd    1
-
-       .Host.gdtr              resb    6       ; GDT limit + linear address
-       .Host.gdtrPadding       resw    1
-       .Host.idtr              resb    6       ; IDT limit + linear address
-       .Host.idtrPadding       resw    1
-       .Host.ldtr              resw    1
-       .Host.ldtrPadding       resw    1
-       .Host.tr                resw    1
-       .Host.trPadding         resw    1
-
-       .Host.SysEnterPadding   resd    1
-       .Host.SysEnter.cs       resq    1
-       .Host.SysEnter.eip      resq    1
-       .Host.SysEnter.esp      resq    1
-
-   %else ; 64-bit
-
+   %if HC_ARCH_BITS == 64 || fVBOX_WITH_HYBIRD_32BIT_KERNEL
        ;.Host.rax              resq    1 - scratch
        .Host.rbx               resq    1
    …
        ;.Host.rip              resd    1 - scratch
        .Host.rflags            resq    1
+   %endif
+   %if HC_ARCH_BITS == 32
+       ;.Host.eax              resd    1 - scratch
+       .Host.ebx               resd    1
+       ;.Host.edx              resd    1 - scratch
+       ;.Host.ecx              resd    1 - scratch
+       .Host.edi               resd    1
+       .Host.esi               resd    1
+       .Host.ebp               resd    1
+       .Host.eflags            resd    1
+       ;.Host.eip              resd    1 - scratch
+       ; lss pair!
+       .Host.esp               resd    1
+   %endif
        .Host.ss                resw    1
        .Host.ssPadding         resw    1
    …
        .Host.csPadding         resw    1

+   %if HC_ARCH_BITS == 32 && fVBOX_WITH_HYBIRD_32BIT_KERNEL == 0
+       .Host.cr0               resd    1
+       ;.Host.cr2              resd    1 - scratch
+       .Host.cr3               resd    1
+       .Host.cr4               resd    1
+
+       .Host.dr0               resd    1
+       .Host.dr1               resd    1
+       .Host.dr2               resd    1
+       .Host.dr3               resd    1
+       .Host.dr6               resd    1
+       .Host.dr7               resd    1
+
+       .Host.gdtr              resb    6       ; GDT limit + linear address
+       .Host.gdtrPadding       resw    1
+       .Host.idtr              resb    6       ; IDT limit + linear address
+       .Host.idtrPadding       resw    1
+       .Host.ldtr              resw    1
+       .Host.ldtrPadding       resw    1
+       .Host.tr                resw    1
+       .Host.trPadding         resw    1
+
+       .Host.SysEnterPadding   resd    1
+       .Host.SysEnter.cs       resq    1
+       .Host.SysEnter.eip      resq    1
+       .Host.SysEnter.esp      resq    1
+
+   %else ; 64-bit
+
        .Host.cr0               resq    1
        ;.Host.cr2              resq    1 - scratch
    …
        .Host.trPadding         resw    1

-       .Host.SysEnter.cs       resb    8
-       .Host.SysEnter.eip      resb    8
-       .Host.SysEnter.esp      resb    8
+       .Host.SysEnter.cs       resq    1
+       .Host.SysEnter.eip      resq    1
+       .Host.SysEnter.esp      resq    1
        .Host.FSbase            resq    1
        .Host.GSbase            resq    1
trunk/src/VBox/VMM/Makefile  (r988 → r1283)
    #        VMMSwitcher/AMD64ToAMD64.asm

+   ifeq ($(BUILD_TARGET),darwin)
+    # this is just a temporary hack until we switch to yasm on Mac OS X too.
+    LIBRARIES += VMMR364
+    VMMR364_TOOL    = $(VBOX_GCC_TOOL)
+    VMMR364_ASTOOL  = YASM
+    VMMR364_ASFLAGS = -f macho -w+orphan-labels
+    VMMR364_ASDEFS  = ASM_FORMAT_MACHO $(ARCH_BITS_DEFS) __YASM__
+    VMMR364_SOURCES.darwin.x86 += \
+       VMMSwitcher/AMD64ToPAE.asm
+   endif

    #
    …
        $(LIB_RUNTIME)

+   ifeq ($(BUILD_TARGET),darwin)
+   VBoxVMM_LIBS += $(TARGET_VMMR364) # temp hack
+   endif
    VBoxVMM_LIBS.win = $(PATH_TOOL_$(VBOX_VCC_TOOL)_LIB)/delayimp.lib
    VBoxVMM_LDFLAGS.win = /DELAYLOAD:dbghelp.dll
trunk/src/VBox/VMM/PGM.cpp  (r1268 → r1283)
        case SUPPAGINGMODE_AMD64_NX:
        case SUPPAGINGMODE_AMD64_GLOBAL_NX:
+   #ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL
            if (ARCH_BITS != 64)
            {
    …
                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
            }
+   #endif
            break;
        default:
trunk/src/VBox/VMM/TRPMInternal.h  (r716 → r1283)
    DECLASM(void) trpmR0DispatchHostInterrupt(RTR0UINTPTR uIP, RTSEL SelCS, RTR0UINTPTR RSP);

+   /**
+    * Issues a software interrupt to the specified interrupt vector.
+    *
+    * @param   uActiveVector   The vector number.
+    */
+   DECLASM(void) trpmR0DispatchHostInterruptSimple(RTUINT uActiveVector);
+
    # ifndef VBOX_WITHOUT_IDT_PATCHING
    /**
trunk/src/VBox/VMM/VMM.cpp  (r1270 → r1283)
        &vmmR3SwitcherPAEToPAE_Def,
        NULL,   //&vmmR3SwitcherPAEToAMD64_Def,
+   # ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+       &vmmR3SwitcherAMD64ToPAE_Def,
+   # else
        NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
+   # endif
        NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
    #else
    …
        }

-   #ifdef __AMD64__
+       /*
+        * 32-bit HC pointer fixup to (HC) target within the code (32-bit offset).
+        */
+       case FIX_HC_32BIT:
+       {
+           uint32_t offTrg = *u.pu32++;
+           Assert(offSrc < pSwitcher->cbCode);
+           Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
+           *uSrc.pu32 = (uintptr_t)pu8CodeR0 + offTrg;
+           break;
+       }
+
+   #if defined(__AMD64__) || defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
        /*
         * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
    …
            Assert(offTrg - pSwitcher->offHCCode0 < pSwitcher->cbHCCode0 || offTrg - pSwitcher->offHCCode1 < pSwitcher->cbHCCode1);
            *uSrc.pu64 = (uintptr_t)pu8CodeR0 + offTrg;
+           break;
+       }
+
+       /*
+        * 64-bit HC Code Selector (no argument).
+        */
+       case FIX_HC_64BIT_CS:
+       {
+           Assert(offSrc < pSwitcher->cbCode);
+   #if defined(__DARWIN__) && defined(VBOX_WITH_HYBIRD_32BIT_KERNEL)
+           *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
+   #else
+           AssertFatalMsg(("FIX_HC_64BIT_CS not implemented for this host\n"));
+   #endif
            break;
        }
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp  (r23 → r1283)
        pVM->hwaccm.s.vmx.fSupported = false;;
        pVM->hwaccm.s.svm.fSupported = false;;
+
+   #ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL /* paranoia */

        /*
    …
            }
        }
+   #endif /* !VBOX_WITH_HYBIRD_32BIT_KERNEL */
+
        return VINF_SUCCESS;
    }
trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp  (r415 → r1283)
        AssertMsgReturnVoid(uActiveVector < 256, ("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n", uActiveVector));

+   #ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+       /*
+        * Check if we're in long mode or not.
+        */
+       if (    (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
+           &&  (ASMRdMsr(MSR_K6_EFER) & MSR_K6_EFER_LMA))
+       {
+           trpmR0DispatchHostInterruptSimple(uActiveVector);
+           return;
+       }
+   #endif
+
        /*
         * Get the handler pointer (16:32 ptr) / (16:48 ptr).
    …
        if (pIdte->au32[1] & 0x7 /*IST*/)
        {
-           /** @todo implement IST */
+           trpmR0DispatchHostInterruptSimple(uActiveVector);
+           return;
        }

    …
    }

+
    #ifndef VBOX_WITHOUT_IDT_PATCHING
+   # ifdef VBOX_WITH_HYBIRD_32BIT_KERNEL
+   #  error "VBOX_WITH_HYBIRD_32BIT_KERNEL without VBOX_WITHOUT_IDT_PATCHING isn't supported"
+   # endif

    /**
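Aside, not part of the changeset: the long-mode test added above combines two conditions, CPUID leaf 0x80000001 EDX bit 29 (long mode supported) and EFER.LMA (long mode currently active). A hedged C sketch of the same check; cpuid_edx() and rdmsr() are hypothetical stand-ins for the IPRT ASMCpuId_EDX/ASMRdMsr helpers, and EFER can only be read at ring 0:

    #include <stdint.h>
    #include <stdbool.h>

    #define CPUID_EXT_FEATURES   0x80000001u  /* extended feature leaf */
    #define CPUID_EDX_LONG_MODE  (1u << 29)   /* CPU supports long mode */
    #define MSR_EFER             0xC0000080u  /* extended feature enable register */
    #define EFER_LMA             (1u << 10)   /* long mode is active right now */

    uint32_t cpuid_edx(uint32_t uLeaf);       /* hypothetical CPUID wrapper */
    uint64_t rdmsr(uint32_t uMsr);            /* hypothetical RDMSR wrapper, ring-0 only */

    /* True when the host kernel is running with a 64-bit (long mode) CPU state,
     * which is the situation the hybrid 32-bit kernel case must detect at runtime. */
    static bool host_in_long_mode(void)
    {
        return (cpuid_edx(CPUID_EXT_FEATURES) & CPUID_EDX_LONG_MODE)
            && (rdmsr(MSR_EFER) & EFER_LMA);
    }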
trunk/src/VBox/VMM/VMMR0/TRPMR0A.asm  (r424 → r1283)
    %endif ; !VBOX_WITHOUT_IDT_PATCHING


+   ;;
+   ; Issues a software interrupt to the specified interrupt vector.
+   ;
+   ; @param uActiveVector  x86:[esp+4] msc:rcx gcc:rdi  The vector number.
+   ;
+   ;DECLASM(void) trpmR0DispatchHostInterruptSimple(RTUINT uActiveVector);
+   BEGINPROC trpmR0DispatchHostInterruptSimple
+   %ifdef __X86__
+       mov     eax, [esp + 4]
+       jmp     dword [.jmp_table + eax * 4]
+   %else
+       lea     r9, [.jmp_table wrt rip]
+    %ifdef ASM_CALL64_MSC
+       jmp     qword [r9 + rcx * 8]
+    %else
+       jmp     qword [r9 + rdi * 8]
+    %endif
+   %endif
+
+   .jmp_table:
+   %assign i 0
+   %rep 256
+       RTCCPTR_DEF .int_ %+ i
+   %assign i i+1
+   %endrep
+
+   %assign i 0
+   %rep 256
+       ALIGNCODE(4)
+   .int_ %+ i:
+       int     i
+       ret
+   %assign i i+1
+   %endrep
+
+   ENDPROC trpmR0DispatchHostInterruptSimple
+
trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm  (r955 → r1283)
    GLOBALNAME Start

+   %ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL
    BITS 64

    …
        COM64_S_CHAR '^'
    %endif
-
-   %ifdef STRICT_IF
+       ;
+       ; The ordinary version of the code.
+       ;
+
+    %ifdef STRICT_IF
        pushf
        pop     rax
    …
        ret
    .if_clear_in:
-   %endif
+    %endif

        ;
        ; make r9 = pVM and rdx = pCpum.
        ; rax, rcx and r8 are scratch here after.
-   %ifdef __WIN64__
+    %ifdef __WIN64__
        mov     r9, rcx
-   %else
+    %else
        mov     r9, rdi
-   %endif
+    %endif
        lea     rdx, [r9 + VM.cpum]

-   %ifdef VBOX_WITH_STATISTICS
+    %ifdef VBOX_WITH_STATISTICS
        ;
        ; Switcher stats.
    …
        lea     r8, [r9 + VM.StatSwitcherToGC]
        STAM64_PROFILE_ADV_START r8
-   %endif
+    %endif

        ;
    …
        call    NAME(vmmR0HostToGuestAsm)

-   %ifdef VBOX_WITH_STATISTICS
+    %ifdef VBOX_WITH_STATISTICS
        ;
        ; Switcher stats.
    …
        lea     r8, [r9 + VM.StatSwitcherToGC]
        STAM64_PROFILE_ADV_STOP r8
    %endif

        ret
    ENDPROC vmmR0HostToGuest
+
+
+   %else ; VBOX_WITH_HYBIRD_32BIT_KERNEL
+
+
+   BITS 32
+
+   ;;
+   ; The C interface.
+   ;
+   BEGINPROC vmmR0HostToGuest
+    %ifdef DEBUG_STUFF
+       COM32_S_NEWLINE
+       COM32_S_CHAR '^'
+    %endif
+
+    %ifdef VBOX_WITH_STATISTICS
+       ;
+       ; Switcher stats.
+       ;
+       FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToGC
+       mov     edx, 0ffffffffh
+       STAM_PROFILE_ADV_START edx
+    %endif
+
+       ; Thunk to/from 64 bit when invoking the worker routine.
+       ;
+       FIXUP FIX_HC_VM_OFF, 1, VM.cpum
+       mov     edx, 0ffffffffh
+
+       push    0
+       push    cs
+       push    0
+       FIXUP FIX_HC_32BIT, 1, .vmmR0HostToGuestReturn - NAME(Start)
+       push    0ffffffffh
+
+       FIXUP FIX_HC_64BIT_CS, 1
+       push    0ffffh
+       FIXUP FIX_HC_32BIT, 1, NAME(vmmR0HostToGuestAsm) - NAME(Start)
+       push    NAME(vmmR0HostToGuestAsm)
+       retf
+   .vmmR0HostToGuestReturn:
+
+       ;
+       ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
+       ; the CPU has the right idea about the selectors.
+       ;
+       mov     edx, ds
+       mov     ds, edx
+       mov     ecx, es
+       mov     es, ecx
+       mov     edx, ss
+       mov     ss, edx
+
+    %ifdef VBOX_WITH_STATISTICS
+       ;
+       ; Switcher stats.
+       ;
+       FIXUP FIX_HC_VM_OFF, 1, VM.StatSwitcherToHC
+       mov     edx, 0ffffffffh
+       STAM_PROFILE_ADV_STOP edx
+    %endif
+
+       ret
+   ENDPROC vmmR0HostToGuest
+
+   BITS 64
+   %endif ;!VBOX_WITH_HYBIRD_32BIT_KERNEL
trunk/src/VBox/VMM/VMMSwitcher/VMMSwitcher.h  (r23 → r1283)
    #define FIX_NO_SYSENTER_JMP             25
    #define FIX_NO_SYSCALL_JMP              26
-   #define FIX_HC_64BIT                    27
-   #define FIX_HC_64BIT_CPUM               28
-   #define FIX_ID_32BIT                    29
-   #define FIX_ID_64BIT                    30
-   #define FIX_ID_FAR32_TO_64BIT_MODE      31
-   #define FIX_GC_APIC_BASE_32BIT          32
+   #define FIX_HC_32BIT                    27
+   #define FIX_HC_64BIT                    28
+   #define FIX_HC_64BIT_CPUM               29
+   #define FIX_HC_64BIT_CS                 30
+   #define FIX_ID_32BIT                    31
+   #define FIX_ID_64BIT                    32
+   #define FIX_ID_FAR32_TO_64BIT_MODE      33
+   #define FIX_GC_APIC_BASE_32BIT          34
    #define FIX_THE_END                     255
    /** @} */
trunk/src/VBox/VMM/VMMSwitcher/VMMSwitcher.mac  (r19 → r1283)
    %define FIX_NO_SYSENTER_JMP             25
    %define FIX_NO_SYSCALL_JMP              26
-   %define FIX_HC_64BIT                    27
-   %define FIX_HC_64BIT_CPUM               28
-   %define FIX_ID_32BIT                    29
-   %define FIX_ID_64BIT                    30
-   %define FIX_ID_FAR32_TO_64BIT_MODE      31
-   %define FIX_GC_APIC_BASE_32BIT          32
+   %define FIX_HC_32BIT                    27
+   %define FIX_HC_64BIT                    28
+   %define FIX_HC_64BIT_CPUM               29
+   %define FIX_HC_64BIT_CS                 30
+   %define FIX_ID_32BIT                    31
+   %define FIX_ID_64BIT                    32
+   %define FIX_ID_FAR32_TO_64BIT_MODE      33
+   %define FIX_GC_APIC_BASE_32BIT          34
    %define FIX_THE_END                     255
    ;/** @} */
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp  (r520 → r1283)
        CHECK_CPUMCTXCORE(ss);

+   #if HC_ARCH_BITS == 32
+       /* CPUMHOSTCTX - lss pair */
+       if (RT_OFFSETOF(CPUMHOSTCTX, esp) + 4 != RT_OFFSETOF(CPUMHOSTCTX, ss))
+       {
+           printf("error: CPUMHOSTCTX lss has been split up!\n");
+           rc++;
+       }
+   #endif
+
        /* pdm */
        CHECK_MEMBER_ALIGNMENT(PDMDEVINS, achInstanceData, 16);
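Aside, not part of the changeset: the runtime check added above guards the "lss pair" layout of CPUMHOSTCTX, esp must be immediately followed by ss so a single lss instruction can reload both. The same invariant can be stated at compile time; a sketch against a stand-in structure (not the real CPUMHOSTCTX), using C11 _Static_assert:

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the real structure: esp and ss laid out as an lss pair. */
    typedef struct SAMPLEHOSTCTX
    {
        uint32_t esp;        /* must be directly followed by ss */
        uint16_t ss;
        uint16_t ssPadding;
    } SAMPLEHOSTCTX;

    /* Fails the build, rather than a test run, if the pair is ever split up. */
    _Static_assert(offsetof(SAMPLEHOSTCTX, esp) + 4 == offsetof(SAMPLEHOSTCTX, ss),
                   "lss pair has been split up");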