- Timestamp: Jun 5, 2008 11:41:38 AM (17 years ago)
- Location: trunk
- Files: 9 edited
trunk/include/VBox/cpum.h (r9354 → r9411):

         uint64_t        rcx;
     };
-    /* Note: we rely on the exact layout, because we use lss esp, [] in the switcher */
-    uint32_t        esp;
-    RTSEL           ss;
-    RTSEL           ssPadding;
-    /* Note: no overlap with esp here. */
-    uint64_t        rsp;
+    union
+    {
+        uint32_t    esp;
+        uint64_t    rsp;
+    };
+    /* Note: lss esp, [] in the switcher needs some space, so we reserve it here instead of relying on the exact esp & ss layout as before. */
+    uint32_t        lss_esp;
+    RTSEL           ss;
+    RTSEL           ssPadding;

     RTSEL           gs;
     RTSEL           gsPadding;
     RTSEL           fs;
     RTSEL           fsPadding;
     RTSEL           es;
     RTSEL           esPadding;
     RTSEL           ds;
     RTSEL           dsPadding;
     RTSEL           cs;
     RTSEL           csPadding[3]; /* 3 words to force 8 byte alignment for the remainder */

     union

…

         uint64_t        rcx;
     };
-    /* Note: we rely on the exact layout, because we use lss esp, [] in the switcher */
-    uint32_t        esp;
-    RTSEL           ss;
-    RTSEL           ssPadding;
-    /* Note: no overlap with esp here. */
-    uint64_t        rsp;
+    union
+    {
+        uint32_t    esp;
+        uint64_t    rsp;
+    };
+    /* Note: lss esp, [] in the switcher needs some space, so we reserve it here instead of relying on the exact esp & ss layout as before (prevented us from using a union with rsp). */
+    uint32_t        lss_esp;
+    RTSEL           ss;
+    RTSEL           ssPadding;

     RTSEL           gs;
     RTSEL           gsPadding;
     RTSEL           fs;
     RTSEL           fsPadding;
     RTSEL           es;
     RTSEL           esPadding;
     RTSEL           ds;
     RTSEL           dsPadding;
     RTSEL           cs;
     RTSEL           csPadding[3]; /* 3 words to force 8 byte alignment for the remainder */

     union
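For readers unfamiliar with `lss`: the instruction loads SS:ESP from a 48-bit far pointer in memory, a 32-bit offset immediately followed by a 16-bit selector. That is why the old layout pinned `esp` directly in front of `ss`, and why the new layout, which folds esp/rsp into a union, needs the dedicated `lss_esp` field in front of `ss` instead. A minimal sketch, not VirtualBox code (the `CTXFRAG` name is illustrative; compile as C11 for the anonymous union):

```c
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

typedef uint16_t RTSEL;

/* Simplified stand-in for the reworked CPUMCTX fragment. */
typedef struct CTXFRAG
{
    union
    {
        uint32_t esp;    /* low half of rsp */
        uint64_t rsp;    /* full 64-bit stack pointer */
    };
    uint32_t lss_esp;    /* offset half of the far pointer used by lss */
    RTSEL    ss;         /* selector half; must directly follow lss_esp */
    RTSEL    ssPadding;
} CTXFRAG;

int main(void)
{
    /* lss esp, [m] consumes a dword offset plus a word selector, so the
       lss_esp/ss pair has to be contiguous: offsets 8 and 12 here. */
    printf("lss_esp at %zu, ss at %zu\n",
           offsetof(CTXFRAG, lss_esp), offsetof(CTXFRAG, ss));
    return 0;
}
```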
trunk/include/VBox/cpum.mac (r8155 → r9411):

     .edx            resq    1
     .ecx            resq    1
-    .esp            resd    1
+    .esp            resq    1
+    .lss_esp        resd    1
     .ss             resw    1
     .ssPadding      resw    1
-    .rsp            resq    1
     .gs             resw    1
     .gsPadding      resw    1

…

     .edx            resq    1
     .ecx            resq    1
-    .esp            resd    1
+    .esp            resq    1
+    .lss_esp        resd    1
     .ss             resw    1
     .ssPadding      resw    1
-    .rsp            resq    1
     .gs             resw    1
     .gsPadding      resw    1
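Worth noting: the rework keeps the fragment's size unchanged. The old layout was esp (4) + ss (2) + ssPadding (2) + rsp (8) = 16 bytes; the new one is the esp/rsp union (8) + lss_esp (4) + ss (2) + ssPadding (2) = 16 bytes, so `.gs` and everything after it keep their old offsets. A quick check in C (hypothetical struct names, C11 anonymous union):

```c
#include <stdint.h>
#include <stdio.h>

typedef uint16_t RTSEL;

/* Old fragment: esp(4) + ss(2) + ssPadding(2) + rsp(8, aligned to 8). */
struct OldFrag { uint32_t esp; RTSEL ss, ssPadding; uint64_t rsp; };

/* New fragment: esp/rsp union(8) + lss_esp(4) + ss(2) + ssPadding(2). */
struct NewFrag { union { uint32_t esp; uint64_t rsp; };
                 uint32_t lss_esp; RTSEL ss, ssPadding; };

int main(void)
{
    /* Both print 16: members following this fragment keep their offsets. */
    printf("old=%zu new=%zu\n", sizeof(struct OldFrag), sizeof(struct NewFrag));
    return 0;
}
```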
trunk/include/iprt/cdefs.h (r9251 → r9411):

 # define DECLGCCALLBACKMEMBER(type, name, args) type (RTCALL * name) args
 #else
-# define DECLGCCALLBACKMEMBER(type, name, args) RTGCPTR32 name
+# define DECLGCCALLBACKMEMBER(type, name, args) RTRCPTR name
 #endif
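`RTGCPTR32` was the older 32-bit guest-context pointer type; `RTRCPTR` is its raw-mode-context successor. Either way the non-GC expansion keeps the member a plain 32-bit address so the structure has the same layout in every build. A hedged sketch of the idea (the `TRAPHANDLER` struct and the `IN_GC` usage here are illustrative only):

```c
#include <stdint.h>

typedef uint32_t RTRCPTR;  /* raw-mode context address: always 32 bits */
#define RTCALL             /* calling-convention decoration, empty here */

#ifdef IN_GC
/* In the raw-mode (GC) build the member is a callable function pointer. */
# define DECLGCCALLBACKMEMBER(type, name, args) type (RTCALL *name) args
#else
/* Elsewhere it is just an address in the other context; a fixed 32-bit
   type keeps the structure the same size in all builds. */
# define DECLGCCALLBACKMEMBER(type, name, args) RTRCPTR name
#endif

/* Illustrative use: a callback table shared across contexts. */
typedef struct TRAPHANDLER
{
    DECLGCCALLBACKMEMBER(int, pfnHandler, (void *pvUser));
} TRAPHANDLER;
```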
trunk/src/VBox/VMM/CPUMInternal.mac (r9354 → r9411):

     .Hyper.edx          resq    1
     .Hyper.ecx          resq    1
-    .Hyper.esp          resd    1
+    .Hyper.esp          resq    1
+    .Hyper.lss_esp      resd    1
     .Hyper.ss           resw    1
     .Hyper.ssPadding    resw    1
-    .Hyper.rsp          resq    1
     .Hyper.gs           resw    1
     .Hyper.gsPadding    resw    1

…

     .Guest.edx          resq    1
     .Guest.ecx          resq    1
-    .Guest.esp          resd    1
+    .Guest.esp          resq    1
+    .Guest.lss_esp      resd    1
     .Guest.ss           resw    1
     .Guest.ssPadding    resw    1
-    .Guest.rsp          resq    1
     .Guest.gs           resw    1
     .Guest.gsPadding    resw    1
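With the same fragment now spelled out in cpum.h, cpum.mac and here, the assembly offsets can silently drift from the C layout. IPRT provides compile-time offset assertions for exactly this situation; the macro below is a generic stand-in of my own, not the IPRT API:

```c
#include <stddef.h>
#include <stdint.h>

typedef uint16_t RTSEL;

struct Frag { union { uint32_t esp; uint64_t rsp; };
              uint32_t lss_esp; RTSEL ss, ssPadding; };

/* Fails to compile if a member moves, catching .h/.mac drift early. */
#define ASSERT_MEMBER_OFFSET(type, member, off) \
    typedef char assert_##member##_at[(offsetof(struct type, member) == (off)) ? 1 : -1]

ASSERT_MEMBER_OFFSET(Frag, lss_esp, 8);
ASSERT_MEMBER_OFFSET(Frag, ss, 12);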
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r9409 → r9411):

     /* Setup the register and mask according to the current execution mode. */
     if (pCtx->msrEFER & MSR_K6_EFER_LMA)
-        pVM->hwaccm.s.u64RegisterMask = 0xFFFFFFFFFFFFFFFFULL;
+        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFFFFFFFFFF);
     else
-        pVM->hwaccm.s.u64RegisterMask = 0xFFFFFFFFULL;
+        pVM->hwaccm.s.u64RegisterMask = UINT64_C(0xFFFFFFFF);

     rc = HWACCMR0Globals.pfnEnterSession(pVM, &HWACCMR0Globals.aCpuInfo[idCpu]);
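A small portability point: `UINT64_C` (from stdint.h) expands a literal with whatever suffix the target compiler needs for an unsigned 64-bit constant, instead of hard-coding `ULL`, which not every compiler of the era accepted. For example:

```c
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Register mask selection mirroring the changed code: all 64 bits
       for a long-mode (LMA) guest, the low 32 bits otherwise. */
    int fLongMode = 1;  /* stand-in for pCtx->msrEFER & MSR_K6_EFER_LMA */
    uint64_t u64Mask = fLongMode ? UINT64_C(0xFFFFFFFFFFFFFFFF)
                                 : UINT64_C(0xFFFFFFFF);
    printf("mask=%016" PRIx64 "\n", u64Mask);
    return 0;
}
```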
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r9407 → r9411):

     /* EIP, ESP and EFLAGS */
-    pVMCB->guest.u64RIP    = pCtx->eip;
-    pVMCB->guest.u64RSP    = pCtx->esp;
+    pVMCB->guest.u64RIP    = pCtx->rip;
+    pVMCB->guest.u64RSP    = pCtx->rsp;
     pVMCB->guest.u64RFlags = pCtx->eflags.u32;

…

     /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */
-    pVMCB->guest.u64RAX    = pCtx->eax;
+    pVMCB->guest.u64RAX    = pCtx->rax;

     /* vmrun will fail without MSR_K6_EFER_SVME. */

…

     /* Let's first sync back eip, esp, and eflags. */
-    pCtx->eip        = pVMCB->guest.u64RIP;
-    pCtx->esp        = pVMCB->guest.u64RSP;
+    pCtx->rip        = pVMCB->guest.u64RIP;
+    pCtx->rsp        = pVMCB->guest.u64RSP;
     pCtx->eflags.u32 = pVMCB->guest.u64RFlags;
     /* eax is saved/restore across the vmrun instruction */
-    pCtx->eax        = pVMCB->guest.u64RAX;
+    pCtx->rax        = pVMCB->guest.u64RAX;

     /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
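The eip→rip, esp→rsp, eax→rax switch matters for 64-bit guests: the VMCB fields are 64-bit, and round-tripping them through the 32-bit context fields would silently drop the upper half. A tiny demonstration of the failure mode (the address value is invented):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* A canonical 64-bit kernel address, as a guest RIP might hold. */
    uint64_t u64RIP = UINT64_C(0xFFFFF80000123456);

    uint32_t eip = (uint32_t)u64RIP;  /* old path: upper 32 bits lost */
    uint64_t rip = u64RIP;            /* new path: value preserved */

    printf("eip=%08x rip=%016llx\n", eip, (unsigned long long)rip);
    return 0;
}
```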
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r9407 → r9411):

     /* EIP, ESP and EFLAGS */
-    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_RIP, pCtx->eip);
-    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_RSP, pCtx->esp);
+    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_RIP, pCtx->rip);
+    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_RSP, pCtx->rsp);
     AssertRC(rc);

…

     val = (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF);

-    /* 64 bits guest mode? */
-    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
-        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
-
     /* Mask away the bits that the CPU doesn't support */
     /** @todo make sure they don't conflict with the above requirements. */

…

     rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
     AssertRC(rc);
+
+    /* 64 bits guest mode? */
+    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
+        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;

     /* Done. */

…

     AssertRC(rc);

+    /* Let's first sync back eip, esp, and eflags. */
+    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
+    AssertRC(rc);
+    pCtx->rip = val;
+    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
+    AssertRC(rc);
+    pCtx->rsp = val;
+    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
+    AssertRC(rc);
+    pCtx->eflags.u32 = val;
+
     /* Take care of instruction fusing (sti, mov ss) */
     rc |= VMXReadVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, &val);

…

     else
         VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
-
-    /* Let's first sync back eip, esp, and eflags. */
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
-    AssertRC(rc);
-    pCtx->eip = val;
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
-    AssertRC(rc);
-    pCtx->esp = val;
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
-    AssertRC(rc);
-    pCtx->eflags.u32 = val;

     /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
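The elided "mask away" lines follow the usual VMX capability-MSR convention: the low dword of IA32_VMX_ENTRY_CTLS gives the control bits that must be 1, the high dword the bits that may be 1. A generic sketch of that masking, not the VirtualBox code itself:

```c
#include <stdint.h>

/* Clamp requested VM-entry controls against the capability MSR:
   low dword = allowed-0 (must be set), high dword = allowed-1 (may be set). */
static uint32_t vmxApplyEntryCaps(uint64_t u64CapsMsr, uint32_t fRequested)
{
    uint32_t fMustBeOne = (uint32_t)u64CapsMsr;
    uint32_t fMayBeOne  = (uint32_t)(u64CapsMsr >> 32);
    return (fRequested | fMustBeOne) & fMayBeOne;
}
```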
trunk/src/VBox/VMM/VMMSwitcher/AMD64ToPAE.asm (r8155 → r9411):

     lidt    [edx + CPUM.Hyper.idtr]

-    ; Setup stack
+    ; Setup stack; use the lss_esp, ss pair for lss
     DEBUG_CHAR('3')
-    lss     esp, [edx + CPUM.Hyper.esp]
+    mov     eax, [edx + CPUM.Hyper.esp]
+    mov     [edx + CPUM.Hyper.lss_esp], eax
+    lss     esp, [edx + CPUM.Hyper.lss_esp]

     ; Restore TSS selector; must mark it as not busy before using ltr (!)
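In C terms, the new switcher sequence stages the stack pointer next to `ss` before the far load, so that `lss` reads a coherent lss_esp/ss pair. A sketch under that reading (struct and helper names are illustrative):

```c
#include <stdint.h>

typedef uint16_t RTSEL;

struct HyperFrag
{
    union { uint32_t esp; uint64_t rsp; };
    uint32_t lss_esp;
    RTSEL    ss, ssPadding;
};

/* Mirrors: mov eax, [edx + CPUM.Hyper.esp]
            mov [edx + CPUM.Hyper.lss_esp], eax
            lss esp, [edx + CPUM.Hyper.lss_esp]  (the lss itself stays in asm) */
static void stageStackFarPointer(struct HyperFrag *pHyper)
{
    pHyper->lss_esp = pHyper->esp;  /* copy dword offset next to ss */
}
```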
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac (r8155 → r9411):

     mov     fs, eax

-    ; Setup stack
+    ; Setup stack; use the lss_esp, ss pair for lss
     DEBUG_CHAR('3')
+    mov     eax, [edx + CPUM.Hyper.esp]
+    mov     [edx + CPUM.Hyper.lss_esp], eax
     lss     esp, [edx + CPUM.Hyper.esp]