Changeset 87408 in vbox for trunk/src/VBox
- Timestamp:
- Jan 24, 2021 7:57:39 PM (4 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r87401 r87408 314 314 315 315 ;; 316 ; Used on platforms with poor inline assembly support to retrieve all the 317 ; info from the CPU and put it in the @a pRestoreHost structure. 318 ; 319 ; @returns nothing (the helper is declared DECLASM(void)) 320 ; @param pRestoreHost msc: rcx gcc: rdi Pointer to the RestoreHost struct. 321 ; @param fHaveFsGsBase msc: dl gcc: sil Whether we can use rdfsbase or not. 322 ; 323 ALIGNCODE(64) 324 BEGINPROC hmR0VmxExportHostSegmentRegsAsmHlp 325 %ifdef ASM_CALL64_MSC 326 %define pRestoreHost rcx 327 %elifdef ASM_CALL64_GCC 328 %define pRestoreHost rdi 329 %else 330 %error Unknown calling convention. 331 %endif 332 SEH64_END_PROLOGUE 333 334 ; Start with the FS and GS base so we can trash DL/SIL. 335 %ifdef ASM_CALL64_MSC 336 or dl, dl 337 %else 338 or sil, sil 339 %endif 340 jz .use_rdmsr_for_fs_and_gs_base 341 rdfsbase rax 342 mov [pRestoreHost + VMXRESTOREHOST.uHostFSBase], rax 343 rdgsbase rax 344 mov [pRestoreHost + VMXRESTOREHOST.uHostGSBase], rax 345 .done_fs_and_gs_base: 346 347 ; TR, GDTR and IDTR 348 str [pRestoreHost + VMXRESTOREHOST.uHostSelTR] 349 sgdt [pRestoreHost + VMXRESTOREHOST.HostGdtr] 350 sidt [pRestoreHost + VMXRESTOREHOST.HostIdtr] 351 352 ; Segment registers. 
353 xor eax, eax 354 mov eax, cs 355 mov [pRestoreHost + VMXRESTOREHOST.uHostSelCS], ax 356 357 mov eax, ss 358 mov [pRestoreHost + VMXRESTOREHOST.uHostSelSS], ax 359 360 mov eax, gs 361 mov [pRestoreHost + VMXRESTOREHOST.uHostSelGS], ax 362 363 mov eax, fs 364 mov [pRestoreHost + VMXRESTOREHOST.uHostSelFS], ax 365 366 mov eax, es 367 mov [pRestoreHost + VMXRESTOREHOST.uHostSelES], ax 368 369 mov eax, ds 370 mov [pRestoreHost + VMXRESTOREHOST.uHostSelDS], ax 371 372 ret 373 374 ALIGNCODE(16) 375 .use_rdmsr_for_fs_and_gs_base: 376 ; Save pRestoreHost in r8 (volatile in both conventions): the MSC register 377 ; (rcx/ecx) is clobbered by the rdmsr sequence below, and the stores use r8. 378 mov r8, pRestoreHost 379 380 mov ecx, MSR_K8_FS_BASE 381 rdmsr 382 shl rdx, 32 383 or rdx, rax 384 mov [r8 + VMXRESTOREHOST.uHostFSBase], rdx ; full 64-bit value (edx:eax), not just the low half 385 386 mov ecx, MSR_K8_GS_BASE 387 rdmsr 388 shl rdx, 32 389 or rdx, rax 390 mov [r8 + VMXRESTOREHOST.uHostGSBase], rdx 391 392 %ifdef ASM_CALL64_MSC 393 mov pRestoreHost, r8 394 %endif 395 jmp .done_fs_and_gs_base 396 %undef pRestoreHost 397 ENDPROC hmR0VmxExportHostSegmentRegsAsmHlp 398 399 400 ;; 316 401 ; Restores host-state fields. 317 402 ; … … 333 418 SEH64_END_PROLOGUE 334 419 335 .restore_gdtr 420 .restore_gdtr: 336 421 test edi, VMX_RESTORE_HOST_GDTR 337 422 jz .restore_idtr -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r87405 r87408 4524 4524 static int hmR0VmxExportHostSegmentRegs(PVMCPUCC pVCpu, uint64_t uHostCr4) 4525 4525 { 4526 /**4527 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry4528 * requirements. See hmR0VmxExportHostSegmentRegs().4529 */4530 #define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_selValue) \4531 if ((a_selValue) & (X86_SEL_RPL | X86_SEL_LDT)) \4532 { \4533 uint32_t fAttr; \4534 if ( !((a_selValue) & X86_SEL_LDT) /* likely */ \4535 || (((fAttr = ASMGetSegAttr(a_selValue)) & X86_DESC_P) && fAttr != UINT32_MAX)) \4536 { \4537 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \4538 pVCpu->hm.s.vmx.RestoreHost.uHostSel##a_Seg = (a_selValue); \4539 } \4540 (a_selValue) = 0; \4541 }4542 4543 4526 /* 4544 4527 * If we've executed guest code using hardware-assisted VMX, the host-state bits … … 4556 4539 pVCpu->hm.s.vmx.fRestoreHostFlags = 0; 4557 4540 } 4558 uint32_t fRestoreHostFlags = 0; 4559 4560 /* 4561 * Host segment registers. 4562 */ 4563 RTSEL uSelES = ASMGetES(); 4564 RTSEL uSelCS = ASMGetCS(); 4565 RTSEL uSelSS = ASMGetSS(); 4566 RTSEL uSelDS = ASMGetDS(); 4567 RTSEL uSelFS = ASMGetFS(); 4568 RTSEL uSelGS = ASMGetGS(); 4569 RTSEL uSelTR = ASMGetTR(); 4541 4542 /* 4543 * Get all the host info. 4544 * ASSUME it is safe to use rdfsbase and friends if the CR4.FSGSBASE bit is set 4545 * without also checking the cpuid bit. 
4546 */ 4547 uint32_t fRestoreHostFlags; 4548 #if RT_INLINE_ASM_EXTERNAL 4549 if (uHostCr4 & X86_CR4_FSGSBASE) 4550 { 4551 hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hm.s.vmx.RestoreHost, true /*fHaveFsGsBase*/); 4552 fRestoreHostFlags = VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE; 4553 } 4554 else 4555 { 4556 hmR0VmxExportHostSegmentRegsAsmHlp(&pVCpu->hm.s.vmx.RestoreHost, false /*fHaveFsGsBase*/); 4557 fRestoreHostFlags = 0; 4558 } 4559 RTSEL uSelES = pVCpu->hm.s.vmx.RestoreHost.uHostSelES; 4560 RTSEL uSelDS = pVCpu->hm.s.vmx.RestoreHost.uHostSelDS; 4561 RTSEL uSelFS = pVCpu->hm.s.vmx.RestoreHost.uHostSelFS; 4562 RTSEL uSelGS = pVCpu->hm.s.vmx.RestoreHost.uHostSelGS; 4563 #else 4564 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = ASMGetTR(); 4565 pVCpu->hm.s.vmx.RestoreHost.uHostSelSS = ASMGetSS(); 4566 pVCpu->hm.s.vmx.RestoreHost.uHostSelCS = ASMGetCS(); 4567 ASMGetGDTR((PRTGDTR)&pVCpu->hm.s.vmx.RestoreHost.HostGdtr); 4568 ASMGetIDTR((PRTIDTR)&pVCpu->hm.s.vmx.RestoreHost.HostIdtr); 4569 if (uHostCr4 & X86_CR4_FSGSBASE) 4570 { 4571 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = ASMGetFSBase(); 4572 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = ASMGetGSBase(); 4573 fRestoreHostFlags = VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE; 4574 } 4575 else 4576 { 4577 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = ASMRdMsr(MSR_K8_FS_BASE); 4578 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = ASMRdMsr(MSR_K8_GS_BASE); 4579 fRestoreHostFlags = 0; 4580 } 4581 RTSEL uSelES, uSelDS, uSelFS, uSelGS; 4582 pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS = ASMGetDS(); 4583 pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES = ASMGetES(); 4584 pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS = ASMGetFS(); 4585 pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS = ASMGetGS(); 4586 #endif 4570 4587 4571 4588 /* … … 4575 4592 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers". 
4576 4593 */ 4577 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS); 4578 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES); 4579 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS); 4580 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS); 4594 RTSEL const uSelAll = uSelFS | uSelGS | uSelES | uSelDS; 4595 if (uSelAll & (X86_SEL_RPL | X86_SEL_LDT)) 4596 { 4597 if (!(uSelAll & X86_SEL_LDT)) 4598 { 4599 #define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_uVmcsVar) \ 4600 do { \ 4601 (a_uVmcsVar) = pVCpu->hm.s.vmx.RestoreHost.uHostSel##a_Seg; \ 4602 if ((a_uVmcsVar) & X86_SEL_RPL) \ 4603 { \ 4604 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \ 4605 (a_uVmcsVar) = 0; \ 4606 } \ 4607 } while (0) 4608 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS); 4609 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES); 4610 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS); 4611 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS); 4612 #undef VMXLOCAL_ADJUST_HOST_SEG 4613 } 4614 else 4615 { 4616 #define VMXLOCAL_ADJUST_HOST_SEG(a_Seg, a_uVmcsVar) \ 4617 do { \ 4618 (a_uVmcsVar) = pVCpu->hm.s.vmx.RestoreHost.uHostSel##a_Seg; \ 4619 if ((a_uVmcsVar) & (X86_SEL_RPL | X86_SEL_LDT)) \ 4620 { \ 4621 if (!((a_uVmcsVar) & X86_SEL_LDT)) \ 4622 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \ 4623 else \ 4624 { \ 4625 uint32_t const fAttr = ASMGetSegAttr(a_uVmcsVar); \ 4626 if ((fAttr & X86_DESC_P) && fAttr != UINT32_MAX) \ 4627 fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_##a_Seg; \ 4628 } \ 4629 (a_uVmcsVar) = 0; \ 4630 } \ 4631 } while (0) 4632 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS); 4633 VMXLOCAL_ADJUST_HOST_SEG(ES, uSelES); 4634 VMXLOCAL_ADJUST_HOST_SEG(FS, uSelFS); 4635 VMXLOCAL_ADJUST_HOST_SEG(GS, uSelGS); 4636 #undef VMXLOCAL_ADJUST_HOST_SEG 4637 } 4638 } 4581 4639 4582 4640 /* Verification based on Intel spec. 
26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */ 4583 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT)); 4584 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT)); 4641 Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelTR & X86_SEL_RPL)); Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelTR & X86_SEL_LDT)); Assert(pVCpu->hm.s.vmx.RestoreHost.uHostSelTR); 4642 Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelCS & X86_SEL_RPL)); Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelCS & X86_SEL_LDT)); Assert(pVCpu->hm.s.vmx.RestoreHost.uHostSelCS); 4643 Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelSS & X86_SEL_RPL)); Assert(!(pVCpu->hm.s.vmx.RestoreHost.uHostSelSS & X86_SEL_LDT)); 4585 4644 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT)); 4586 4645 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT)); 4587 4646 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT)); 4588 4647 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT)); 4589 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));4590 Assert(uSelCS);4591 Assert(uSelTR);4592 4593 /* Write these host selector fields into the host-state area in the VMCS. 
*/4594 int rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL, uSelCS); AssertRC(rc);4595 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL, uSelSS); AssertRC(rc);4596 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL, uSelDS); AssertRC(rc);4597 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL, uSelES); AssertRC(rc);4598 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL, uSelFS); AssertRC(rc);4599 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL, uSelGS); AssertRC(rc);4600 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL, uSelTR); AssertRC(rc);4601 4602 /*4603 * Host GDTR and IDTR.4604 */4605 RTGDTRALIGNED Gdtr;4606 RTIDTRALIGNED Idtr;4607 ASMGetGDTR(&Gdtr.s.Gdtr);4608 ASMGetIDTR(&Idtr.s.Idtr);4609 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, Gdtr.s.Gdtr.pGdt); AssertRC(rc);4610 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, Idtr.s.Idtr.pIdt); AssertRC(rc);4611 4648 4612 4649 /* … … 4614 4651 * them to the maximum limit (0xffff) on every VM-exit. 4615 4652 */ 4616 if ( Gdtr.s.Gdtr.cbGdt!= 0xffff)4653 if (pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb != 0xffff) 4617 4654 fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR; 4618 4655 … … 4627 4664 */ 4628 4665 #if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) 4629 if ( Idtr.s.Idtr.cbIdt< 0x0fff)4666 if (pVCpu->hm.s.vmx.RestoreHost.HostIdtr.cb < 0x0fff) 4630 4667 #else 4631 if ( Idtr.s.Idtr.cbIdt!= 0xffff)4668 if (pVCpu->hm.s.vmx.RestoreHost.HostIdtr.cb != 0xffff) 4632 4669 #endif 4633 {4634 4670 fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR; 4635 pVCpu->hm.s.vmx.RestoreHost.HostIdtr.cb = Idtr.s.Idtr.cbIdt;4636 pVCpu->hm.s.vmx.RestoreHost.HostIdtr.uAddr = Idtr.s.Idtr.pIdt;4637 }4638 4671 4639 4672 /* … … 4642 4675 * RPL should be too in most cases. 4643 4676 */ 4644 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.s.Gdtr.cbGdt, 4645 ("TR selector exceeds limit. 
TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.s.Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE); 4646 4647 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.s.Gdtr.pGdt + (uSelTR & X86_SEL_MASK)); 4677 RTSEL const uSelTR = pVCpu->hm.s.vmx.RestoreHost.uHostSelTR; 4678 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb, 4679 ("TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb), 4680 VERR_VMX_INVALID_HOST_STATE); 4681 4682 PCX86DESCHC pDesc = (PCX86DESCHC)(pVCpu->hm.s.vmx.RestoreHost.HostGdtr.uAddr + (uSelTR & X86_SEL_MASK)); 4648 4683 uintptr_t const uTRBase = X86DESC64_BASE(pDesc); 4649 4684 … … 4672 4707 /* The GDT is read-only but the writable GDT is available. */ 4673 4708 fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_NEED_WRITABLE; 4674 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = Gdtr.s.Gdtr.cbGdt;4675 rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr);4709 pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.cb = pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb; 4710 int rc = SUPR0GetCurrentGdtRw(&pVCpu->hm.s.vmx.RestoreHost.HostGdtrRw.uAddr); 4676 4711 AssertRCReturn(rc, rc); 4677 4712 } 4678 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR; 4679 } 4680 4681 /* 4682 * Store the GDTR as we need it when restoring the GDT and while restoring the TR. 4683 */ 4684 if (fRestoreHostFlags & (VMX_RESTORE_HOST_GDTR | VMX_RESTORE_HOST_SEL_TR)) 4685 { 4686 pVCpu->hm.s.vmx.RestoreHost.HostGdtr.cb = Gdtr.s.Gdtr.cbGdt; 4687 pVCpu->hm.s.vmx.RestoreHost.HostGdtr.uAddr = Gdtr.s.Gdtr.pGdt; 4688 } 4689 4690 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE, uTRBase); 4691 AssertRC(rc); 4692 4693 /* 4694 * Host FS base and GS base. 4695 * ASSUME it is safe to use rdfsbase and friends if the CR4.FSGSBASE bit is set 4696 * without also checking the cpuid bit. 
4697 */ 4698 uint64_t GCPtrFSBase, GCPtrGSBase; 4699 if (uHostCr4 & X86_CR4_FSGSBASE) 4700 { 4701 fRestoreHostFlags |= VMX_RESTORE_HOST_CAN_USE_WRFSBASE_AND_WRGSBASE; 4702 GCPtrFSBase = ASMGetFSBase(); 4703 GCPtrGSBase = ASMGetGSBase(); 4704 } 4705 else 4706 { 4707 GCPtrFSBase = ASMRdMsr(MSR_K8_FS_BASE); 4708 GCPtrGSBase = ASMRdMsr(MSR_K8_GS_BASE); 4709 } 4710 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE, GCPtrFSBase); AssertRC(rc); 4711 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE, GCPtrGSBase); AssertRC(rc); 4712 4713 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */ 4714 if (fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS) 4715 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = GCPtrFSBase; 4716 if (fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS) 4717 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = GCPtrGSBase; 4713 } 4718 4714 4719 4715 pVCpu->hm.s.vmx.fRestoreHostFlags = fRestoreHostFlags; 4720 4716 4717 /* 4718 * Do all the VMCS updates in one block to assist nested virtualization. 
4719 */ 4720 int rc; 4721 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_CS_SEL, pVCpu->hm.s.vmx.RestoreHost.uHostSelCS); AssertRC(rc); 4722 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_SS_SEL, pVCpu->hm.s.vmx.RestoreHost.uHostSelSS); AssertRC(rc); 4723 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_DS_SEL, uSelDS); AssertRC(rc); 4724 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_ES_SEL, uSelES); AssertRC(rc); 4725 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_FS_SEL, uSelFS); AssertRC(rc); 4726 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_GS_SEL, uSelGS); AssertRC(rc); 4727 rc = VMXWriteVmcs16(VMX_VMCS16_HOST_TR_SEL, pVCpu->hm.s.vmx.RestoreHost.uHostSelTR); AssertRC(rc); 4728 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GDTR_BASE, pVCpu->hm.s.vmx.RestoreHost.HostGdtr.uAddr); AssertRC(rc); 4729 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_IDTR_BASE, pVCpu->hm.s.vmx.RestoreHost.HostIdtr.uAddr); AssertRC(rc); 4730 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_TR_BASE, uTRBase); AssertRC(rc); 4731 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_FS_BASE, pVCpu->hm.s.vmx.RestoreHost.uHostFSBase); AssertRC(rc); 4732 rc = VMXWriteVmcsNw(VMX_VMCS_HOST_GS_BASE, pVCpu->hm.s.vmx.RestoreHost.uHostGSBase); AssertRC(rc); 4733 4721 4734 return VINF_SUCCESS; 4722 #undef VMXLOCAL_ADJUST_HOST_SEG4723 4735 } 4724 4736 -
trunk/src/VBox/VMM/include/HMInternal.h
r87401 r87408 914 914 RTSEL uHostSelGS; /**< 0x10 */ 915 915 RTSEL uHostSelTR; /**< 0x12 */ 916 uint8_t abPadding0[2]; /**< 0x14*/916 RTSEL uHostSelSS; /**< 0x14 - not restored, just for fetching */ 917 917 X86XDTR64 HostGdtrRw; /**< 0x16 - should be aligned by its 64-bit member. */ 918 uint8_t abPadding1[6]; /**< 0x20 */ 918 RTSEL uHostSelCS; /**< 0x20 - not restored, just for fetching */ 919 uint8_t abPadding1[4]; /**< 0x22 */ 919 920 X86XDTR64 HostIdtr; /**< 0x26 - should be aligned by its 64-bit member. */ 920 921 uint64_t uHostFSBase; /**< 0x30 */ … … 1343 1344 1344 1345 /** 1346 * Used on platforms with poor inline assembly support to retrieve all the 1347 * info from the CPU and put it in the @a pRestoreHost structure. 1348 */ 1349 DECLASM(void) hmR0VmxExportHostSegmentRegsAsmHlp(PVMXRESTOREHOST pRestoreHost, bool fHaveFsGsBase); 1350 1351 /** 1345 1352 * Restores some host-state fields that need not be done on every VM-exit. 1346 1353 * -
trunk/src/VBox/VMM/include/HMInternal.mac
r87401 r87408 102 102 .uHostSelGS resw 1 103 103 .uHostSelTR resw 1 104 . abPadding0 resb 2104 .uHostSelSS resw 1 105 105 .HostGdtrRw resb 10 106 .abPadding1 resb 6 106 .uHostSelCS resw 1 107 .abPadding1 resb 4 107 108 .HostIdtr resb 10 108 109 alignb 8
Note:
See TracChangeset
for help on using the changeset viewer.