Changeset 45637 in vbox for trunk/src/VBox
- Timestamp: Apr 19, 2013, 10:21:54 AM
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (diff r45623:r45637)

The file-local VMX_* preprocessor identifiers move into an HMVMX_* namespace, and several of them gain doxygen comments. In the definitions block at the top of the file:

- The DEBUG_ramshankar build switches VBOX_ALWAYS_SAVE_FULL_VTX_STATE, VBOX_ALWAYS_SYNC_FULL_VTX_STATE and VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS become HMVMX_SAVE_FULL_GUEST_STATE, HMVMX_SYNC_FULL_GUEST_STATE and HMVMX_TRAP_ALL_EXCEPTIONS.
- VMX_IS_64BIT_HOST_MODE() becomes HMVMX_IS_64BIT_HOST_MODE(); its three definitions are otherwise unchanged: (true) for RT_ARCH_AMD64, (g_fVMXIs64bitHost != 0) for VBOX_WITH_HYBRID_32BIT_KERNEL, and (false) everywhere else.
- The unusable-selector bit and the tagged-TLB flush selectors are renamed and documented:

      /** This bit indicates the segment selector is unusable in VT-x. */
      #define HMVMX_SEL_UNUSABLE                RT_BIT(16)

      /** Determine which tagged-TLB flush handler to use. */
      #define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID   0
      #define HMVMX_FLUSH_TAGGED_TLB_EPT        1
      #define HMVMX_FLUSH_TAGGED_TLB_VPID       2
      #define HMVMX_FLUSH_TAGGED_TLB_NONE       3

- The updated-guest-state flags VMX_UPDATED_GUEST_* become HMVMX_UPDATED_GUEST_* with the same bit assignments: FPU (0), RIP (1), RSP (2), RFLAGS (3), CR0 (4), CR3 (5), CR4 (6), GDTR (7), IDTR (8), LDTR (9), TR (10), SEGMENT_REGS (11), DEBUG (12), FS_BASE_MSR (13), GS_BASE_MSR (14), SYSENTER_CS_MSR (15), SYSENTER_EIP_MSR (16), SYSENTER_ESP_MSR (17), AUTO_LOAD_STORE_MSRS (18), ACTIVITY_STATE (19), APIC_STATE (20), plus HMVMX_UPDATED_GUEST_ALL, which ORs all of the above.
- The transient read flags VMX_TRANSIENT_* become HMVMX_UPDATED_TRANSIENT_* (IDT_VECTORING_INFO, IDT_VECTORING_ERROR_CODE, EXIT_QUALIFICATION, EXIT_INSTR_LEN, EXIT_INTERRUPTION_INFO, EXIT_INTERRUPTION_ERROR_CODE on bits 0-5). Their comment now reads: "Flags to skip redundant reads of some common VMCS fields that are not part of the guest-CPU state but are in the transient structure."
- VMX_REAL_MODE_XCPT_BITMAP becomes HMVMX_REAL_MODE_XCPT_MASK; it still intercepts every real-mode-relevant exception vector except #NM and #PF, which remain commented out. The contributory-exception mask is likewise renamed:

      /**
       * Exception bitmap mask for all contributory exceptions.
       */
      #define HMVMX_CONTRIBUTORY_XCPT_MASK  (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) \
                                             | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_DE))

- VMX_INSTR_ERROR_MAX becomes HMVMX_INSTR_ERROR_MAX (still 28), and the comment on VMXTRANSIENT::fVmcsFieldsRead now points to the HMVMX_UPDATED_TRANSIENT_* flags.
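These RT_BIT()-based vector masks are consumed by testing an exception vector's bit against them; the file's own hmR0VmxIsContributoryXcpt() presumably does something along these lines. A minimal, hypothetical helper (invented for illustration, not part of the changeset) could look like this:

    /* Sketch only: true if uVector is one of the contributory exceptions
       (#DE, #TS, #NP, #SS, #GP) covered by HMVMX_CONTRIBUTORY_XCPT_MASK. */
    DECLINLINE(bool) hmVmxIsContributoryXcptSketch(uint32_t uVector)
    {
        /* Guard against shifting by 32 or more before testing the mask. */
        return uVector < 32
            && RT_BOOL(RT_BIT(uVector) & HMVMX_CONTRIBUTORY_XCPT_MASK);
    }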
The rest of the HMVMXR0.cpp diff updates every user of the renamed identifiers:

- The VBOX_STRICT-only table g_apszVmxInstrErrors[] is sized with HMVMX_INSTR_ERROR_MAX + 1, and the VMX-error logging path compares the VM-instruction error against HMVMX_INSTR_ERROR_MAX before indexing it.
- The lazy VMCS readers hmR0VmxReadExitIntrInfoVmcs(), hmR0VmxReadExitIntrErrorCodeVmcs(), hmR0VmxReadExitInstrLenVmcs(), hmR0VmxReadExitQualificationVmcs(), hmR0VmxReadIdtVectoringInfoVmcs() and hmR0VmxReadIdtVectoringErrorCodeVmcs() test and set the matching HMVMX_UPDATED_TRANSIENT_* bit in pVmxTransient->fVmcsFieldsRead. The first of them is representative of all six:

      DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
      {
          if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
          {
              int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
              AssertRCReturn(rc, rc);
              pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
          }
          return VINF_SUCCESS;
      }

- The tagged-TLB flush dispatcher switches on pVM->hm.s.vmx.uFlushTaggedTlb, with HMVMX_FLUSH_TAGGED_TLB_EPT_VPID, _EPT, _VPID and _NONE selecting hmR0VmxFlushTaggedTlbBoth(), hmR0VmxFlushTaggedTlbEpt(), hmR0VmxFlushTaggedTlbVpid() and hmR0VmxFlushTaggedTlbNone() respectively; the set-up code stores the renamed constants into uFlushTaggedTlb depending on fNestedPaging and fVpid.
- The VMCS set-up code initializes pVCpu->hm.s.vmx.fUpdatedGuestState to HMVMX_UPDATED_GUEST_ALL, since nothing is in the VMCS yet.
- Every host-state path that special-cases 64-bit hosts now uses HMVMX_IS_64BIT_HOST_MODE(): the 64-bit host CR3 on the darwin 32-bit hybrid kernel, the SUPR0Abs64bitKernelCS selector, the 64-bit GDTR and TR base, the host FS/GS base MSRs, MSR_K6_STAR in the auto-load MSR area, the SYSENTER ESP/EIP MSRs, the HOST_ADDR_SPACE_SIZE VM-exit control, and the host RIP/MSR_K6_EFER debug logging.
- The interrupt-inhibition code asserts that HMVMX_UPDATED_GUEST_RIP and HMVMX_UPDATED_GUEST_RFLAGS are set before comparing RIP with EMGetInhibitInterruptsPC().
- Real-mode emulation (without unrestricted guest execution) ORs HMVMX_REAL_MODE_XCPT_MASK into, or clears it from, pVCpu->hm.s.vmx.u32XcptBitmap.
- The guest segment-register sanity checks, hmR0VmxWriteSegmentReg() and the TR/LDTR loading code use HMVMX_SEL_UNUSABLE for NULL or unusable selectors and in their access-rights validation asserts.
- The IDT-vectoring reinjection logic tests pVCpu->hm.s.vmx.u32XcptBitmap against HMVMX_CONTRIBUTORY_XCPT_MASK when it detects back-to-back contributory exceptions.
- hmR0VmxSaveGuestCR0() (which also covers the FPU bit), hmR0VmxSaveGuestCR4(), hmR0VmxSaveGuestRip(), hmR0VmxSaveGuestRsp() and hmR0VmxSaveGuestRflags() check and mark the matching HMVMX_UPDATED_GUEST_* bits in fUpdatedGuestState.
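A hedged sketch of how an exit handler consumes the cached transient fields (the handler name is invented for illustration; the helpers, the VMXTRANSIENT fields and AssertRCReturn()/NOREF() are used as elsewhere in this file):

    static int hmR0VmxExitXcptSketch(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        /* Each helper issues at most one VMREAD per VM-exit; repeated calls are cheap. */
        int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
        rc    |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
        AssertRCReturn(rc, rc);

        /* Work with the copies cached in the transient structure. */
        uint32_t const uIntrInfo  = pVmxTransient->uExitIntrInfo;
        uint32_t const uErrorCode = pVmxTransient->uExitIntrErrorCode;
        NOREF(uIntrInfo); NOREF(uErrorCode);
        return VINF_SUCCESS;
    }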
- The remaining guest-state savers follow the same pattern with their HMVMX_UPDATED_GUEST_* bits: activity state, the SYSENTER CS/EIP/ESP MSRs, the FS and GS base MSRs, the auto load/store MSRs, CR3 (saved only after CR0 and CR4), the segment registers (including restoring the real-mode attribute overrides and marking unusable selectors with HMVMX_SEL_UNUSABLE), LDTR, GDTR, IDTR, TR, DR7 and the APIC state.
- hmR0VmxSaveGuestState() returns early when fUpdatedGuestState already equals HMVMX_UPDATED_GUEST_ALL and asserts that value once every group has been saved; the code that saves the complete state before returning to ring-3 asserts the same.
- The renamed debug switches are honoured in the main loop: HMVMX_SYNC_FULL_GUEST_STATE forces HM_CHANGED_ALL_GUEST before loading the guest state, and HMVMX_SYNC_FULL_GUEST_STATE or HMVMX_SAVE_FULL_GUEST_STATE triggers a full hmR0VmxSaveGuestState() after every VM-exit.
- The event code asserts HMVMX_UPDATED_GUEST_RFLAGS when STI blocking is indicated, the x2APIC MSR-write handling asserts HMVMX_UPDATED_GUEST_APIC_STATE before flagging HM_CHANGED_VMX_GUEST_APIC_STATE, and an exception exit handler asserts HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO before re-injecting the event via hmR0VmxSetPendingEvent().
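The dirty-tracking convention behind fUpdatedGuestState can be summed up in a short sketch. The wrapper below is hypothetical (the real code calls hmR0VmxSaveGuestState() directly from several places), but the fast path and the final assertion mirror the hunks above:

    /* Sketch: make sure the whole guest-CPU context in pMixedCtx is current
       before leaving ring-0, using the HMVMX_UPDATED_GUEST_* dirty bits. */
    static int hmR0VmxEnsureGuestStateSyncedSketch(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    {
        /* Fast path: everything was already pulled out of the VMCS. */
        if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
            return VINF_SUCCESS;

        /* Slow path: save the remaining fields from the VMCS. */
        int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
        AssertMsg(   RT_FAILURE(rc)
                  || pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
                  ("Missed guest state bits; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
        return rc;
    }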
trunk/src/VBox/VMM/include/HMInternal.h (diff r45623:r45637)

The #define HM_VMX_EMULATE_REALMODE that followed the HM_PROFILE_EXIT_DISPATCH block is removed (together with its blank line); the surrounding code, including the comment about the MSR auto load/store not working for the KERNEL_GS_BASE MSR, is unchanged.