Changeset 13909 in vbox for trunk/src/VBox/VMM
Timestamp: Nov 6, 2008 12:24:12 PM
svn:sync-xref-src-repo-rev: 38916
Location: trunk/src/VBox/VMM
Files: 3 edited

Legend: lines prefixed with '-' were removed (r13905), lines prefixed with '+' were added (r13909); hunk headers give the approximate r13905 line number.

trunk/src/VBox/VMM/HWACCM.cpp (r13905 → r13909)

@@ line 72 @@

     /* Some structure checks. */
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
-
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
-    AssertMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
-    AssertMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved3) == 0xC0, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved3)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.EventInject) == 0xA8, ("ctrl.EventInject offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.EventInject)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo) == 0x88, ("ctrl.ExitIntInfo offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.ExitIntInfo)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl) == 0x58, ("ctrl.TLBCtrl offset = %x\n", RT_OFFSETOF(SVM_VMCB, ctrl.TLBCtrl)));
+
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest) == 0x400, ("guest offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4) == 0x4A0, ("guest.u8Reserved4 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved4)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6) == 0x4D8, ("guest.u8Reserved6 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved6)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7) == 0x580, ("guest.u8Reserved7 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved7)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9) == 0x648, ("guest.u8Reserved9 offset = %x\n", RT_OFFSETOF(SVM_VMCB, guest.u8Reserved9)));
+    AssertReleaseMsg(RT_OFFSETOF(SVM_VMCB, u8Reserved10) == 0x698, ("u8Reserved3 offset = %x\n", RT_OFFSETOF(SVM_VMCB, u8Reserved10)));
+    AssertReleaseMsg(sizeof(SVM_VMCB) == 0x1000, ("SVM_VMCB size = %x\n", sizeof(SVM_VMCB)));

@@ line 686 @@
     {
         LogRel(("HWACCM: VMX setup failed with rc=%Rrc!\n", rc));
-        LogRel(("HWACCM: Last instruction error %x\n", pVM->hwaccm.s.vmx.ulLastInstrError));
+        LogRel(("HWACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));
         pVM->fHWACCMEnabled = false;
     }

@@ line 822 @@
         && pVM->fHWACCMEnabled)
     {
-        if (    pVM->hwaccm.s.vmx.enmCurrGuestMode == PGMMODE_REAL
+        if (    pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMMODE_REAL
             &&  enmGuestMode >= PGMMODE_PROTECTED)
         {

@@ line 898 @@
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
+        PVMCPU pVCpu = &pVM->aCpus[i];
+
         /* On first entry we'll sync everything. */
-        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
-
-        pVM->aCpus[i].hwaccm.s.vmx.cr0_mask = 0;
-        pVM->aCpus[i].hwaccm.s.vmx.cr4_mask = 0;
-
-        pVM->aCpus[i].hwaccm.s.Event.fPending = false;
-    }
-
-    /* Reset state information for real-mode emulation in VT-x. */
-    pVM->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
+        pVCpu->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
+
+        pVCpu->hwaccm.s.vmx.cr0_mask = 0;
+        pVCpu->hwaccm.s.vmx.cr4_mask = 0;
+
+        pVCpu->hwaccm.s.Event.fPending = false;
+
+        /* Reset state information for real-mode emulation in VT-x. */
+        pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
+    }
 }

@@ line 953 @@
      * from real to protected mode. (all sorts of RPL & DPL assumptions)
      */
-    if (    pVM->hwaccm.s.vmx.enmCurrGuestMode == PGMMODE_REAL
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+
+    if (    pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMMODE_REAL
         &&  enmGuestMode >= PGMMODE_PROTECTED)
     {

@@ line 1098 @@
             case VERR_VMX_UNABLE_TO_START_VM:
-                LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastInstrError));
-                LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastExitReason));
+                LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
+                LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
                 break;

             case VERR_VMX_UNABLE_TO_RESUME_VM:
-                LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastInstrError));
-                LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastExitReason));
+                LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulInstrError));
+                LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulExitReason));
                 break;
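The first hunk tightens the SVM_VMCB layout checks from AssertMsg to AssertReleaseMsg, so a wrong structure layout is caught in release builds as well as strict builds. Below is a minimal stand-alone sketch of the same offset-checking technique in plain C; the DEMOVMCB structure, its member names, and the CHECK_OFFSET macro are invented for illustration and are not the real SVM_VMCB definition or the VBox assertion macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical control block with a fixed binary layout (the real SVM_VMCB
   lives in the VBox headers and is considerably more detailed). */
typedef struct DEMOVMCB
{
    uint8_t abCtrl[0xC0];       /* control area up to offset 0xC0        */
    uint8_t abReserved[0x340];  /* reserved space up to the guest area   */
    uint8_t abGuest[0xC00];     /* guest state save area, ends at 0x1000 */
} DEMOVMCB;

/* Always-on layout check, similar in spirit to AssertReleaseMsg: unlike a
   plain assert()/AssertMsg it is not compiled out of optimized builds. */
#define CHECK_OFFSET(type, member, expected) \
    do { \
        if (offsetof(type, member) != (size_t)(expected)) { \
            fprintf(stderr, #member " offset = %zx\n", offsetof(type, member)); \
            abort(); \
        } \
    } while (0)

int main(void)
{
    CHECK_OFFSET(DEMOVMCB, abReserved, 0xC0);
    CHECK_OFFSET(DEMOVMCB, abGuest,    0x400);
    if (sizeof(DEMOVMCB) != 0x1000)
        abort();
    printf("layout ok\n");
    return 0;
}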
trunk/src/VBox/VMM/HWACCMInternal.h (r13905 → r13909)

@@ line 280 @@
         } msr;

-        /* Last instruction error */
-        uint32_t                    ulLastInstrError;
-
-        /** The last known guest paging mode. */
-        PGMMODE                     enmCurrGuestMode;
-
         /** Flush types for invept & invvpid; they depend on capabilities. */
         VMX_FLUSH                   enmFlushPage;

@@ line 410 @@
             uint64_t                u64VMCSPhys;
             uint32_t                ulVMCSRevision;
-            uint32_t                ulLastInstrError;
-            uint32_t                ulLastExitReason;
+            uint32_t                ulInstrError;
+            uint32_t                ulExitReason;
             uint32_t                padding;
         } lasterror;

+        /** The last known guest paging mode. */
+        PGMMODE                     enmCurrGuestMode;
     } vmx;
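The header change is the core of the changeset: the VT-x last-error details and the current guest paging mode stop being per-VM fields and live only in the per-VCPU structure, with the lasterror members renamed from ulLastInstrError/ulLastExitReason to ulInstrError/ulExitReason. A rough sketch of that per-VM vs. per-VCPU split, using invented DEMO* type names rather than the real HWACCM structures:

#include <stdint.h>

typedef enum DEMOPGMMODE
{
    DEMOPGMMODE_REAL = 1,
    DEMOPGMMODE_PROTECTED
} DEMOPGMMODE;

/* Per-virtual-CPU state: anything a single CPU tracks on its own goes here. */
typedef struct DEMOVMCPU
{
    struct
    {
        struct
        {
            uint32_t    ulInstrError;       /* was ulLastInstrError before the rename */
            uint32_t    ulExitReason;       /* was ulLastExitReason before the rename */
        } lasterror;
        DEMOPGMMODE     enmCurrGuestMode;   /* now tracked per CPU, not per VM */
    } vmx;
} DEMOVMCPU;

/* Per-VM state only keeps what is genuinely shared by all CPUs. */
typedef struct DEMOVM
{
    uint32_t    cCPUs;
    DEMOVMCPU   aCpus[8];
} DEMOVM;

/* Mirrors the HWACCMR3Reset hunk above: walk aCpus and reset each CPU's own copy. */
static void demoReset(DEMOVM *pVM)
{
    for (unsigned i = 0; i < pVM->cCPUs; i++)
    {
        DEMOVMCPU *pVCpu = &pVM->aCpus[i];

        pVCpu->vmx.lasterror.ulInstrError = 0;
        pVCpu->vmx.lasterror.ulExitReason = 0;
        pVCpu->vmx.enmCurrGuestMode       = DEMOPGMMODE_REAL;
    }
}

int main(void)
{
    DEMOVM vm;
    vm.cCPUs = 2;
    demoReset(&vm);
    return vm.aCpus[0].vmx.enmCurrGuestMode == DEMOPGMMODE_REAL ? 0 : 1;
}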
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r13905 → r13909)

@@ line 62 @@
-static void VMXR0CheckError(PVM pVM, int rc)
+static void VMXR0CheckError(PVM pVM, PVMCPU pVCpu, int rc)
 {
     if (rc == VERR_VMX_GENERIC)

@@ line 68 @@
         VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
-        pVM->hwaccm.s.vmx.ulLastInstrError = instrError;
+        pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
     }
     pVM->hwaccm.s.lLastError = rc;

@@ line 107 @@
     {
         if (pVM)
-            VMXR0CheckError(pVM, rc);
+            VMXR0CheckError(pVM, &pVM->aCpus[0], rc);
         ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
         return VERR_VMX_VMXON_FAILED;

@@ line 190 @@
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
-        pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
+        PVMCPU pVCpu = &pVM->aCpus[i];
+
+        pVCpu->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;

         /* Allocate one page for the VM control structure (VMCS). */
-        rc = RTR0MemObjAllocCont(&pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjVMCS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))
             return rc;

-        pVM->aCpus[i].hwaccm.s.vmx.pVMCS     = RTR0MemObjAddress(pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS);
-        pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys = RTR0MemObjGetPagePhysAddr(pVM->aCpus[i].hwaccm.s.vmx.pMemObjVMCS, 0);
-        ASMMemZero32(pVM->aCpus[i].hwaccm.s.vmx.pVMCS, PAGE_SIZE);
-
-        pVM->aCpus[i].hwaccm.s.vmx.cr0_mask = 0;
-        pVM->aCpus[i].hwaccm.s.vmx.cr4_mask = 0;
+        pVCpu->hwaccm.s.vmx.pVMCS     = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjVMCS);
+        pVCpu->hwaccm.s.vmx.pVMCSPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVMCS, 0);
+        ASMMemZero32(pVCpu->hwaccm.s.vmx.pVMCS, PAGE_SIZE);
+
+        pVCpu->hwaccm.s.vmx.cr0_mask = 0;
+        pVCpu->hwaccm.s.vmx.cr4_mask = 0;
+
+        /* Current guest paging mode. */
+        pVCpu->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;

 #ifdef LOG_ENABLED
-        SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVM->aCpus[i].hwaccm.s.vmx.pVMCS, (uint32_t)pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys);
-#endif
-    }
-
-    /* Current guest paging mode. */
-    pVM->hwaccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;
+        SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hwaccm.s.vmx.pVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.pVMCSPhys);
+#endif
+    }

     return VINF_SUCCESS;

@@ line 524 @@
 vmx_end:
-    VMXR0CheckError(pVM, rc);
+    VMXR0CheckError(pVM, &pVM->aCpus[0], rc);
     return rc;
 }

@@ line 968 @@
 #ifdef HWACCM_VMX_EMULATE_REALMODE
     PGMMODE enmGuestMode = PGMGetGuestMode(pVM);
-    if (pVM->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
+    if (pVCpu->hwaccm.s.vmx.enmCurrGuestMode != enmGuestMode)
     {
         /* Correct weird requirements for switching to protected mode. */
-        if (    pVM->hwaccm.s.vmx.enmCurrGuestMode == PGMMODE_REAL
+        if (    pVCpu->hwaccm.s.vmx.enmCurrGuestMode == PGMMODE_REAL
             &&  enmGuestMode >= PGMMODE_PROTECTED)
         {

@@ line 986 @@
         else
         /* Switching from protected mode to real mode. */
-        if (    pVM->hwaccm.s.vmx.enmCurrGuestMode >= PGMMODE_PROTECTED
+        if (    pVCpu->hwaccm.s.vmx.enmCurrGuestMode >= PGMMODE_PROTECTED
             &&  enmGuestMode == PGMMODE_REAL)
         {

@@ line 1003 @@
             Assert(pCtx->gsHid.u64Base <= 0xfffff);
         }
-        pVM->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;
+        pVCpu->hwaccm.s.vmx.enmCurrGuestMode = enmGuestMode;

@@ line 3172 @@
         Log(("Current stack %08x\n", &rc));

-        pVCpu->hwaccm.s.vmx.lasterror.ulLastInstrError = instrError;
-        pVCpu->hwaccm.s.vmx.lasterror.ulLastExitReason = exitReason;
+        pVCpu->hwaccm.s.vmx.lasterror.ulInstrError = instrError;
+        pVCpu->hwaccm.s.vmx.lasterror.ulExitReason = exitReason;

 #ifdef VBOX_STRICT
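In HWVMXR0.cpp the same split shows up in the error-reporting path: VMXR0CheckError now receives the VCPU explicitly and stores the VMX instruction error in that CPU's lasterror block, while callers that run outside any specific CPU context pass &pVM->aCpus[0]. A small stand-alone sketch of that calling pattern follows; the DEMO* names and the demoReadInstrError helper are placeholders, not the real VirtualBox API.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for PVM/PVMCPU; the real layouts are in HWACCMInternal.h. */
typedef struct DEMOVMCPU
{
    struct
    {
        uint32_t ulInstrError;
        uint32_t ulExitReason;
    } lasterror;
} DEMOVMCPU;

typedef struct DEMOVM
{
    int32_t   lLastError;
    DEMOVMCPU aCpus[8];
} DEMOVM;

#define DEMO_VERR_VMX_GENERIC (-1)

/* Placeholder for reading VMX_VMCS_RO_VM_INSTR_ERROR out of the current VMCS. */
static uint32_t demoReadInstrError(void)
{
    return 7;   /* arbitrary demo value */
}

/* Same shape as the new VMXR0CheckError(pVM, pVCpu, rc): the instruction error
   is recorded per VCPU, while the overall failure code stays per VM. */
static void demoCheckError(DEMOVM *pVM, DEMOVMCPU *pVCpu, int rc)
{
    if (rc == DEMO_VERR_VMX_GENERIC)
        pVCpu->lasterror.ulInstrError = demoReadInstrError();
    pVM->lLastError = rc;
}

int main(void)
{
    DEMOVM vm = { 0, { { { 0, 0 } } } };

    /* Callers with no specific CPU context (e.g. global VT-x setup paths) use
       CPU 0, matching the &pVM->aCpus[0] arguments introduced by this changeset. */
    demoCheckError(&vm, &vm.aCpus[0], DEMO_VERR_VMX_GENERIC);

    printf("lLastError=%d instrError=%u\n",
           (int)vm.lLastError, (unsigned)vm.aCpus[0].lasterror.ulInstrError);
    return 0;
}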