Changeset 39038 in vbox
- Timestamp: Oct 19, 2011 2:36:27 PM (13 years ago)
- svn:sync-xref-src-repo-rev: 74458
- Location: trunk/src/VBox/VMM
- Files: 20 edited
Legend: lines prefixed with '-' were removed, lines prefixed with '+' were added; unprefixed lines are unchanged context.
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r38816 → r39038)

     case MSR_IA32_THERM_STATUS:
         /* CPU temperature relative to TCC, to actually activate, CPUID leaf 6 EAX[0] must be set */
-        *puValue = (1 << 31)            /* validity bit */
-                 | (20 << 16)           /* degrees till TCC */;
+        *puValue = RT_BIT(31)           /* validity bit */
+                 | (UINT64_C(20) << 16) /* degrees till TCC */;
         break;

(a second, whitespace-only hunk strips a trailing blank from the comment block
"This only seems to apply to AMD-V; in the VT-x case we *do* need to look at
SS. (ACP2 regression during install after a far call to ring 2) ... Seems it
isn't necessiarly true for newer AMD-V CPUs even, we have to move the
VMCB.guest.u8CPL into Attr.n.u2Dpl ...")
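A short aside on why the MSR_IA32_THERM_STATUS hunk matters (general C behaviour, not something spelled out in the changeset): (1 << 31) is evaluated as a plain int, so the shift lands in the sign bit and the value sign-extends when it is widened to the 64-bit *puValue, whereas RT_BIT(31) yields an unsigned constant and UINT64_C(20) a 64-bit one. A minimal stand-alone sketch of the difference (plain C, not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* int arithmetic: 0x80140000 is a negative int and sign-extends when widened */
        uint64_t bad  = (1 << 31) | (20 << 16);
        /* 64-bit unsigned arithmetic throughout */
        uint64_t good = (UINT64_C(1) << 31) | (UINT64_C(20) << 16);

        printf("bad  = %#llx\n", (unsigned long long)bad);  /* 0xffffffff80140000 on typical compilers */
        printf("good = %#llx\n", (unsigned long long)good); /* 0x80140000 */
        return 0;
    }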
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r39034 → r39038)

In four interpreter helpers the fallback return moves inside the preceding
preprocessor block (the same two-line swap each time; three identical hunks
around lines 632, 701 and 781, plus one at the end of an #ifdef IN_RC block):

         }
     }
-#endif
     return VERR_EM_INTERPRETER;
+#endif
 }

 #ifdef IN_RC
     }
-#endif
     return VERR_EM_INTERPRETER;
+#endif
 }

A return that can never be reached after the returning assertion is dropped:

     AssertMsgFailedReturn(("Unexpected control register move\n"), VERR_EM_INTERPRETER);
-    return VERR_EM_INTERPRETER;
 }

And the dead code after the interpreter's opcode switch is reduced to a comment:

 #undef INTERPRET_CASE
     } /* switch (opcode) */
-    AssertFailed();
-    return VERR_INTERNAL_ERROR;
+    /* not reached */
 }
trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp (r37955 → r39038)

The code that used to follow the #endif becomes a proper #else branch:

     Assert(pVM->hwaccm.s.svm.fSupported);
     return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
+
+#else
+    hwaccmQueueInvlPage(pVCpu, GCVirt);
+    return VINF_SUCCESS;
 #endif
-
-    hwaccmQueueInvlPage(pVCpu, GCVirt);
-    return VINF_SUCCESS;
 }
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp (r39034 → r39038)

 static const unsigned g_aSize2Shift[] =
 {
-    ~0,    /* 0 - invalid */
+    ~0U,   /* 0 - invalid */
     0,     /* *1 == 2^0 */
     1,     /* *2 == 2^1 */
-    ~0,    /* 3 - invalid */
+    ~0U,   /* 3 - invalid */
     2,     /* *4 == 2^2 */
-    ~0,    /* 5 - invalid */
-    ~0,    /* 6 - invalid */
-    ~0,    /* 7 - invalid */
+    ~0U,   /* 5 - invalid */
+    ~0U,   /* 6 - invalid */
+    ~0U,   /* 7 - invalid */
     3      /* *8 == 2^3 */
 };
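The ~0 to ~0U tweak is the same theme as the CPUMAllRegs hunk: ~0 is a signed int with value -1, so using it to initialize elements of a static const unsigned array draws signed/unsigned conversion warnings on stricter compilers, while ~0U has the right type and the same all-bits-set pattern. A tiny illustration (generic C, not part of the changeset):

    #include <assert.h>
    #include <limits.h>

    static const unsigned g_aBefore[] = { ~0 };   /* int -1 converted to unsigned: warning-prone */
    static const unsigned g_aAfter[]  = { ~0U };  /* already unsigned */

    int main(void)
    {
        /* both end up as UINT_MAX; only the types of the initializers differ */
        assert(g_aBefore[0] == UINT_MAX && g_aAfter[0] == UINT_MAX);
        return 0;
    }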
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r39034 → r39038)

The failure paths of the host-mode and shadow-mode switch statements return a
properly typed nil value instead of ~0:

     default:
         AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
-        return ~0;
+        return NIL_RTHCPHYS;
     }
 }

     default:
         AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
-        return ~0;
+        return NIL_RTHCPHYS;
     }
 }
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r39034 → r39038)

A warning-suppression block is added near the top of the template header:

+#ifdef _MSC_VER
+/** @todo we're generating unnecessary code in nested/ept shadow mode and for
+ *        real/prot-guest+RC mode. */
+# pragma warning(disable: 4505)
+#endif

(a whitespace-only tweak also adjusts the indentation of the
"# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)" / "# else" pair guarding
the two SyncPageWorker() prototypes)

In the dirty-page fault handling, the local PteSrc copy is used instead of
dereferencing the pPteSrc pointer:

     if (SHW_PTE_IS_TRACK_DIRTY(*pPteDst))
     {
-        PPGMPAGE pPage = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(*pPteSrc));
+        PPGMPAGE pPage = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(PteSrc));
         SHWPTE PteDst = *pPteDst;
 ...
-        Assert(pPteSrc->n.u1Write);
+        Assert(PteSrc.n.u1Write);
 ...
             && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
         {
-            rc = pgmPhysPageMakeWritable(pVM, pPage, GST_GET_PTE_GCPHYS(*pPteSrc));
+            rc = pgmPhysPageMakeWritable(pVM, pPage, GST_GET_PTE_GCPHYS(PteSrc));
             AssertRC(rc);
         }
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r39034 → r39038)

In the page-crossing read path the second GetPage call and everything that
follows it now sit inside the success branch of the first call:

     rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
     if (RT_SUCCESS(rc))
+    {
         rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
-    if (RT_SUCCESS(rc))
-    {
+        if (RT_SUCCESS(rc))
+        {
             /** @todo we should check reserved bits ... */
             AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
             ... (the rest of the block -- mapping GCPhys1 and GCPhys2 read-only,
             the memcpy/memset handling of VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS,
             the PGMGstModifyPage() accessed-bit updates and the final
             return VINF_SUCCESS; -- is re-indented one level but otherwise
             unchanged) ...
+        }
     }
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r39034 → r39038)

Statements following a fatal assertion are commented out in four places, e.g.:

     AssertFatalMsgFailed(("iFirstPresent=%d cPresent=%d u32=%RX32 poolkind=%x\n", pPage->iFirstPresent, pPage->cPresent, u32, pPage->enmKind));
-    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);
+    /*PGM_DYNMAP_UNUSED_HINT_VM(pVM, pPT);*/
     break;

(the same change is applied after the three other AssertFatalMsgFailed() calls
in this function, two of which pass pPD instead of pPT)

The trailing return is kept only for non-MSC compilers:

     default:
         AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
     }
+
+    /* not reached. */
+#ifndef _MSC_VER
     return fRet;
+#endif
 }
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r37354 → r39038)

      || PGM_SHW_TYPE == PGM_TYPE_EPT
         AssertFailed(); /* can't happen */
+        pPT = NULL; /* shut up MSC */
 # else
         Assert(pgmMapAreMappingsEnabled(pVM));
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp (r37527 → r39038)

     PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
     AssertFatalMsgFailed(("pGip=%p u32Magic=%#x\n", pGip, VALID_PTR(pGip) ? pGip->u32Magic : 0));
+#ifndef _MSC_VER
     return 0; /* gcc false positive warning */
+#endif
 }
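This #ifndef _MSC_VER guard (the PGMAllPool.cpp hunk above uses the same trick) resolves a tug-of-war over statements that follow a fatal assertion. A schematic sketch of the pattern; the premise, which is an assumption about the assertion macros rather than something visible in this diff, is that MSC can tell the macro never returns and flags a following return as unreachable, while gcc cannot and warns about a missing return value if it is omitted:

    #include <stdint.h>

    /* Hypothetical stand-in for an IPRT fatal assertion macro. */
    void my_fatal_assert(const char *pszMsg);

    uint64_t demo_read(const uint64_t *pu64)
    {
        if (pu64)
            return *pu64;
        my_fatal_assert("pu64 is NULL");
    #ifndef _MSC_VER
        return 0; /* quiets gcc's missing-return warning; MSC would flag it as unreachable */
    #endif
    }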
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp (r37955 → r39038)

     pVCpu->trpm.s.uActiveVector    = u8TrapNo;
     pVCpu->trpm.s.enmActiveType    = enmType;
-    pVCpu->trpm.s.uActiveErrorCode = ~0;
+    pVCpu->trpm.s.uActiveErrorCode = ~(RTGCUINT)0;
     pVCpu->trpm.s.uActiveCR2       = 0xdeadface;
     return VINF_SUCCESS;
trunk/src/VBox/VMM/VMMR0/GMMR0.cpp (r39034 → r39038)

The three sanity-check macros now also require GMMR0_WITH_SANITY_CHECK (the
same one-line change for GMM_CHECK_SANITY_UPON_ENTERING,
GMM_CHECK_SANITY_UPON_LEAVING and GMM_CHECK_SANITY_IN_LOOPS):

-#if defined(VBOX_STRICT) && 0
+#if defined(VBOX_STRICT) && defined(GMMR0_WITH_SANITY_CHECK) && 0
 # define GMM_CHECK_SANITY_UPON_ENTERING(pGMM)   (gmmR0SanityCheck((pGMM), __PRETTY_FUNCTION__, __LINE__) == 0)
 #else

The gmmR0SanityCheck() prototype and its definition are wrapped accordingly:

 DECLINLINE(void) gmmR0LinkChunk(PGMMCHUNK pChunk, PGMMCHUNKFREESET pSet);
 DECLINLINE(void) gmmR0SelectSetAndLinkChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk);
+#ifdef GMMR0_WITH_SANITY_CHECK
 static uint32_t  gmmR0SanityCheck(PGMM pGMM, const char *pszFunction, unsigned uLineNo);
+#endif
 static bool      gmmR0FreeChunk(PGMM pGMM, PGVM pGVM, PGMMCHUNK pChunk, bool fRelaxedSem);
 DECLINLINE(void) gmmR0FreePrivatePage(PGMM pGMM, PGVM pGVM, uint32_t idPage, PGMMPAGE pPage);

 }

+#ifdef GMMR0_WITH_SANITY_CHECK

 /**
 ... (the gmmR0SanityCheck() function body, unchanged) ...
 }

+#endif /* GMMR0_WITH_SANITY_CHECK */

 /**
trunk/src/VBox/VMM/VMMR0/GVMMR0.cpp (r37465 → r39038)

 /** The GVMM::u32Magic value (Charlie Haden). */
-#define GVMM_MAGIC      0x19370806
+#define GVMM_MAGIC      UINT32_C(0x19370806)

     ASMAtomicWriteNullPtr(&pHandle->pSession);
     ASMAtomicWriteHandle(&pHandle->hEMT0, NIL_RTNATIVETHREAD);
-    ASMAtomicWriteSize(&pHandle->ProcId, NIL_RTPROCESS);
+    ASMAtomicWriteU32(&pHandle->ProcId, NIL_RTPROCESS);

     gvmmR0UsedUnlock(pGVMM);
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r38816 → r39038)

The TPR locals move from the top of the function to where they are used and
get initialized:

     uint64_t exitCode = (uint64_t)SVM_EXIT_INVALID;
     SVM_VMCB *pVMCB;
-    bool fSyncTPR = false;
     unsigned cResume = 0;
-    uint8_t u8LastTPR;
     PHMGLOBLCPUINFO pCpu = 0;
     RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;

     /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
     /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! (no longer true)
-     * @todo query and update the TPR only when it could have been changed (mmio access)
      */
+    /** @todo query and update the TPR only when it could have been changed (mmio access)
+     */
+    bool    fSyncTPR  = false;
+    uint8_t u8LastTPR = 0; /* Initialized for potentially stupid compilers. */
     if (pVM->hwaccm.s.fHasIoApic)
     {
+        /* TPR caching in CR8 */
         bool fPending;
-
-        /* TPR caching in CR8 */
         rc2 = PDMApicGetTPR(pVCpu, &u8LastTPR, &fPending);
         AssertRC(rc2);

The TPR comparison gains an explicit cast:

     else
     {
-        if ((u8LastTPR >> 4) != pVMCB->ctrl.IntCtrl.n.u8VTPR)
+        if ((uint8_t)(u8LastTPR >> 4) != pVMCB->ctrl.IntCtrl.n.u8VTPR)
         {
             rc2 = PDMApicSetTPR(pVCpu, pVMCB->ctrl.IntCtrl.n.u8VTPR << 4);   /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */

(plus one whitespace-only hunk stripping a trailing blank from a comment
around line 1092)
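The relocated TPR locals follow a pattern worth spelling out: u8LastTPR is only written inside the if (pVM->hwaccm.s.fHasIoApic) branch and only read when fSyncTPR is true, but a compiler's flow analysis cannot always prove that, hence the diff's own "Initialized for potentially stupid compilers" comment. A reduced, hypothetical illustration of the same shape (the names are invented for the sketch):

    #include <stdbool.h>
    #include <stdint.h>

    uint8_t query_tpr(void);   /* stand-in for PDMApicGetTPR() */

    uint8_t demo(bool fHasIoApic)
    {
        bool    fSync = false;
        uint8_t u8Tpr = 0;     /* initialized only to silence maybe-uninitialized warnings */
        if (fHasIoApic)
        {
            u8Tpr = query_tpr();
            fSync = true;
        }
        /* u8Tpr is only consumed when fSync is set, but not every compiler can see that */
        return fSync ? (uint8_t)(u8Tpr >> 4) : 0;
    }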
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r39034 → r39038)

The VBOX_STRICT-only forward declarations are dropped:

 static void hmR0VmxFlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr);
 static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-#ifdef VBOX_STRICT
-static bool hmR0VmxIsValidReadField(uint32_t idxField);
-static bool hmR0VmxIsValidWriteField(uint32_t idxField);
-#endif
 static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);

The rest of the change reorders code at the end of the file so those forward
declarations are no longer needed: the strict-build validators
hmR0VmxIsValidReadField() and hmR0VmxIsValidWriteField() now come first, with
their guard written as a nested "# ifdef VBOX_STRICT" / "# endif /* VBOX_STRICT */"
inside the
#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
section, and VMXR0Execute64BitsHandler() ("Executes the specified handler in
64 mode") is moved below them.  The
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
section containing VMXWriteVMCS64Ex() and VMXWriteCachedVMCSEx() moves along
with it.  The relocated function bodies themselves are unchanged.
trunk/src/VBox/VMM/VMMR0/TRPMR0.cpp (r35346 → r39038)

     PVMCPU pVCpu = VMMGetCpu0(pVM);
     RTUINT uActiveVector = pVCpu->trpm.s.uActiveVector;
-    pVCpu->trpm.s.uActiveVector = ~0;
+    pVCpu->trpm.s.uActiveVector = UINT32_MAX;
     AssertMsgReturnVoid(uActiveVector < 256, ("uActiveVector=%#x is invalid! (More assertions to come, please enjoy!)\n", uActiveVector));
trunk/src/VBox/VMM/VMMR3/PDMBlkCache.cpp (r39034 → r39038)

Redundant parentheses are dropped from two conditions:

         LogFlow(("Evicting entry %#p (%u bytes)\n", pCurr, pCurr->cbData));

-        if (fReuseBuffer && (pCurr->cbData == cbData))
+        if (fReuseBuffer && pCurr->cbData == cbData)
         {
             STAM_COUNTER_INC(&pCache->StatBuffersReused);

         /* We have to remove the last entries from the paged out list. */
-        while (   ((pGhostListDst->cbCached + pCurr->cbData) > pCache->cbRecentlyUsedOutMax)
+        while (   pGhostListDst->cbCached + pCurr->cbData > pCache->cbRecentlyUsedOutMax
                && pGhostEntFree)
         {

A strlen() result is narrowed explicitly when writing the saved state:

     RTSemRWRequestRead(pBlkCache->SemRWEntries, RT_INDEFINITE_WAIT);
-    SSMR3PutU32(pSSM, strlen(pBlkCache->pszId));
+    SSMR3PutU32(pSSM, (uint32_t)strlen(pBlkCache->pszId));
     SSMR3PutStrZ(pSSM, pBlkCache->pszId);

Cache-entry creation validates the requested size before narrowing it to the
new 32-bit cbData field (see PDMBlkCacheInternal.h below):

                                            uint64_t off, size_t cbData, uint8_t *pbBuffer)
 {
+    AssertReturn(cbData <= UINT32_MAX, NULL);
     PPDMBLKCACHEENTRY pEntryNew = (PPDMBLKCACHEENTRY)RTMemAllocZ(sizeof(PDMBLKCACHEENTRY));

     pEntryNew->cRefs        = 1; /* We are using it now. */
     pEntryNew->pList        = NULL;
-    pEntryNew->cbData       = cbData;
+    pEntryNew->cbData       = (uint32_t)cbData;
     pEntryNew->pWaitingHead = NULL;
     pEntryNew->pWaitingTail = NULL;

pdmBlkCacheEntryBoundariesCalc() switches its size parameters, return type and
locals from size_t to uint32_t (with explicit casts where 64-bit offsets are
narrowed), declares the locals at first use, re-wraps the doxygen comment and
picks up a review note:

-static size_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
-                                             uint64_t off, size_t cb,
-                                             unsigned uAlignment,
-                                             uint64_t *poffAligned, size_t *pcbAligned)
-{
-    size_t cbAligned;
-    size_t cbInEntry = 0;
-    uint64_t offAligned;
+static uint32_t pdmBlkCacheEntryBoundariesCalc(PPDMBLKCACHE pBlkCache,
+                                               uint64_t off, uint32_t cb,
+                                               unsigned uAlignment,
+                                               uint64_t *poffAligned, uint32_t *pcbAligned)
+{
+    /* Get the best fit entries around the offset */
     PPDMBLKCACHEENTRY pEntryAbove = NULL;
-
-    /* Get the best fit entries around the offset */
     pdmBlkCacheGetCacheBestFitEntryByOffset(pBlkCache, off, &pEntryAbove);

-    offAligned = off;
-
+    /** @todo r=bird: Why is uAlignment disregarded here? */
+    uint64_t offAligned = off;
+
+    uint32_t cbAligned;
+    uint32_t cbInEntry;
     if (   pEntryAbove
         && off + cb > pEntryAbove->Core.Key)
     {
-        cbInEntry = pEntryAbove->Core.Key - off;
-        cbAligned = pEntryAbove->Core.Key - offAligned;
+        cbInEntry = (uint32_t)(pEntryAbove->Core.Key - off);
+        cbAligned = (uint32_t)(pEntryAbove->Core.Key - offAligned);
     }
     else
     {
+        cbInEntry = cb;
         cbAligned = cb;
-        cbInEntry = cb;
     }

     /* A few sanity checks */
-    AssertMsg(!pEntryAbove || (offAligned + cbAligned) <= pEntryAbove->Core.Key,
+    AssertMsg(!pEntryAbove || offAligned + cbAligned <= pEntryAbove->Core.Key,
               ("Aligned size intersects with another cache entry\n"));
     Assert(cbInEntry <= cbAligned);

Its caller is restructured the same way: the size is validated up front, the
boundaries are computed before taking the cache lock, and the locals are
declared at first use:

                                            size_t *pcbData)
 {
+    AssertReturn(cb <= UINT32_MAX, NULL);
+
     uint64_t offStart = 0;
-    size_t cbEntry = 0;
+    uint32_t cbEntry = 0;
+    *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, (uint32_t)cb, uAlignment,
+                                              &offStart, &cbEntry);
+
+    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
+    pdmBlkCacheLockEnter(pCache);
+
     PPDMBLKCACHEENTRY pEntryNew = NULL;
-    PPDMBLKCACHEGLOBAL pCache = pBlkCache->pCache;
-    uint8_t *pbBuffer = NULL;
-
-    *pcbData = pdmBlkCacheEntryBoundariesCalc(pBlkCache, off, cb, uAlignment,
-                                              &offStart, &cbEntry);
-
-    pdmBlkCacheLockEnter(pCache);
+    uint8_t *pbBuffer = NULL;
     bool fEnough = pdmBlkCacheReclaim(pCache, cbEntry, true, &pbBuffer);
-
     if (fEnough)
     {
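The recurring pattern in the PDMBlkCache hunks is checked narrowing: the public interfaces keep size_t, the internal entry size becomes uint32_t, and the conversion points validate the value once and then cast explicitly so the truncation is both safe and visibly intentional. A generic sketch of that idea (hypothetical helper, not VBox code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Narrow a size_t byte count to the 32-bit field used internally.
       Callers are expected to have rejected larger values up front,
       mirroring the AssertReturn(cb <= UINT32_MAX, NULL) guards above. */
    uint32_t narrow_cb(size_t cb)
    {
        assert(cb <= UINT32_MAX);
        return (uint32_t)cb;
    }

    /* usage: pEntry->cbData = narrow_cb(cbRequest); */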
trunk/src/VBox/VMM/VMMRC/TRPMRCHandlers.cpp (r37955 → r39038)

The sentinel assignment is spelled with the proper constant in three places:

     if (    rc != VINF_EM_RAW_GUEST_TRAP
         &&  rc != VINF_EM_RAW_RING_SWITCH_INT)
-        pVCpu->trpm.s.uActiveVector = ~0;
+        pVCpu->trpm.s.uActiveVector = UINT32_MAX;

      * starting from the instruction which caused the trap.
      */
-    pTrpmCpu->uActiveVector = ~0;
+    pTrpmCpu->uActiveVector = UINT32_MAX;
     Log6(("TRPMGC0b: %Rrc (%04x:%08x) (CG)\n", VINF_EM_RAW_RING_SWITCH, pRegFrame->cs, pRegFrame->eip));
     PGMRZDynMapReleaseAutoSet(pVCpu);

         case OP_BOUND:
         case OP_INTO:
-            pVCpu->trpm.s.uActiveVector = ~0;
+            pVCpu->trpm.s.uActiveVector = UINT32_MAX;
             return trpmGCExitTrap(pVM, pVCpu, VINF_EM_RAW_RING_SWITCH, pRegFrame);
trunk/src/VBox/VMM/include/PDMBlkCacheInternal.h (r38885 → r39038)

     volatile uint32_t cRefs;
     /** Size of the entry. */
-    size_t            cbData;
+    uint32_t          cbData;
     /** Pointer to the memory containing the data. */
     uint8_t          *pbData;
trunk/src/VBox/VMM/include/TRPMInternal.h (r38867 → r39038)

     /** Active Interrupt or trap vector number.
-     * If not ~0U this indicates that we're currently processing
-     * a interrupt, trap, fault, abort, whatever which have arrived
-     * at that vector number.
+     * If not UINT32_MAX this indicates that we're currently processing a
+     * interrupt, trap, fault, abort, whatever which have arrived at that
+     * vector number.
      */
-    RTUINT                  uActiveVector;
+    uint32_t                uActiveVector;

     /** Active trap type. */