Changeset 13820 in vbox for trunk/src/VBox/VMM
Timestamp:                  Nov 5, 2008 12:55:49 AM
svn:sync-xref-src-repo-rev: 38811
Location:                   trunk/src/VBox/VMM
Files:                      28 edited
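
The recurring theme of this changeset is format-string hygiene in log and assertion messages: 64-bit context fields such as pCtx->rip are now cast explicitly to the type their format specifier expects (typically RTGCPTR for %VGv/%RGv), and values with a known width get width-explicit IPRT specifiers such as %08RX32, %RRv (raw-mode context addresses) or %RHp/%RHv (host addresses). The fragment below is only an illustrative sketch of that pattern, not code from the tree; the helper function is hypothetical and assumes nothing beyond the IPRT/VBox types and macros already visible in the diff (iprt/types.h, VBox/log.h).

    #include <iprt/types.h>   /* RTGCPTR, uint32_t, uint64_t */
    #include <VBox/log.h>     /* the Log() macro */

    /* Hypothetical helper, for illustration only. */
    static void logGuestRip(uint64_t uRip)
    {
        /* Old style: a 64-bit rip handed straight to %VGv, so the argument
           width only matched the specifier on some build configurations. */
        Log(("fault at %VGv\n", uRip));

        /* New style: make the argument type explicit for the specifier ... */
        Log(("fault at %VGv\n", (RTGCPTR)uRip));

        /* ... or pick a width-explicit specifier when the value is 32-bit. */
        Log(("fault at %08RX32\n", (uint32_t)uRip));
    }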
trunk/src/VBox/VMM/DBGFDisas.cpp
(r13818 → r13820)

@@ 443 @@
      RTStrPrintf(pszOutput, cchOutput, "%VGv %s", GCPtr, szBuf);
  else
-     RTStrPrintf(pszOutput, cchOutput, "%VRv %s", (RTRCPTR)GCPtr, szBuf);
+     RTStrPrintf(pszOutput, cchOutput, "%08RX32 %s", (uint32_t)GCPtr, szBuf);
  }
  else
@@ 450 @@
      RTStrPrintf(pszOutput, cchOutput, "%04x:%VGv %s", Sel, GCPtr, szBuf);
  else
-     RTStrPrintf(pszOutput, cchOutput, "%04x:%VRv %s", Sel, (RTRCPTR)GCPtr, szBuf);
+     RTStrPrintf(pszOutput, cchOutput, "%04x:%08RX32 %s", Sel, (uint32_t)GCPtr, szBuf);
  }
  }
@@ 476 @@
              szBuf);
  else
-     RTStrPrintf(pszOutput, cchOutput, "%VRv %.*Vhxs%*s %s",
-                 (RTRCPTR)GCPtr,
+     RTStrPrintf(pszOutput, cchOutput, "%08RX32 %.*Vhxs%*s %s",
+                 (uint32_t)GCPtr,
                  cbBits, pau8Bits, cbBits < 8 ? (8 - cbBits) * 3 : 0, "",
                  szBuf);
@@ 489 @@
              szBuf);
  else
-     RTStrPrintf(pszOutput, cchOutput, "%04x:%VRv %.*Vhxs%*s %s",
-                 Sel, (RTRCPTR)GCPtr,
+     RTStrPrintf(pszOutput, cchOutput, "%04x:%08RX32 %.*Vhxs%*s %s",
+                 Sel, (uint32_t)GCPtr,
                  cbBits, pau8Bits, cbBits < 8 ? (8 - cbBits) * 3 : 0, "",
                  szBuf);
trunk/src/VBox/VMM/EM.cpp
(r13818 → r13820)

@@ 577 @@
   * Just a braindead function to keep track of cli addresses.
   * @param   pVM         VM handle.
- * @param   pInstrGC    The EIP of the cli instruction.
+ * @param   GCPtrInstr  The EIP of the cli instruction.
   */
- static void emR3RecordCli(PVM pVM, RTGCPTR pInstrGC)
+ static void emR3RecordCli(PVM pVM, RTGCPTR GCPtrInstr)
  {
      PCLISTAT pRec;
-     pRec = (PCLISTAT)RTAvlPVGet(&pVM->em.s.pCliStatTree, (AVLPVKEY)pInstrGC);
+     pRec = (PCLISTAT)RTAvlPVGet(&pVM->em.s.pCliStatTree, (AVLPVKEY)GCPtrInstr);
      if (!pRec)
      {
@@ 591 @@
      if (!pRec)
          return;
-     pRec->Core.Key = (AVLPVKEY)pInstrGC;
+     pRec->Core.Key = (AVLPVKEY)GCPtrInstr;
      char szCliStatName[32];
-     RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%VGv", pInstrGC);
+     RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%VGv", GCPtrInstr);
      STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");
@@ 1752 @@
      if (rc == VINF_SUCCESS)
      {
-         Log(("Patch code: IRET->VM stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
+         Log(("Patch code: IRET->VM stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
          Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
      }
  }
  else
-     Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
+     Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
  }
  else
-     Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x\n", selCS, eip, uEFlags));
+     Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x\n", selCS, eip, uEFlags));
@@ 1793 @@
  {
      /** @todo move to PATMR3HandleTrap */
-     Log(("Possible Windows XP iret fault at %VGv\n", pCtx->eip));
+     Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
      PATMR3RemovePatch(pVM, pCtx->eip);
  }
@@ 2023 @@
      Assert(rc == VERR_PATCH_DISABLED);
      /* Conflict detected, patch disabled */
-     Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->eip));
+     Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
      enmState = PATMTRANS_SAFE;
@@ 2029 @@
  /* The translation had better be successful. Otherwise we can't recover. */
- AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->eip));
+ AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
  if (enmState != PATMTRANS_OVERWRITTEN)
      pCtx->eip = pOrgInstrGC;
@@ 2071 @@
      Assert(rc == VERR_PATCH_DISABLED);
      /* Conflict detected, patch disabled */
-     Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->rip));
+     Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", (RTGCPTR)pCtx->rip));
      enmState = PATMTRANS_SAFE;
  }
  /* The translation had better be successful. Otherwise we can't recover. */
- AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->rip));
+ AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", (RTGCPTR)pCtx->rip));
  if (enmState != PATMTRANS_OVERWRITTEN)
      pCtx->rip = pOrgInstrGC;
@@ 2769 @@
      PCPUMCTX pCtx = pVM->em.s.pCtx;
-     LogFlow(("emR3HwAccExecute: (cs:eip=%04x:%VGv)\n", pCtx->cs, pCtx->rip));
+     LogFlow(("emR3HwAccExecute: (cs:eip=%04x:%VGv)\n", pCtx->cs, (RTGCPTR)pCtx->rip));
      *pfFFDone = false;
@@ 2801 @@
      if (TRPMHasTrap(pVM))
-         Log(("Pending hardware interrupt=0x%x cs:eip=%04X:%VGv\n", TRPMGetTrapNo(pVM), pCtx->cs, pCtx->rip));
+         Log(("Pending hardware interrupt=0x%x cs:rip=%04X:%VGv\n", TRPMGetTrapNo(pVM), pCtx->cs, (RTGCPTR)pCtx->rip));
      uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));
      Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
  else if (CPUMIsGuestIn64BitCode(pVM, CPUMCTX2CORE(pCtx)))
-     Log(("HWR%d: %04X:%VGv ESP=%VGv IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+     Log(("HWR%d: %04X:%VGv ESP=%VGv IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
  else
      Log(("HWR%d: %04X:%08X ESP=%08X IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
  #endif /* LOG_ENABLED */
trunk/src/VBox/VMM/IOM.cpp
(r13819 → r13820)

@@ 601 @@
                                  RCPTRTYPE(PFNIOMIOPORTOUTSTRING) pfnOutStrCallback, RCPTRTYPE(PFNIOMIOPORTINSTRING) pfnInStrCallback, const char *pszDesc)
  {
-     LogFlow(("IOMR3IOPortRegisterRC: pDevIns=%p PortStart=%#x cPorts=%#x pvUser=%VRv pfnOutCallback=%VGv pfnInCallback=%VRv pfnOutStrCallback=%VRv pfnInStrCallback=%VRv pszDesc=%s\n",
+     LogFlow(("IOMR3IOPortRegisterRC: pDevIns=%p PortStart=%#x cPorts=%#x pvUser=%VRv pfnOutCallback=%RRv pfnInCallback=%RRv pfnOutStrCallback=%RRv pfnInStrCallback=%RRv pszDesc=%s\n",
              pDevIns, PortStart, cPorts, pvUser, pfnOutCallback, pfnInCallback, pfnOutStrCallback, pfnInStrCallback, pszDesc));
@@ 1329 @@
  {
      PIOMIOPORTRANGER0 pRange = (PIOMIOPORTRANGER0)MMHyperR0ToCC(pVM, pVM->iom.s.pRangeLastWriteR0);
-     pHlp->pfnPrintf(pHlp, "R0 Write Ports: %#04x-%#04x %VGv %s\n",
+     pHlp->pfnPrintf(pHlp, "R0 Write Ports: %#04x-%#04x %p %s\n",
                      pRange->Port, pRange->Port + pRange->cPorts, pRange, pRange->pszDesc);
  }
trunk/src/VBox/VMM/MMHyper.cpp
(r13819 → r13820)

@@ 229 @@
      if (RT_FAILURE(rc))
      {
-         AssertMsgFailed(("rc=%Rrc cb=%d GCPtr=%VGv enmType=%d pszDesc=%s\n",
-                          rc, pLookup->cb, pLookup->enmType, pLookup->pszDesc));
+         AssertMsgFailed(("rc=%Rrc cb=%d off=%#RX32 enmType=%d pszDesc=%s\n",
+                          rc, pLookup->cb, pLookup->off, pLookup->enmType, pLookup->pszDesc));
          return rc;
      }
@@ 409 @@
      if (RT_FAILURE(rc))
      {
-         AssertMsgFailed(("rc=%Rrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
+         AssertMsgFailed(("rc=%Rrc GCPhys=%VGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
          break;
      }
@@ 418 @@
      if (RT_FAILURE(rc))
      {
-         AssertMsgFailed(("rc=%Rrc GCPhys=%VGv off=%#x %s\n", rc, GCPhys, off, pszDesc));
+         AssertMsgFailed(("rc=%Rrc GCPhys=%VGp off=%#x %s\n", rc, GCPhys, off, pszDesc));
          break;
      }
trunk/src/VBox/VMM/PATM/CSAM.cpp
(r13819 → r13820)

@@ 1670 @@
  }
-     Log(("csamCreatePageRecord %VRv HCPhys=%VGp\n", GCPtr, pPage->page.GCPhys));
+     Log(("csamCreatePageRecord %VRv GCPhys=%VGp\n", GCPtr, pPage->page.GCPhys));
  #ifdef VBOX_WITH_STATISTICS
@@ 1941 @@
      Assert(enmAccessType == PGMACCESSTYPE_WRITE);
-     Log(("CSAMCodePageWriteHandler: write to %VGv size=%d\n", GCPtr, cbBuf));
+     Log(("CSAMCodePageWriteHandler: write to %VGv size=%zu\n", GCPtr, cbBuf));
      if (VM_IS_EMT(pVM))
trunk/src/VBox/VMM/PATM/PATMSSM.cpp
(r13816 → r13820)

@@ 574 @@
      /* We actually generated code for this patch. */
      ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
-     AssertMsg(ret, ("Inserting patch %VGv offset %VGv failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
+     AssertMsg(ret, ("Inserting patch %RRv offset %08RX32 failed!!\n", pPatchRec->patch.pPrivInstrGC, pPatchRec->CoreOffset.Key));
  }
  }
trunk/src/VBox/VMM/PATM/VMMAll/PATMAll.cpp
(r13818 → r13820)

@@ 60 @@
      register uint32_t efl = pCtxCore->eflags.u32;
      CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = efl & PATM_VIRTUAL_FLAGS_MASK;
-     AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%VRv eflags=%08x fPATM=%d pPATMGC=%VGv-%VGv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
+     AssertMsg((efl & X86_EFL_IF) || PATMShouldUseRawMode(pVM, (RTRCPTR)pCtxCore->eip), ("X86_EFL_IF is clear and PATM is disabled! (eip=%VRv eflags=%08x fPATM=%d pPATMGC=%RRv-%RRv\n", pCtxCore->eip, pCtxCore->eflags.u32, PATMIsEnabled(pVM), pVM->patm.s.pPatchMemGC, pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem));
      AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode, ("fPIF=%d eip=%VRv\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip));
trunk/src/VBox/VMM/PATM/VMMGC/CSAMGC.cpp
(r13818 → r13820)

@@ 82 @@
      if (!pPATMGCState->fPIF && fPatchCode)
      {
-         Log(("CSAMGCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %VGv!\n", pRegFrame->eip));
+         Log(("CSAMGCCodePageWriteHandler: fPIF=0 -> stack fault in patch generated code at %08RX32!\n", pRegFrame->eip));
          /** @note there are cases when pages previously used for code are now used for stack; patch generated code will fault (pushf))
           *        Just make the page r/w and continue.
trunk/src/VBox/VMM/PDMDevMiscHlp.cpp
(r13819 → r13820)

@@ 96 @@
      AssertReleaseRC(rc);
      AssertRelease(pRCHelpers);
-     LogFlow(("pdmR3PicHlp_GetRCHelpers: caller='%s'/%d: returns %VGv\n",
+     LogFlow(("pdmR3PicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
               pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, pRCHelpers));
      return pRCHelpers;
@@ 227 @@
      AssertReleaseRC(rc);
      AssertRelease(pRCHelpers);
-     LogFlow(("pdmR3ApicHlp_GetRCHelpers: caller='%s'/%d: returns %VGv\n",
+     LogFlow(("pdmR3ApicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
               pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, pRCHelpers));
      return pRCHelpers;
@@ 314 @@
      AssertReleaseRC(rc);
      AssertRelease(pRCHelpers);
-     LogFlow(("pdmR3IoApicHlp_GetRCHelpers: caller='%s'/%d: returns %VGv\n",
+     LogFlow(("pdmR3IoApicHlp_GetRCHelpers: caller='%s'/%d: returns %RRv\n",
               pDevIns->pDevReg->szDeviceName, pDevIns->iInstance, pRCHelpers));
      return pRCHelpers;
trunk/src/VBox/VMM/PDMLdr.cpp
(r13818 → r13820)

@@ 592 @@
  else
      pUVM->pdm.s.pModules = pModule; /* (pNext is zeroed by alloc) */
- Log(("PDM: GC Module at %VGvx %s (%s)\n", (RTGCPTR)pModule->ImageBase, pszName, pszFilename));
+ Log(("PDM: R0 Module at %RHv %s (%s)\n", (RTR0PTR)pModule->ImageBase, pszName, pszFilename));
  RTMemTmpFree(pszFile);
  return VINF_SUCCESS;
trunk/src/VBox/VMM/PGM.cpp
(r13819 → r13820)

@@ 2614 @@
                  "%04X - %VGp P=%d U=%d RW=%d [G=%d]\n",
                  iPD,
-                 PdeSrc.u & X86_PDE_PG_MASK,
+                 (RTGCPHYS)(PdeSrc.u & X86_PDE_PG_MASK),
                  PdeSrc.n.u1Present, PdeSrc.n.u1User, PdeSrc.n.u1Write, PdeSrc.b.u1Global && fPGE);
  }
trunk/src/VBox/VMM/PGMMap.cpp
(r13819 → r13820)

@@ 1070 @@
  {
      STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
-     Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s (32 bits)\n"
-          "                          iPDE=%#x iPT=%#x PDE=%VGp.\n",
+     Log(("PGMR3HasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
+          "                          iPDE=%#x iPT=%#x PDE=%VGp.\n",
          (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
          iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
@@ 1096 @@
      STAM_COUNTER_INC(&pVM->pgm.s.StatR3DetectedConflicts);
      Log(("PGMR3HasMappingConflicts: Conflict was detected at %VGv for mapping %s (PAE)\n"
-          "                          PDE=%VGp.\n",
+          "                          PDE=%016RX64.\n",
          GCPtr, pCur->pszDesc, Pde.u));
      return true;
trunk/src/VBox/VMM/SELM.cpp
(r13818 → r13820)

@@ 888 @@
  else
  {
-     AssertReleaseMsgFailed(("Couldn't read GDT at %VGv, rc=%Rrc!\n", GDTR.pGdt, rc));
+     AssertReleaseMsgFailed(("Couldn't read GDT at %016RX64, rc=%Rrc!\n", GDTR.pGdt, rc));
      STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
      return VERR_NOT_IMPLEMENTED;
@@ 1051 @@
      || GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
  {
-     Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%VGv cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
+     Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
      /*
@@ 1180 @@
      || cbLdt != pVM->selm.s.cbLdtLimit)
  {
-     Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %VGv:%04x to %VGv:%04x. (GDTR=%VGv:%04x)\n",
+     Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %VGv:%04x to %VGv:%04x. (GDTR=%016RX64:%04x)\n",
          pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
@@ 1202 @@
      /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
      pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
-     Log(("WARNING: Guest LDT (%VGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%VGv:%04x)\n",
+     Log(("WARNING: Guest LDT (%VGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%016RX64:%04x)\n",
          GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
  }
@@ 1783 @@
      if (!pVM->selm.s.fSyncTSSRing0Stack)
      {
-         RTGCPTR pGuestTSS = pVM->selm.s.GCPtrGuestTss;
+         RTGCPTR GCPtrGuestTSS = pVM->selm.s.GCPtrGuestTss;
          uint32_t ESPR0;
-         int rc = PGMPhysSimpleReadGCPtr(pVM, &ESPR0, pGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0));
+         int rc = PGMPhysSimpleReadGCPtr(pVM, &ESPR0, GCPtrGuestTSS + RT_OFFSETOF(VBOXTSS, esp0), sizeof(ESPR0));
          if (RT_SUCCESS(rc))
          {
              RTSEL SelSS0;
-             rc = PGMPhysSimpleReadGCPtr(pVM, &SelSS0, pGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0));
+             rc = PGMPhysSimpleReadGCPtr(pVM, &SelSS0, GCPtrGuestTSS + RT_OFFSETOF(VBOXTSS, ss0), sizeof(SelSS0));
              if (RT_SUCCESS(rc))
              {
@@ 1799 @@
                  uint64_t fFlags;
-                 rc = PGMGstGetPage(pVM, pGuestTSS, &fFlags, &GCPhys);
+                 rc = PGMGstGetPage(pVM, GCPtrGuestTSS, &fFlags, &GCPhys);
                  AssertRC(rc);
                  AssertMsgFailed(("TSS out of sync!! (%04X:%08X vs %04X:%08X (guest)) Tss=%VGv Phys=%VGp\n",
-                                  (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, pGuestTSS, GCPhys));
+                                  (pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, SelSS0, ESPR0, GCPtrGuestTSS, GCPhys));
              }
              else
@@ 2270 @@
  static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
  {
-     pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%VGv):\n", MMHyperHC2GC(pVM, pVM->selm.s.paGdtR3));
+     pHlp->pfnPrintf(pHlp, "Shadow GDT (GCAddr=%RRv):\n", MMHyperR3ToRC(pVM, pVM->selm.s.paGdtR3));
      for (unsigned iGDT = 0; iGDT < SELM_GDT_ELEMENTS; iGDT++)
      {
@@ 2305 @@
      VBOXGDTR GDTR;
      CPUMGetGuestGDTR(pVM, &GDTR);
-     RTGCPTR  pGDTGC = GDTR.pGdt;
+     RTGCPTR  GCPtrGDT = GDTR.pGdt;
      unsigned cGDTs = ((unsigned)GDTR.cbGdt + 1) / sizeof(X86DESC);
-     pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%VGv limit=%x):\n", pGDTGC, GDTR.cbGdt);
-     for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, pGDTGC += sizeof(X86DESC))
+     pHlp->pfnPrintf(pHlp, "Guest GDT (GCAddr=%VGv limit=%x):\n", GCPtrGDT, GDTR.cbGdt);
+     for (unsigned iGDT = 0; iGDT < cGDTs; iGDT++, GCPtrGDT += sizeof(X86DESC))
      {
          X86DESC GDTE;
-         int rc = PGMPhysSimpleReadGCPtr(pVM, &GDTE, pGDTGC, sizeof(GDTE));
+         int rc = PGMPhysSimpleReadGCPtr(pVM, &GDTE, GCPtrGDT, sizeof(GDTE));
          if (RT_SUCCESS(rc))
          {
@@ 2324 @@
          else if (rc == VERR_PAGE_NOT_PRESENT)
          {
-             if ((pGDTGC & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
-                 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%VGv)\n", iGDT << X86_SEL_SHIFT, pGDTGC);
+             if ((GCPtrGDT & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
+                 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%VGv)\n", iGDT << X86_SEL_SHIFT, GCPtrGDT);
          }
          else
-             pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%VGv\n", iGDT << X86_SEL_SHIFT, rc, pGDTGC);
+             pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%VGv\n", iGDT << X86_SEL_SHIFT, rc, GCPtrGDT);
      }
  }
@@ 2344 @@
      unsigned cLDTs = ((unsigned)pVM->selm.s.cbLdtLimit + 1) >> X86_SEL_SHIFT;
      PX86DESC paLDT = (PX86DESC)((char *)pVM->selm.s.pvLdtR3 + pVM->selm.s.offLdtHyper);
-     pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%VGv limit=%d):\n", pVM->selm.s.pvLdtRC + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
+     pHlp->pfnPrintf(pHlp, "Shadow LDT (GCAddr=%RRv limit=%#x):\n", pVM->selm.s.pvLdtRC + pVM->selm.s.offLdtHyper, pVM->selm.s.cbLdtLimit);
      for (unsigned iLDT = 0; iLDT < cLDTs; iLDT++)
      {
@@ 2373 @@
  }
-     RTGCPTR  pLdtGC;
+     RTGCPTR  GCPtrLdt;
      unsigned cbLdt;
-     int rc = SELMGetLDTFromSel(pVM, SelLdt, &pLdtGC, &cbLdt);
+     int rc = SELMGetLDTFromSel(pVM, SelLdt, &GCPtrLdt, &cbLdt);
      if (RT_FAILURE(rc))
      {
@@ 2382 @@
      }
-     pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%VGv limit=%x):\n", SelLdt, pLdtGC, cbLdt);
+     pHlp->pfnPrintf(pHlp, "Guest LDT (Sel=%x GCAddr=%VGv limit=%x):\n", SelLdt, GCPtrLdt, cbLdt);
      unsigned cLdts = (cbLdt + 1) >> X86_SEL_SHIFT;
-     for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, pLdtGC += sizeof(X86DESC))
+     for (unsigned iLdt = 0; iLdt < cLdts; iLdt++, GCPtrLdt += sizeof(X86DESC))
      {
          X86DESC LdtE;
-         int rc = PGMPhysSimpleReadGCPtr(pVM, &LdtE, pLdtGC, sizeof(LdtE));
+         int rc = PGMPhysSimpleReadGCPtr(pVM, &LdtE, GCPtrLdt, sizeof(LdtE));
          if (RT_SUCCESS(rc))
          {
@@ 2399 @@
          else if (rc == VERR_PAGE_NOT_PRESENT)
          {
-             if ((pLdtGC & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
-                 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%VGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, pLdtGC);
+             if ((GCPtrLdt & PAGE_OFFSET_MASK) + sizeof(X86DESC) - 1 < sizeof(X86DESC))
+                 pHlp->pfnPrintf(pHlp, "%04x - page not present (GCAddr=%VGv)\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, GCPtrLdt);
          }
          else
-             pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%VGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, pLdtGC);
+             pHlp->pfnPrintf(pHlp, "%04x - read error rc=%Rrc GCAddr=%VGv\n", (iLdt << X86_SEL_SHIFT) | X86_SEL_LDT, rc, GCPtrLdt);
      }
  }
trunk/src/VBox/VMM/TRPM.cpp
(r13818 → r13820)

@@ 661 @@
      if (pVM->trpm.s.aGuestTrapHandler[iTrap] != TRPM_INVALID_HANDLER)
      {
-         Log(("TRPMR3Relocate: iGate=%2X Handler %VGv -> %VGv\n", iTrap, pVM->trpm.s.aGuestTrapHandler[iTrap], pVM->trpm.s.aGuestTrapHandler[iTrap] + offDelta));
+         Log(("TRPMR3Relocate: iGate=%2X Handler %RRv -> %RRv\n", iTrap, pVM->trpm.s.aGuestTrapHandler[iTrap], pVM->trpm.s.aGuestTrapHandler[iTrap] + offDelta));
          pVM->trpm.s.aGuestTrapHandler[iTrap] += offDelta;
      }
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
(r13819 → r13820)

@@ 155 @@
  {
      Log(("EMInterpretDisasOne: Failed to convert %RTsel:%VGv (cpl=%d) - rc=%Rrc !!\n",
-          pCtxCore->cs, pCtxCore->rip, pCtxCore->ss & X86_SEL_RPL, rc));
+          pCtxCore->cs, (RTGCPTR)pCtxCore->rip, pCtxCore->ss & X86_SEL_RPL, rc));
      return rc;
  }
@@ 211 @@
      RTGCPTR pbCode;
-     LogFlow(("EMInterpretInstruction %VGv fault %VGv\n", pRegFrame->rip, pvFault));
+     LogFlow(("EMInterpretInstruction %RGv fault %VGv\n", (RTGCPTR)pRegFrame->rip, pvFault));
      int rc = SELMToFlatEx(pVM, DIS_SELREG_CS, pRegFrame, pRegFrame->rip, 0, &pbCode);
      if (RT_SUCCESS(rc))
@@ 728 @@
      if (pCpu->param1.size < pCpu->param2.size)
      {
-         AssertMsgFailed(("%s at %VGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pCpu), pRegFrame->rip, pCpu->param1.size, pCpu->param2.size)); /* should never happen! */
+         AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pCpu), (RTGCPTR)pRegFrame->rip, pCpu->param1.size, pCpu->param2.size)); /* should never happen! */
          return VERR_EM_INTERPRETER;
      }
@@ 814 @@
  {
      AssertMsgReturn(pCpu->param1.size >= pCpu->param2.size, /* should never happen! */
-                     ("%s at %VGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pCpu), pRegFrame->rip, pCpu->param1.size, pCpu->param2.size),
+                     ("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pCpu), (RTGCPTR)pRegFrame->rip, pCpu->param1.size, pCpu->param2.size),
                      VERR_EM_INTERPRETER);
@@ 903 @@
      if (pCpu->param1.size < pCpu->param2.size)
      {
-         AssertMsgFailed(("%s at %VGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pCpu), pRegFrame->rip, pCpu->param1.size, pCpu->param2.size)); /* should never happen! */
+         AssertMsgFailed(("%s at %RGv parameter mismatch %d vs %d!!\n", emGetMnemonic(pCpu), (RTGCPTR)pRegFrame->rip, pCpu->param1.size, pCpu->param2.size)); /* should never happen! */
          return VERR_EM_INTERPRETER;
      }
@@ 1187 @@
      default:
-         Log(("emInterpretMov: unexpected type=%d eip=%VGv\n", param2.type, pRegFrame->rip));
+         Log(("emInterpretMov: unexpected type=%d rip=%RGv\n", param2.type, (RTGCPTR)pRegFrame->rip));
          return VERR_EM_INTERPRETER;
  }
  #ifdef LOG_ENABLED
  if (pCpu->mode == CPUMODE_64BIT)
-     LogFlow(("EMInterpretInstruction at %VGv: OP_MOV %VGv <- %RX64 (%d) &val64=%RHv\n", pRegFrame->rip, pDest, val64, param2.size, &val64));
+     LogFlow(("EMInterpretInstruction at %RGv: OP_MOV %VGv <- %RX64 (%d) &val64=%RHv\n", (RTGCPTR)pRegFrame->rip, pDest, val64, param2.size, &val64));
  else
-     LogFlow(("EMInterpretInstruction at %VGv: OP_MOV %VGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
+     LogFlow(("EMInterpretInstruction at %08RX64: OP_MOV %VGv <- %08X (%d) &val64=%RHv\n", pRegFrame->rip, pDest, (uint32_t)val64, param2.size, &val64));
  #endif
@@ 2000 @@
      /** @todo Clean up this mess. */
-     LogFlow(("EMInterpretCRxWrite at %VGv CR%d <- %VX64\n", pRegFrame->rip, DestRegCrx, val));
+     LogFlow(("EMInterpretCRxWrite at %RGv CR%d <- %VX64\n", (RTGCPTR)pRegFrame->rip, DestRegCrx, val));
      switch (DestRegCrx)
      {
@@ 2349 @@
      X86XDTR32 dtr32;
-     Log(("Emulate %s at %VGv\n", emGetMnemonic(pCpu), pRegFrame->rip));
+     Log(("Emulate %s at %RGv\n", emGetMnemonic(pCpu), (RTGCPTR)pRegFrame->rip));
      /* Only for the VT-x real-mode emulation case. */
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
(r13818 → r13820)

@@ 1018 @@
  {
      STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
-     Log(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%VGv eip=%VGv\n",
-          GCPhysFault, (uint32_t)uErrorCode, pvFault, pCtxCore->rip));
+     Log(("IOMMMIOHandler: GCPhys=%RGp uErr=%#x pvFault=%VGv rip=%RGv\n",
+          GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
      PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
@@ 1740 @@
  VMMDECL(int) IOMMMIOModifyPage(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
  {
-     Assert(fPageFlags == (X86_PTE_RW |X86_PTE_P));
+     Assert(fPageFlags == (X86_PTE_RW | X86_PTE_P));
      Log(("IOMMMIOModifyPage %VGp -> %VGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
(r13819 → r13820)

@@ 360 @@
  VMMDECL(int) PGMTrap0eHandler(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
  {
-     LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%VGv eip=%VGv\n", uErr, pvFault, pRegFrame->rip));
+     LogFlow(("PGMTrap0eHandler: uErr=%RGu pvFault=%VGv eip=%VGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
      STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
      STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
@@ 1927 @@
      *ppv = pv;
      STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
-     //Log(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%VGv iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
+     //Log(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
      return VINF_SUCCESS;
  }
@@ 1947 @@
      *ppv = pv;
      ASMInvalidatePage(pv);
-     Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%VGv iPage=%d\n", HCPhys, pv, iPage));
+     Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
      return VINF_SUCCESS;
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
(r13819 → r13820)

@@ 658 @@
     )
  {
-     LogFlow(("CSAMExecFault %VGv\n", pRegFrame->eip));
+     LogFlow(("CSAMExecFault %RX32\n", pRegFrame->eip));
      rc = CSAMExecFault(pVM, (RTRCPTR)pRegFrame->eip);
      if (rc != VINF_SUCCESS)
@@ 1020 @@
  {
      LogFlow(("InvalidatePage: Out-of-sync PML4E (P/GCPhys) at %VGv GCPhys=%VGp vs %VGp Pml4eSrc=%RX64 Pml4eDst=%RX64\n",
               GCPtrPage, pShwPdpt->GCPhys, GCPhysPdpt, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
      pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
      pPml4eDst->u = 0;
@@ 1444 @@
  else
  {
-     LogFlow(("SyncPageWorker: monitored page (%VGp) -> mark not present\n", HCPhys));
+     LogFlow(("SyncPageWorker: monitored page (%VHp) -> mark not present\n", HCPhys));
      PteDst.u = 0;
  }
@@ 1561 @@
  PGM_BTH_DECL(int, SyncPage)(PVM pVM, GSTPDE PdeSrc, RTGCUINTPTR GCPtrPage, unsigned cPages, unsigned uErr)
  {
-     LogFlow(("SyncPage: GCPtrPage=%VGv cPages=%d uErr=%#x\n", GCPtrPage, cPages, uErr));
+     LogFlow(("SyncPage: GCPtrPage=%VGv cPages=%u uErr=%#x\n", GCPtrPage, cPages, uErr));
  #if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
@@ 2531 @@
           (uint64_t)PteSrc.u,
           pPTDst->a[iPTDst].u & PGM_PTFLAGS_TRACK_DIRTY ? " Track-Dirty" : "", pPTDst->a[iPTDst].u, iPTSrc, PdeSrc.au32[0],
-          (PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)));
+          (RTGCPHYS)((PdeSrc.u & GST_PDE_PG_MASK) + iPTSrc*sizeof(PteSrc)) ));
      }
  } /* for PTEs */
@@ 3206 @@
  {
      /* Free it. */
-     LogFlow(("SyncCR3: Out-of-sync PML4E (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
+     LogFlow(("SyncCR3: Out-of-sync PML4E (GCPhys) GCPtr=%RX64 %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
               (uint64_t)iPml4e << X86_PML4_SHIFT, pShwPdpt->GCPhys, GCPhysPdptSrc, (uint64_t)pPml4eSrc->u, (uint64_t)pPml4eDst->u));
      pgmPoolFreeByPage(pPool, pShwPdpt, pVM->pgm.s.pHCShwAmd64CR3->idx, iPml4e);
@@ 3291 @@
  {
      /* Free it. */
-     LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%VGv %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
+     LogFlow(("SyncCR3: Out-of-sync PDPE (GCPhys) GCPtr=%RX64 %VGp vs %VGp PdpeSrc=%RX64 PdpeDst=%RX64\n",
               ((uint64_t)iPml4e << X86_PML4_SHIFT) + ((uint64_t)iPdpte << X86_PDPT_SHIFT), pShwPde->GCPhys, GCPhysPdeSrc, (uint64_t)PdpeSrc.u, (uint64_t)pPdpeDst->u));
@@ 3869 @@
      if (!pPoolPage)
      {
-         AssertMsgFailed(("Invalid page table address %VGp at %VGv! PdeDst=%#RX64\n",
+         AssertMsgFailed(("Invalid page table address %VHp at %VGv! PdeDst=%#RX64\n",
                           HCPhysShw, GCPtr, (uint64_t)PdeDst.u));
          cErrors++;
@@ 4079 @@
      if (PteDst.n.u1Write)
      {
-         AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%VGv PteSrc=%#RX64 PteDst=%#RX64\n",
+         AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%RHp PteSrc=%#RX64 PteDst=%#RX64\n",
                           GCPtr + off, pPhysPage->HCPhys, (uint64_t)PteSrc.u, (uint64_t)PteDst.u));
          cErrors++;
@@ 4309 @@
      if (PteDst.n.u1Write)
      {
-         AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%VGv PdeSrc=%#RX64 PteDst=%#RX64\n",
+         AssertMsgFailed(("WRITE access flagged at %VGv but the page is writable! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
                           GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
          cErrors++;
@@ 4321 @@
      if (PteDst.n.u1Present)
      {
-         AssertMsgFailed(("ALL access flagged at %VGv but the page is present! HCPhys=%VGv PdeSrc=%#RX64 PteDst=%#RX64\n",
+         AssertMsgFailed(("ALL access flagged at %VGv but the page is present! HCPhys=%RHp PdeSrc=%#RX64 PteDst=%#RX64\n",
                           GCPtr + off, pPhysPage->HCPhys, (uint64_t)PdeSrc.u, (uint64_t)PteDst.u));
          cErrors++;
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
(r13818 → r13820)

@@ 1009 @@
  #ifdef DEBUG
-     Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD1, iPD1 << X86_PD_SHIFT));
+     Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
      if (iPD1 != iPD2)
-         Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%VGv\n", iPD2, iPD2 << X86_PD_SHIFT));
+         Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
  #endif
@@ 1158 @@
  #ifdef DEBUG
-     Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%VGv)\n",
+     Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
           i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
      if (iPD1 != iPD2)
-         Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%VGv)\n",
+         Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
           i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
  #endif
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
(r13818 → r13820)

@@ 1345 @@
      /** @todo fix me later. */
-     AssertReleaseMsgFailed(("Unknown read at %VGp size %d implement the complex physical reading case %x\n",
+     AssertReleaseMsgFailed(("Unknown read at %VGp size %u implement the complex physical reading case %RHp\n",
                              GCPhys, cbRead,
                              pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_ROM))); /** @todo PAGE FLAGS */
@@ 1361 @@
  else
  {
-     LogFlow(("PGMPhysRead: Unassigned %VGp size=%d\n", GCPhys, cbRead));
+     LogFlow(("PGMPhysRead: Unassigned %VGp size=%u\n", GCPhys, cbRead));
      /*
@@ 1683 @@
      /** @todo fix me later. */
-     AssertReleaseMsgFailed(("Unknown write at %VGp size %d implement the complex physical writing case %x\n",
+     AssertReleaseMsgFailed(("Unknown write at %VGp size %u implement the complex physical writing case %RHp\n",
                              GCPhys, cbWrite,
                              (pPage->HCPhys & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)))); /** @todo PAGE FLAGS */
@@ 2114 @@
      return VINF_SUCCESS;
-     LogFlow(("PGMPhysReadGCPtr: %VGv %d\n", GCPtrSrc, cb));
+     LogFlow(("PGMPhysReadGCPtr: %VGv %zu\n", GCPtrSrc, cb));
      /*
@@ 2186 @@
      return VINF_SUCCESS;
-     LogFlow(("PGMPhysWriteGCPtr: %VGv %d\n", GCPtrDst, cb));
+     LogFlow(("PGMPhysWriteGCPtr: %VGv %zu\n", GCPtrDst, cb));
      /*
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
(r13819 → r13820)

@@ 317 @@
  # ifdef PGMPOOL_WITH_GCPHYS_TRACKING
      PCX86PTE pGstPte = (PCX86PTE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
-     Log4(("pgmPoolMonitorChainChanging 32_32: deref %RHp GCPhys %VGp\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PG_MASK));
+     Log4(("pgmPoolMonitorChainChanging 32_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PG_MASK));
      pgmPoolTracDerefGCPhysHint(pPool, pPage,
                                 uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK,
@@ 336 @@
  # ifdef PGMPOOL_WITH_GCPHYS_TRACKING
      PCX86PTE pGstPte = (PCX86PTE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
-     Log4(("pgmPoolMonitorChainChanging pae_32: deref %RHp GCPhys %VGp\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PG_MASK));
+     Log4(("pgmPoolMonitorChainChanging pae_32: deref %016RX64 GCPhys %08RX32\n", uShw.pPT->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PG_MASK));
      pgmPoolTracDerefGCPhysHint(pPool, pPage,
                                 uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK,
@@ 353 @@
  # ifdef PGMPOOL_WITH_GCPHYS_TRACKING
      PCX86PTEPAE pGstPte = (PCX86PTEPAE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
-     Log4(("pgmPoolMonitorChainChanging pae: deref %RHp GCPhys %VGp\n", uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PAE_PG_MASK));
+     Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PAE_PG_MASK));
      pgmPoolTracDerefGCPhysHint(pPool, pPage,
                                 uShw.pPTPae->a[iShw].u & X86_PTE_PAE_PG_MASK,
@@ 373 @@
  # ifdef PGMPOOL_WITH_GCPHYS_TRACKING
      PCX86PTEPAE pGstPte = (PCX86PTEPAE)pgmPoolMonitorGCPtr2CCPtr(pPool, pvAddress, GCPhysFault, sizeof(*pGstPte));
-     Log4(("pgmPoolMonitorChainChanging pae: deref %RHp GCPhys %VGp\n", uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PAE_PG_MASK));
+     Log4(("pgmPoolMonitorChainChanging pae: deref %016RX64 GCPhys %016RX64\n", uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK, pGstPte->u & X86_PTE_PAE_PG_MASK));
      pgmPoolTracDerefGCPhysHint(pPool, pPage,
                                 uShw.pPTPae->a[iShw2].u & X86_PTE_PAE_PG_MASK,
@@ 1055 @@
      /* REP prefix, don't bother. */
      STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,RepPrefix));
-     Log4(("pgmPoolAccessHandler: eax=%#x ecx=%#x edi=%#x esi=%#x eip=%VGv opcode=%d prefix=%#x\n",
-           pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, pRegFrame->rip, Cpu.pCurInstr->opcode, Cpu.prefix));
+     Log4(("pgmPoolAccessHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%VGv opcode=%d prefix=%#x\n",
+           pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, Cpu.pCurInstr->opcode, Cpu.prefix));
  }
@@ 3816 @@
  if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
  {
-     AssertMsg(pPage->enmKind == PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4, ("Can't free the shadow CR3! (%VGp vs %VGp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
+     AssertMsg(pPage->enmKind == PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4,
+               ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
      Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
      return VINF_SUCCESS;
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
(r13818 → r13820)

@@ 208 @@
  VMMDECL(void) TRPMSetErrorCode(PVM pVM, RTGCUINT uErrorCode)
  {
-     Log2(("TRPMSetErrorCode: uErrorCode=%VGv\n", uErrorCode));
+     Log2(("TRPMSetErrorCode: uErrorCode=%VGv\n", uErrorCode)); /** @todo RTGCUINT mess! */
      AssertMsg(pVM->trpm.s.uActiveVector != ~0U, ("No active trap!\n"));
      pVM->trpm.s.uActiveErrorCode = uErrorCode;
@@ 368 @@
      STAM_PROFILE_ADV_START(&pVM->trpm.s.CTX_SUFF_Z(StatForwardProf), a);
- # ifdef DEBUG
+ # if defined(VBOX_STRICT) || defined(LOG_ENABLED)
      if (pRegFrame->eflags.Bits.u1VM)
          Log(("TRPMForwardTrap-VM: eip=%04X:%04X iGate=%d\n", pRegFrame->cs, pRegFrame->eip, iGate));
@@ 380 @@
      int rc;
      RTGCPTR pCallerGC;
  # ifdef IN_GC
      rc = MMGCRamRead(pVM, &pCallerGC, (void *)pRegFrame->esp, sizeof(pCallerGC));
  # else
      rc = PGMPhysSimpleReadGCPtr(pVM, &pCallerGC, (RTGCPTR)pRegFrame->esp, sizeof(pCallerGC));
  # endif
      if (RT_SUCCESS(rc))
-     {
          Log(("TRPMForwardTrap: caller=%VGv\n", pCallerGC));
-     }
  }
  /* no break */
@@ 404 @@
          break;
      }
- # endif /* DEBUG */
+ # endif /* VBOX_STRICT || LOG_ENABLED */
      /* Retrieve the eflags including the virtualized bits. */
trunk/src/VBox/VMM/VMMGC/SELMGC.cpp
(r13818 → r13820)

@@ 294 @@
  {
      Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%VGv -> %RTsel:%VGv\n",
-          (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), pVM->selm.s.Tss.esp1, (RTSEL)pGuestTSS->ss0, pGuestTSS->esp0));
+          (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTSS->ss0, (RTGCPTR)pGuestTSS->esp0));
      pVM->selm.s.Tss.esp1 = pGuestTSS->esp0;
      pVM->selm.s.Tss.ss1 = pGuestTSS->ss0 | 1;
@@ 319 @@
      if (RT_FAILURE(rc))
      {
-         AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %VGv failed with %Rrc\n", (uint8_t *)pGuestTSS + offIntRedirBitmap + i*8, rc));
+         AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %VGv failed with %Rrc\n", (RTGCPTR)((uintptr_t)pGuestTSS + offIntRedirBitmap + i*8), rc));
          break;
      }
      rc = MMGCRamRead(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8], (uint8_t *)pGuestTSS + offIntRedirBitmap + i * 8, 8);
  }
-     AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %VGv failed with %Rrc\n", (uint8_t *)pGuestTSS + offIntRedirBitmap + i * 8, rc));
+     AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %VGv failed with %Rrc\n", (RTGCPTR)((uintptr_t)pGuestTSS + offIntRedirBitmap + i * 8), rc));
  }
  STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
(r13818 → r13820)

@@ 358 @@
  #ifdef VBOX_STRICT
      if (pEvent->n.u8Vector == 0xE)
-         Log(("SVM: Inject int %d at %VGv error code=%02x CR2=%VGv intInfo=%08x\n", pEvent->n.u8Vector, pCtx->rip, pEvent->n.u32ErrorCode, pCtx->cr2, pEvent->au64[0]));
+         Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
      else
      if (pEvent->n.u8Vector < 0x20)
-         Log(("SVM: Inject int %d at %VGv error code=%08x\n", pEvent->n.u8Vector, pCtx->rip, pEvent->n.u32ErrorCode));
+         Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode));
      else
      {
-         Log(("INJ-EI: %x at %VGv\n", pEvent->n.u8Vector, pCtx->rip));
+         Log(("INJ-EI: %x at %RGv\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip));
          Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
          Assert(pCtx->eflags.u32 & X86_EFL_IF);
@@ 392 @@
      SVM_EVENT Event;
-     Log(("Reinjecting event %08x %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->rip));
+     Log(("Reinjecting event %08x %08x at %RGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
      Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
@@ 413 @@
          LogFlow(("Enable irq window exit!\n"));
      else
-         Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n", pCtx->rip));
+         Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n", (RTGCPTR)pCtx->rip));
      /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
@@ 823 @@
  if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
  {
-     Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
+     Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %VGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
      if (pCtx->rip != EMGetInhibitInterruptsPC(pVM))
      {
@@ 1209 @@
  if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
  {
-     Log(("uInterruptState %x eip=%VGv\n", pVMCB->ctrl.u64IntShadow, pCtx->rip));
+     Log(("uInterruptState %x rip=%RGv\n", pVMCB->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
      EMSetInhibitInterruptsPC(pVM, pCtx->rip);
  }
@@ 1227 @@
      && pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
  {
-     Log(("Pending inject %VX64 at %VGv exit=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->rip, exitCode));
+     Log(("Pending inject %VX64 at %RGv exit=%08x\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
  #ifdef LOG_ENABLED
@@ 1291 @@
  if (rc == VINF_EM_RAW_GUEST_TRAP)
  {
-     Log(("Trap %x (debug) at %VGv\n", vector, pCtx->rip));
+     Log(("Trap %x (debug) at %016RX64\n", vector, pCtx->rip));
      /* Reinject the exception. */
@@ 1310 @@
  case X86_XCPT_NM:
  {
-     Log(("#NM fault at %VGv\n", pCtx->rip));
+     Log(("#NM fault at %RGv\n", (RTGCPTR)pCtx->rip));
      /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
@@ 1350 @@
   * Forward the trap to the guest by injecting the exception and resuming execution.
   */
-     Log(("Guest page fault at %VGv cr2=%VGv error code %x rsp=%VGv\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
+     Log(("Guest page fault at %RGv cr2=%RGv error code %x rsp=%RGv\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
@@ 1371 @@
      Assert(!pVM->hwaccm.s.fNestedPaging);
-     Log2(("Page fault at %VGv cr2=%VGv error code %x\n", pCtx->rip, uFaultAddress, errCode));
+     Log2(("Page fault at %RGv cr2=%VGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
      /* Exit qualification contains the linear address of the page fault. */
      TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
@@ 1379 @@
      /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
      rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
-     Log2(("PGMTrap0eHandler %VGv returned %Rrc\n", pCtx->rip, rc));
+     Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
      if (rc == VINF_SUCCESS)
      {   /* We've successfully synced our shadow pages, so let's just continue execution. */
-         Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->rip, uFaultAddress, errCode));
+         Log2(("Shadow page fault at %RGv cr2=%VGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
@@ 1436 @@
          break;
      }
-     Log(("Trap %x at %VGv\n", vector, pCtx->rip));
+     Log(("Trap %x at %RGv\n", vector, (RTGCPTR)pCtx->rip));
      Event.au64[0] = 0;
@@ 1485 @@
          break;
      }
-     Log(("Trap %x at %VGv esi=%x\n", vector, pCtx->rip, pCtx->esi));
+     Log(("Trap %x at %RGv esi=%x\n", vector, (RTGCPTR)pCtx->rip, pCtx->esi));
      SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
@@ 1508 @@
      Assert(pVM->hwaccm.s.fNestedPaging);
-     Log(("Nested page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, uFaultAddress, errCode));
+     Log(("Nested page fault at %RGv cr2=%VGp error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
      /* Exit qualification contains the linear address of the page fault. */
      TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
@@ 1516 @@
      /* Handle the pagefault trap for the nested shadow table. */
      rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMGetHostMode(pVM), errCode, CPUMCTX2CORE(pCtx), uFaultAddress);
-     Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Rrc\n", pCtx->rip, rc));
+     Log2(("PGMR0Trap0eHandlerNestedPaging %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
      if (rc == VINF_SUCCESS)
      {   /* We've successfully synced our shadow pages, so let's just continue execution. */
-         Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, uFaultAddress, errCode));
+         Log2(("Shadow page fault at %RGv cr2=%VGp error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
@@ 1564 @@
  case SVM_EXIT_CPUID:                /* Guest software attempted to execute CPUID. */
  {
-     Log2(("SVM: Cpuid at %VGv for %x\n", pCtx->rip, pCtx->eax));
+     Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
      rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
@@ 1620 @@
      uint32_t cbSize;
-     Log2(("SVM: %VGv mov cr%d, \n", pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
+     Log2(("SVM: %RGv mov cr%d, \n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
      rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
@@ 1674 @@
      uint32_t cbSize;
-     Log2(("SVM: %VGv mov x, cr%d\n", pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
+     Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
      rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
@@ 1696 @@
      uint32_t cbSize;
-     Log2(("SVM: %VGv mov dr%d, x\n", pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
+     Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
@@ 1736 @@
      uint32_t cbSize;
-     Log2(("SVM: %VGv mov dr%d, x\n", pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
+     Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
@@ 1810 @@
      if (IoExitInfo.n.u1Type == 0)
      {
-         Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
+         Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
          rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
      else
      {
-         Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
+         Log2(("IOMInterpretINSEx %VGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
          rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, prefix, uIOSize);
@@ 1828 @@
      if (IoExitInfo.n.u1Type == 0)
      {
-         Log2(("IOMIOPortWrite %VGv %x %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
+         Log2(("IOMIOPortWrite %VGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
          rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
@@ 1842 @@
          /* Write back to the EAX register. */
          pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
-         Log2(("IOMIOPortRead %VGv %x %x size=%d\n", pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
+         Log2(("IOMIOPortRead %VGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
      }
  }
@@ 1893 @@
      /* Inject the exception. */
-     Log(("Inject IO debug trap at %VGv\n", pCtx->rip));
+     Log(("Inject IO debug trap at %VGv\n", (RTGCPTR)pCtx->rip));
      Event.au64[0] = 0;
@@ 1911 @@
      goto ResumeExecution;
  }
-     Log2(("EM status from IO at %VGv %x size %d: %Rrc\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize, rc));
+     Log2(("EM status from IO at %VGv %x size %d: %Rrc\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize, rc));
      break;
  }
@@ 1923 @@
      AssertMsg(RT_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", rc));
  #endif
-     Log2(("Failed IO at %VGv %x size %d\n", pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
+     Log2(("Failed IO at %VGv %x size %d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize));
      break;
  }
@@ 1956 @@
      Event.n.u8Vector = X86_XCPT_UD;
-     Log(("Forced #UD trap at %VGv\n", pCtx->rip));
+     Log(("Forced #UD trap at %VGv\n", (RTGCPTR)pCtx->rip));
      SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
(r13819 → r13820)

@@ 530 @@
  #ifdef VBOX_STRICT
      if (iGate == 0xE)
-         LogFlow(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", iGate, pCtx->rip, errCode, pCtx->cr2, intInfo));
+         LogFlow(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", iGate, (RTGCPTR)pCtx->rip, errCode, pCtx->cr2, intInfo));
      else
      if (iGate < 0x20)
-         LogFlow(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x\n", iGate, pCtx->rip, errCode));
+         LogFlow(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x\n", iGate, (RTGCPTR)pCtx->rip, errCode));
      else
      {
-         LogFlow(("INJ-EI: %x at %VGv\n", iGate, pCtx->rip));
+         LogFlow(("INJ-EI: %x at %VGv\n", iGate, (RTGCPTR)pCtx->rip));
          Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
          Assert(pCtx->eflags.u32 & X86_EFL_IF);
@@ 637 @@
  if (pVM->hwaccm.s.Event.fPending)
  {
-     Log(("Reinjecting event %VX64 %08x at %VGv cr2=%RX64\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->rip, pCtx->cr2));
+     Log(("Reinjecting event %VX64 %08x at %VGv cr2=%RX64\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
      rc = VMXR0InjectEvent(pVM, pCtx, pVM->hwaccm.s.Event.intInfo, 0, pVM->hwaccm.s.Event.errCode);
@@ 667 @@
      rc = PDMGetInterrupt(pVM, &u8Interrupt);
-     Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:eip=%04X:%VGv\n", u8Interrupt, u8Interrupt, rc, pCtx->cs, pCtx->rip));
+     Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%VGv\n", u8Interrupt, u8Interrupt, rc, pCtx->cs, (RTGCPTR)pCtx->rip));
      if (RT_SUCCESS(rc))
      {
@@ 682 @@
  }
  else
-     Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->rip));
+     Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", (RTGCPTR)pCtx->rip));
  }
@@ 1771 @@
  if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
  {
-     Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
+     Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
      if (pCtx->rip != EMGetInhibitInterruptsPC(pVM))
      {
@@ 1986 @@
      AssertRC(rc);
      pVM->hwaccm.s.Event.errCode = val;
-     Log(("Pending inject %VX64 at %VGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVM->hwaccm.s.Event.intInfo, pCtx->rip, exitReason, intInfo, exitQualification, val));
+     Log(("Pending inject %VX64 at %VGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
  }
  else
  {
-     Log(("Pending inject %VX64 at %VGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->rip, exitReason, intInfo, exitQualification));
+     Log(("Pending inject %VX64 at %VGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
      pVM->hwaccm.s.Event.errCode = 0;
  }
@@ 2005 @@
  Log2(("Interruption error code %d\n", errCode));
  Log2(("IntInfo = %08x\n", intInfo));
- Log2(("New EIP=%VGv\n", pCtx->rip));
+ Log2(("New EIP=%VGv\n", (RTGCPTR)pCtx->rip));
  if (fSyncTPR)
@@ 2048 @@
  case X86_XCPT_NM:
  {
-     Log(("#NM fault at %VGv error code %x\n", pCtx->rip, errCode));
+     Log(("#NM fault at %VGv error code %x\n", (RTGCPTR)pCtx->rip, errCode));
      /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
@@ 2106 @@
      /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
      rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
-     Log2(("PGMTrap0eHandler %VGv returned %Rrc\n", pCtx->rip, rc));
+     Log2(("PGMTrap0eHandler %VGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
      if (rc == VINF_SUCCESS)
      {   /* We've successfully synced our shadow pages, so let's just continue execution. */
-         Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->rip, exitQualification ,errCode));
+         Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", (RTGCPTR)pCtx->rip, exitQualification ,errCode));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
@@ 2157 @@
      break;
  }
-     Log(("Trap %x at %04X:%VGv\n", vector, pCtx->cs, pCtx->rip));
+     Log(("Trap %x at %04X:%VGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
      rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
      AssertRC(rc);
@@ 2205 @@
      AssertRC(rc);
-     Log(("Trap %x (debug) at %VGv exit qualification %VX64\n", vector, pCtx->rip, exitQualification));
+     Log(("Trap %x (debug) at %VGv exit qualification %VX64\n", vector, (RTGCPTR)pCtx->rip, exitQualification));
      rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
      AssertRC(rc);
@@ 2224 @@
      if (!CPUMIsGuestInRealModeEx(pCtx))
      {
-         Log(("Trap %x at %04X:%VGv errorCode=%x\n", vector, pCtx->cs, pCtx->rip, errCode));
+         Log(("Trap %x at %04X:%VGv errorCode=%x\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, errCode));
          rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
          AssertRC(rc);
@@ 2233 @@
      Assert(CPUMIsGuestInRealModeEx(pCtx));
-     LogFlow(("Real mode X86_XCPT_GP instruction emulation at %VGv\n", pCtx->rip));
+     LogFlow(("Real mode X86_XCPT_GP instruction emulation at %VGv\n", (RTGCPTR)pCtx->rip));
      rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
      if (rc == VINF_SUCCESS)
@@ 2272 @@
  }
-     Log(("Trap %x at %04X:%VGv\n", vector, pCtx->cs, pCtx->rip));
+     Log(("Trap %x at %04X:%VGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
      rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
      AssertRC(rc);
@@ 2354 @@
      /* Handle the pagefault trap for the nested shadow table. */
      rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMMODE_EPT, errCode, CPUMCTX2CORE(pCtx), GCPhys);
-     Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Rrc\n", pCtx->rip, rc));
+     Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Rrc\n", (RTGCPTR)pCtx->rip, rc));
      if (rc == VINF_SUCCESS)
      {   /* We've successfully synced our shadow pages, so let's just continue execution. */
-         Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, exitQualification , errCode));
+         Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
@@ 2377 @@
  case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
      /* Clear VM-exit on IF=1 change. */
-     LogFlow(("VMX_EXIT_IRQ_WINDOW %VGv pending=%d IF=%d\n", pCtx->rip, VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
+     LogFlow(("VMX_EXIT_IRQ_WINDOW %VGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
      pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
      rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
@@ 2472 @@
  {
  case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
-     Log2(("VMX: %VGv mov cr%d, x\n", pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
+     Log2(("VMX: %VGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
      STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
      rc = EMInterpretCRxWrite(pVM, CPUMCTX2CORE(pCtx),
@@ 2644 @@
      if (fIOWrite)
      {
-         Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->rip, uPort, cbSize));
+         Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
          rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
      else
      {
-         Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->rip, uPort, cbSize));
+         Log2(("IOMInterpretINSEx %VGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
          STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
          rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
@@ 2735 @@
      intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
-     Log(("Inject IO debug trap at %VGv\n", pCtx->rip));
+     Log(("Inject IO debug trap at %VGv\n", (RTGCPTR)pCtx->rip));
      rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0);
      AssertRC(rc);
@@ 2876 @@
      VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
-     Log(("Old eip %VGv new %VGv\n", pCtx->rip, (RTGCPTR)val));
+     Log(("Old eip %VGv new %VGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
      VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
@@ 3148 @@
      VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
-     Log(("Old eip %VGv new %VGv\n", pCtx->rip, (RTGCPTR)val));
+     Log(("Old eip %VGv new %VGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
      VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
      Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
(r13236 → r13820)

@@ 82 @@
      int rc;
-     LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGp eip=%VGv\n", uErr, pvFault, pRegFrame->rip));
+     LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%VGp eip=%VGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
      STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
      STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
trunk/src/VBox/VMM/VMMSwitcher.cpp
(r13819 → r13820)

@@ 196 @@
      /* shit */
-     AssertMsgFailed(("PGMR3Map(,%VRv, %VGp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
+     AssertMsgFailed(("PGMR3Map(,%RRv, %RHp, %#x, 0) failed with rc=%Rrc\n", pVM->vmm.s.pvCoreCodeRC, pVM->vmm.s.HCPhysCoreCode, cbCoreCode, rc));
      SUPContFree(pVM->vmm.s.pvCoreCodeR3, pVM->vmm.s.cbCoreCode >> PAGE_SHIFT);
  }
@@ 711 @@
           "  GCPtrCode   = %VGv\n"
           "  u32IDCode   = %08x\n"
-          "  pVM GC      = %VGv\n"
-          "  pCPUM GC    = %VGv\n"
-          "  pVM HC      = %p\n"
-          "  pCPUM HC    = %p\n"
+          "  pVMRC       = %RRv\n"
+          "  pCPUMRC     = %RRv\n"
+          "  pVMR3       = %p\n"
+          "  pCPUMR3     = %p\n"
           "  GCPtrGDT    = %VGv\n"
-          "  InterCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
-          "  HyperCR3s   = %08x, %08x, %08x (32-Bit, PAE, AMD64)\n"
+          "  InterCR3s   = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
+          "  HyperCR3s   = %08RHp, %08RHp, %08RHp (32-Bit, PAE, AMD64)\n"
           "  SelCS       = %04x\n"
           "  SelDS       = %04x\n"
@@ 723 @@
           "  SelTSS      = %04x\n",
           pSwitcher->enmType, pSwitcher->pszDesc, pSwitcher->cbCode,
-          R0PtrCode, pu8CodeR3, GCPtrCode, u32IDCode, VM_GUEST_ADDR(pVM, pVM),
-          VM_GUEST_ADDR(pVM, &pVM->cpum), pVM, &pVM->cpum,
+          R0PtrCode,
+          pu8CodeR3,
+          GCPtrCode,
+          u32IDCode,
+          VM_RC_ADDR(pVM, pVM),
+          VM_RC_ADDR(pVM, &pVM->cpum),
+          pVM,
+          &pVM->cpum,
           GCPtrGDT,
+          PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
           PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
-          PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
           SelCS, SelDS, SelCS64, SelTSS);
trunk/src/VBox/VMM/VMMTests.cpp
(r13818 → r13820)

@@ 134 @@
  else if (RCPtrFault != CPUMGetHyperEIP(pVM))
  {
-     RTPrintf("VMM: FAILURE - EIP=%VGv expected %VGv (%s)\n", CPUMGetHyperEIP(pVM), RCPtrFault, pszFaultEIP);
+     RTPrintf("VMM: FAILURE - EIP=%08RX32 expected %RRv (%s)\n", CPUMGetHyperEIP(pVM), RCPtrFault, pszFaultEIP);
      fDump = true;
  }
@@ 187 @@
  if (RT_SUCCESS(rc))
  {
-     RTPrintf("VMM: VMMGCEntry=%VGv\n", RCPtrEP);
+     RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);
      /*
@@ 498 @@
  if (RT_SUCCESS(rc))
  {
-     RTPrintf("VMM: VMMGCEntry=%VGv\n", RCPtrEP);
+     RTPrintf("VMM: VMMGCEntry=%RRv\n", RCPtrEP);
      CPUMQueryHyperCtxPtr(pVM, &pHyperCtx);