Changeset 45276 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Apr 2, 2013 8:17:11 AM
svn:sync-xref-src-repo-rev: 84670
Location: trunk/src/VBox/VMM/VMMR3
Files: 14 edited
trunk/src/VBox/VMM/VMMR3/CPUM.cpp (r44399 → r45276)

@@ -42 +42 @@
 #include <VBox/vmm/pdmapi.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/dbgf.h>
@@ -4192 +4193 @@
      * Are we in Ring-0?
      */
-    if (pCtxCore->ss.Sel && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
+    if (    pCtxCore->ss.Sel
+        && (pCtxCore->ss.Sel & X86_SEL_RPL) == 0
         && !pCtxCore->eflags.Bits.u1VM)
     {
@@ -4204 +4206 @@
          */
         pCtxCore->ss.Sel |= 1;
-        if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
+        if (    pCtxCore->cs.Sel
+            && (pCtxCore->cs.Sel & X86_SEL_RPL) == 0)
             pCtxCore->cs.Sel |= 1;
     }
     else
     {
+#ifdef VBOX_WITH_RAW_RING1
+        if (    EMIsRawRing1Enabled(pVM)
+            && !pCtxCore->eflags.Bits.u1VM
+            && (pCtxCore->ss.Sel & X86_SEL_RPL) == 1)
+        {
+            /* Set CPL to Ring-2. */
+            pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 2;
+            if (pCtxCore->cs.Sel && (pCtxCore->cs.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 2;
+        }
+#else
         AssertMsg((pCtxCore->ss.Sel & X86_SEL_RPL) >= 2 || pCtxCore->eflags.Bits.u1VM,
                   ("ring-1 code not supported\n"));
+#endif
         /*
          * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
@@ -4221 +4236 @@
      */
     AssertMsg((pCtxCore->eflags.u32 & X86_EFL_IF), ("X86_EFL_IF is clear\n"));
-    AssertReleaseMsg(   pCtxCore->eflags.Bits.u2IOPL < (unsigned)(pCtxCore->ss.Sel & X86_SEL_RPL)
-                     || pCtxCore->eflags.Bits.u1VM,
+    AssertReleaseMsg(pCtxCore->eflags.Bits.u2IOPL == 0,
                      ("X86_EFL_IOPL=%d CPL=%d\n", pCtxCore->eflags.Bits.u2IOPL, pCtxCore->ss.Sel & X86_SEL_RPL));
     Assert((pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) == (X86_CR0_PG | X86_CR0_PE | X86_CR0_WP));
@@ -4231 +4245 @@
     return VINF_SUCCESS;
 }
+
@@ -4300 +4315 @@
     if (!pCtxCore->eflags.Bits.u1VM)
     {
-        /** @todo See what happens if we remove this. */
-        if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
-            pCtxCore->ds.Sel &= ~X86_SEL_RPL;
-        if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
-            pCtxCore->es.Sel &= ~X86_SEL_RPL;
-        if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
-            pCtxCore->fs.Sel &= ~X86_SEL_RPL;
-        if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
-            pCtxCore->gs.Sel &= ~X86_SEL_RPL;
+#ifdef VBOX_WITH_RAW_RING1
+        if (    EMIsRawRing1Enabled(pVM)
+            && (pCtxCore->ss.Sel & X86_SEL_RPL) == 2)
+        {
+            /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
+            /** @todo See what happens if we remove this. */
+            if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 2)
+                pCtxCore->ds.Sel = (pCtxCore->ds.Sel & ~X86_SEL_RPL) | 1;
+            if ((pCtxCore->es.Sel & X86_SEL_RPL) == 2)
+                pCtxCore->es.Sel = (pCtxCore->es.Sel & ~X86_SEL_RPL) | 1;
+            if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 2)
+                pCtxCore->fs.Sel = (pCtxCore->fs.Sel & ~X86_SEL_RPL) | 1;
+            if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 2)
+                pCtxCore->gs.Sel = (pCtxCore->gs.Sel & ~X86_SEL_RPL) | 1;
+
+            /*
+             * Ring-2 selector => Ring-1.
+             */
+            pCtxCore->ss.Sel = (pCtxCore->ss.Sel & ~X86_SEL_RPL) | 1;
+            if ((pCtxCore->cs.Sel & X86_SEL_RPL) == 2)
+                pCtxCore->cs.Sel = (pCtxCore->cs.Sel & ~X86_SEL_RPL) | 1;
+        }
+        else
+        {
+#endif
+            /** @todo See what happens if we remove this. */
+            if ((pCtxCore->ds.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->ds.Sel &= ~X86_SEL_RPL;
+            if ((pCtxCore->es.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->es.Sel &= ~X86_SEL_RPL;
+            if ((pCtxCore->fs.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->fs.Sel &= ~X86_SEL_RPL;
+            if ((pCtxCore->gs.Sel & X86_SEL_RPL) == 1)
+                pCtxCore->gs.Sel &= ~X86_SEL_RPL;
+#ifdef VBOX_WITH_RAW_RING1
+        }
+#endif
     }
 }
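The selector edits above are the core of the changeset: raw mode runs guest ring-0 code in shadow ring 1, and with VBOX_WITH_RAW_RING1 it additionally runs guest ring-1 code in shadow ring 2, so CPUMRawEnter/CPUMRawLeave must translate the RPL bits in both directions. A minimal standalone sketch of that mapping (the helper and the demo values are illustrative, not part of the changeset):

    #include <assert.h>
    #include <stdint.h>

    #define X86_SEL_RPL 3U  /* the low two selector bits hold the requested privilege level */

    /* Illustrative helper: re-home a selector in another ring, i.e. the
     * (Sel & ~X86_SEL_RPL) | ring pattern used throughout the hunks above. */
    static uint16_t SelWithRpl(uint16_t uSel, unsigned uRing)
    {
        return (uint16_t)((uSel & ~X86_SEL_RPL) | uRing);
    }

    int main(void)
    {
        uint16_t uGuestSs = 0x0019;                     /* guest SS with RPL=1 */
        uint16_t uShadow  = SelWithRpl(uGuestSs, 2);    /* rawr1 enter: run it in ring 2 */
        assert((uShadow & X86_SEL_RPL) == 2);
        assert(SelWithRpl(uShadow, 1) == uGuestSs);     /* leave: ring 2 back to ring 1 */
        return 0;
    }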
trunk/src/VBox/VMM/VMMR3/CSAM.cpp (r44399 → r45276)

@@ -847 +847 @@
             break;
 
+        /* removing breaks win2k guests? */
+        case OP_IRET:
+#ifdef VBOX_WITH_RAW_RING1
+            if (EMIsRawRing1Enabled(pVM))
+                break;
+#endif
+            /* no break */
+
         case OP_ILLUD2:
             /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue. */
@@ -852 +860 @@
         case OP_INT3:
         case OP_INVALID:
-#if 1
-        /* removing breaks win2k guests? */
-        case OP_IRET:
-#endif
             return VINF_SUCCESS;
     }
@@ -919 +923 @@
         }
 
+#ifdef VBOX_WITH_RAW_RING1
+        case OP_MOV:
+            /* mov xx, CS is a dangerous instruction as our raw ring usage leaks through. */
+            if (    EMIsRawRing1Enabled(pVM)
+                && (pCpu->Param2.fUse & DISUSE_REG_SEG)
+                && (pCpu->Param2.Base.idxSegReg == DISSELREG_CS))
+            {
+                Log(("CSAM: Patching dangerous 'mov xx, cs' instruction at %RGv with an int3\n", pCurInstrGC));
+                if (PATMR3HasBeenPatched(pVM, pCurInstrGC) == false)
+                {
+                    rc = PATMR3InstallPatch(pVM, pCurInstrGC, (pPage->fCode32) ? PATMFL_CODE32 : 0);
+                    if (RT_FAILURE(rc))
+                    {
+                        Log(("PATMR3InstallPatch failed with %d\n", rc));
+                        return VWRN_CONTINUE_ANALYSIS;
+                    }
+                }
+                return VWRN_CONTINUE_ANALYSIS;
+            }
+            break;
+#endif
+
         case OP_PUSH:
+            /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
             if (pCpu->pCurInstr->fParam1 != OP_PARM_REG_CS)
                 break;
 
             /* no break */
+#ifndef VBOX_WITH_SAFE_STR
         case OP_STR:
+#endif
         case OP_LSL:
         case OP_LAR:
@@ -2642 +2671 @@
 
     rc = PATMR3InstallPatch(pVM, pHandler, fPatchFlags);
-    if (RT_SUCCESS(rc) || rc == VERR_PATM_ALREADY_PATCHED)
+    if (    RT_SUCCESS(rc)
+        || rc == VERR_PATM_ALREADY_PATCHED)
     {
         Log(("Gate handler 0x%X is SAFE!\n", iGate));
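Both CSAM and PATM carry the same "@todo broken comparison" note: the guard tests the opcode table's fParam1 constant instead of the decoded operand. A sketch of the corrected test the comment spells out, with stand-in types and values for the DIS* disassembler structures (everything here is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for the VBox disassembler state; the values are illustrative. */
    #define DISUSE_REG_SEG  0x100U
    enum { DISSELREG_ES, DISSELREG_CS, DISSELREG_SS, DISSELREG_DS, DISSELREG_FS, DISSELREG_GS };

    typedef struct { uint64_t fUse; struct { uint8_t idxSegReg; } Base; } DISOPPARAMSKETCH;

    /* Corrected check from the @todo: does this decoded operand name a given
     * segment register?  (The broken version compared pCpu->pCurInstr->fParam1
     * against an OP_PARM_* constant instead of looking at the operand.) */
    static bool IsSegRegParam(DISOPPARAMSKETCH const *pParam, uint8_t idxSegReg)
    {
        return (pParam->fUse & DISUSE_REG_SEG)
            && pParam->Base.idxSegReg == idxSegReg;
    }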
trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp (r44528 → r45276)

@@ -661 +661 @@
         RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrCurrentLog failed with rc=%Rrc\n", rc);
     if (pszPrefix && *pszPrefix)
-        RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+    {
+        if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+            RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+        else
+            RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+    }
     else
         RTLogPrintf("%s\n", szBuf);
@@ -692 +697 @@
         RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrLog(, %RTsel, %RGv) failed with rc=%Rrc\n", Sel, GCPtr, rc);
     if (pszPrefix && *pszPrefix)
-        RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+    {
+        if (pVCpu->CTX_SUFF(pVM)->cCpus > 1)
+            RTLogPrintf("%s-CPU%u: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
+        else
+            RTLogPrintf("%s: %s\n", pszPrefix, szBuf);
+    }
     else
         RTLogPrintf("%s\n", szBuf);
trunk/src/VBox/VMM/VMMR3/EM.cpp (r45152 → r45276)

@@ -124 +124 @@
     pVM->fRecompileSupervisor = RT_SUCCESS(rc) ? !fEnabled : false;
     Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n", pVM->fRecompileUser, pVM->fRecompileSupervisor));
+
+#ifdef VBOX_WITH_RAW_RING1
+    rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR1Enabled", &fEnabled);
+    pVM->fRawRing1Enabled = RT_SUCCESS(rc) ? fEnabled : false;
+    Log(("EMR3Init: fRawRing1Enabled=%RTbool\n", pVM->fRawRing1Enabled));
+#else
+    pVM->fRawRing1Enabled = false; /* disabled by default. */
+#endif
 
 #ifdef VBOX_WITH_REM
@@ -268 +276 @@
     EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
     EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
+    EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
+    EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
 
     EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
@@ -322 +332 @@
     EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
     EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
+    EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
+    EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
 
     EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
@@ -938 +950 @@
 static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
 {
-    LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
 
 #ifdef VBOX_WITH_REM
@@ -958 +970 @@
 #endif
 
-    LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
+    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
     return rc;
 }
@@ -1182 +1194 @@
     {
         DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS :");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
         emR3RemStep(pVM, pVCpu);
         if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
@@ -1308 +1320 @@
             return EMSTATE_REM;
 
+# ifdef VBOX_WITH_RAW_RING1
+        /* Only ring 0 and 1 supervisor code. */
+        if (EMIsRawRing1Enabled(pVM))
+        {
+            if ((uSS & X86_SEL_RPL) == 2)   /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
+            {
+                Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
+                return EMSTATE_REM;
+            }
+        }
+        else
+# endif
         /* Only ring 0 supervisor code. */
         if ((uSS & X86_SEL_RPL) != 0)
@@ -1334 +1358 @@
         {
             Log2(("raw r0 mode forced: patch code\n"));
+# ifdef VBOX_WITH_SAFE_STR
+            Assert(pCtx->tr.Sel);
+# endif
             return EMSTATE_RAW;
         }
@@ -1346 +1373 @@
 # endif
 
+# ifndef VBOX_WITH_RAW_RING1
         /** @todo still necessary??? */
         if (EFlags.Bits.u2IOPL != 0)
@@ -1352 +1380 @@
             return EMSTATE_REM;
         }
+# endif
     }
@@ -1387 +1416 @@
         return EMSTATE_REM;
     }
+
+# ifdef VBOX_WITH_SAFE_STR
+    if (pCtx->tr.Sel == 0)
+    {
+        Log(("Raw mode refused -> TR=0\n"));
+        return EMSTATE_REM;
+    }
+# endif
 
     /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
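EMR3Init above reads the new RawR1Enabled key with CFGM's query-then-default idiom: a missing key makes CFGMR3QueryBool fail and the feature falls back to off. The same effect could presumably be had with the CFGMR3QueryBoolDef convenience variant; a sketch, assuming that variant's usual signature:

    /* Sketch only: equivalent of the hunk's query-with-default, assuming the
     * CFGMR3QueryBoolDef convenience wrapper (pNode, pszName, pf, fDefault). */
    bool fRawRing1Enabled;
    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "RawR1Enabled", &fRawRing1Enabled, false /* default: disabled */);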
trunk/src/VBox/VMM/VMMR3/EMHM.cpp (r44528 → r45276)

@@ -137 +137 @@
     {
         DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS :");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
         rc = emR3HmStep(pVM, pVCpu);
         if (    rc != VINF_SUCCESS
trunk/src/VBox/VMM/VMMR3/EMRaw.cpp (r44399 → r45276)

@@ -159 +159 @@
     PCPUMCTX pCtx = pVCpu->em.s.pCtx;
     bool fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
-#ifndef DEBUG_sandervl
+#ifndef DEBUG_sander
     Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
         fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
@@ -196 +196 @@
     else
         rc = VMMR3RawRunGC(pVM, pVCpu);
-#ifndef DEBUG_sandervl
+#ifndef DEBUG_sander
     Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
         fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
@@ -237 +237 @@
     {
         DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS :");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
         rc = emR3RawStep(pVM, pVCpu);
-        if (rc != VINF_SUCCESS)
+        if (    rc != VINF_SUCCESS
+            && rc != VINF_EM_DBG_STEPPED)
             break;
     }
@@ -950 +951 @@
     {
         DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr :");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr");
     }
 #endif
@@ -1090 +1091 @@
     {
         DBGFR3_INFO_LOG(pVM, "cpumguest", "PRIV");
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr :");
+        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "Privileged instr");
     }
 #endif
@@ -1361 +1362 @@
     Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
 # endif
+# ifdef VBOX_WITH_RAW_RING1
+    Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0 || (EMIsRawRing1Enabled(pVM) && (pCtx->ss.Sel & X86_SEL_RPL) == 1));
+# else
     Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) == 3 || (pCtx->ss.Sel & X86_SEL_RPL) == 0);
+# endif
     AssertMsg(   (pCtx->eflags.u32 & X86_EFL_IF)
              || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
@@ -1429 +1434 @@
         Log(("RV86: %04x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
     else if ((pCtx->ss.Sel & X86_SEL_RPL) == 1)
-        Log(("RR0: %08x ESP=%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
-             pCtx->eip, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
+        Log(("RR0: %x:%08x ESP=%x:%08x EFL=%x IF=%d/%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n",
+             pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, CPUMRawGetEFlags(pVCpu), !!(pGCState->uVMFlags & X86_EFL_IF), pCtx->eflags.Bits.u1IF,
              pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss.Sel & X86_SEL_RPL), CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip)));
+# ifdef VBOX_WITH_RAW_RING1
+    else if ((pCtx->ss.Sel & X86_SEL_RPL) == 2)
+        Log(("RR1: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x CPL=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, (pCtx->ss.Sel & X86_SEL_RPL)));
+# endif
     else if ((pCtx->ss.Sel & X86_SEL_RPL) == 3)
-        Log(("RR3: %08x ESP=%08x IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
+        Log(("RR3: %x:%08x ESP=%x:%08x IF=%d VMFlags=%x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
 #endif /* LOG_ENABLED */
@@ -1543 +1551 @@
         || VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     {
-        Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss.Sel & X86_SEL_RPL) != 1);
+        Assert(pCtx->eflags.Bits.u1VM || (EMIsRawRing1Enabled(pVM) ? ((pCtx->ss.Sel & X86_SEL_RPL) != 2) : ((pCtx->ss.Sel & X86_SEL_RPL) != 1)));
 
         STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
trunk/src/VBox/VMM/VMMR3/PATM.cpp (r44399 → r45276)

@@ -1535 +1535 @@
             break;
 
+#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
+        case OP_STR:
+            break;
+#endif
+
         default:
             if (pCpu->pCurInstr->fOpType & (DISOPTYPE_PRIVILEGED_NOTRAP))
@@ -1645 +1650 @@
         case OP_RETN:
             return VINF_SUCCESS;
+
+#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table */
+        case OP_STR:
+            break;
+#endif
 
         case OP_POPF:
@@ -1806 +1816 @@
 
         case OP_POP:
+            /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
             if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_SS)
             {
@@ -1913 +1924 @@
 
         case OP_PUSH:
+            /** @todo broken comparison!! should be if ((pCpu->Param1.fUse & DISUSE_REG_SEG) && (pCpu->Param1.Base.idxSegReg == DISSELREG_SS)) */
             if (pCpu->pCurInstr->fParam1 == OP_PARM_REG_CS)
             {
@@ -1947 +1959 @@
 
         case OP_STR:
+#ifdef VBOX_WITH_SAFE_STR /* @todo remove DISOPTYPE_PRIVILEGED_NOTRAP from disasm table and move OP_STR into #ifndef */
+            /* Now safe because our shadow TR entry is identical to the guest's. */
+            goto duplicate_instr;
+#endif
         case OP_SLDT:
             rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
@@ -4442 +4458 @@
             break;
 
+#ifndef VBOX_WITH_SAFE_STR
         case OP_STR:
+#endif
         case OP_SGDT:
         case OP_SLDT:
@@ -4453 +4471 @@
         case OP_VERR:
         case OP_IRET:
+#ifdef VBOX_WITH_RAW_RING1
+        case OP_MOV:
+#endif
             rc = patmR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
             break;
trunk/src/VBox/VMM/VMMR3/PATMA.asm (r44528 → r45276)

@@ -1262 +1262 @@
 
     ; force ring 1 CS RPL
-    or      dword [esp+8], 1
+    or      dword [esp+8], 1    ;-> @todo we leave traces or raw mode if we jump back to the host context to handle pending interrupts! (below)
 iret_notring0:
 
@@ -1443 +1443 @@
     DD      PATM_FIXUP
     DD      PATMIretTable - PATMIretStart
+    DD      PATM_IRET_FUNCTION
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_TEMP_EAX
+    DD      0
+    DD      PATM_TEMP_ECX
+    DD      0
+    DD      PATM_TEMP_RESTORE_FLAGS
+    DD      0
+    DD      PATM_PENDINGACTION
+    DD      0
+    DD      0ffffffffh
+SECTION .text
+
+;;****************************************************
+;; Abstract:
+;;
+;; if eflags.NT==0 && iretstack.eflags.VM==0 && iretstack.eflags.IOPL==0
+;; then
+;;     if return to ring 0 (iretstack.new_cs & 3 == 0)
+;;     then
+;;         if iretstack.new_eflags.IF == 1 && iretstack.new_eflags.IOPL == 0
+;;         then
+;;             iretstack.new_cs |= 1
+;;         else
+;;             int 3
+;;         endif
+;;         uVMFlags &= ~X86_EFL_IF
+;;         iret
+;;     else
+;;         int 3
+;;****************************************************
+;;
+; Stack:
+;
+; esp + 32 - GS         (V86 only)
+; esp + 28 - FS         (V86 only)
+; esp + 24 - DS         (V86 only)
+; esp + 20 - ES         (V86 only)
+; esp + 16 - SS         (if transfer to outer ring)
+; esp + 12 - ESP        (if transfer to outer ring)
+; esp + 8  - EFLAGS
+; esp + 4  - CS
+; esp      - EIP
+;;
+BEGINPROC PATMIretRing1Replacement
+PATMIretRing1Start:
+    mov     dword [ss:PATM_INTERRUPTFLAG], 0
+    pushfd
+
+%ifdef PATM_LOG_PATCHIRET
+    push    eax
+    push    ecx
+    push    edx
+    lea     edx, dword [ss:esp+12+4]    ;3 dwords + pushed flags -> iret eip
+    mov     eax, PATM_ACTION_LOG_IRET
+    lock    or dword [ss:PATM_PENDINGACTION], eax
+    mov     ecx, PATM_ACTION_MAGIC
+    db      0fh, 0bh    ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+    pop     edx
+    pop     ecx
+    pop     eax
+%endif
+
+    test    dword [esp], X86_EFL_NT
+    jnz near iretring1_fault1
+
+    ; we can't do an iret to v86 code, as we run with CPL=1. The iret would attempt a protected mode iret and (most likely) fault.
+    test    dword [esp+12], X86_EFL_VM
+    jnz near iretring1_return_to_v86
+
+    ;;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+    ;;@todo: not correct for iret back to ring 2!!!!!
+    ;;!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+    test    dword [esp+8], 2
+    jnz     iretring1_checkpendingirq
+
+    test    dword [esp+12], X86_EFL_IF
+    jz near iretring1_clearIF
+
+iretring1_checkpendingirq:
+
+; if interrupts are pending, then we must go back to the host context to handle them!
+; Note: This is very important as pending pic interrupts can be overridden by apic interrupts if we don't check early enough (Fedora 5 boot)
+; @@todo fix this properly, so we can dispatch pending interrupts in GC
+    test    dword [ss:PATM_VM_FORCEDACTIONS], VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
+    jz      iretring1_continue
+
+; Go to our hypervisor trap handler to dispatch the pending irq
+    mov     dword [ss:PATM_TEMP_EAX], eax
+    mov     dword [ss:PATM_TEMP_ECX], ecx
+    mov     dword [ss:PATM_TEMP_EDI], edi
+    mov     dword [ss:PATM_TEMP_RESTORE_FLAGS], PATM_RESTORE_EAX | PATM_RESTORE_ECX | PATM_RESTORE_EDI
+    mov     eax, PATM_ACTION_PENDING_IRQ_AFTER_IRET
+    lock    or dword [ss:PATM_PENDINGACTION], eax
+    mov     ecx, PATM_ACTION_MAGIC
+    mov     edi, PATM_CURINSTRADDR
+
+    popfd
+    db      0fh, 0bh    ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+    ; does not return
+
+iretring1_continue:
+
+    test    dword [esp+8], 2
+    jnz     iretring1_notring01
+
+    test    dword [esp+8], 1
+    jz      iretring1_ring0
+
+    ; ring 1 return change CS & SS RPL to 2 from 1
+    and     dword [esp+8], ~1       ; CS
+    or      dword [esp+8], 2
+
+    and     dword [esp+20], ~1      ; SS
+    or      dword [esp+20], 2
+
+    jmp short iretring1_notring01
+iretring1_ring0:
+    ; force ring 1 CS RPL
+    or      dword [esp+8], 1
+
+iretring1_notring01:
+; This section must *always* be executed (!!)
+; Extract the IOPL from the return flags, save them to our virtual flags and
+; put them back to zero
+; @note we assume iretd doesn't fault!!!
+    push    eax
+    mov     eax, dword [esp+16]
+    and     eax, X86_EFL_IOPL
+    and     dword [ss:PATM_VMFLAGS], ~X86_EFL_IOPL
+    or      dword [ss:PATM_VMFLAGS], eax
+    pop     eax
+    and     dword [esp+12], ~X86_EFL_IOPL
+
+    ; Set IF again; below we make sure this won't cause problems.
+    or      dword [ss:PATM_VMFLAGS], X86_EFL_IF
+
+    ; make sure iret is executed fully (including the iret below; cli ... iret can otherwise be interrupted)
+    mov     dword [ss:PATM_INHIBITIRQADDR], PATM_CURINSTRADDR
+
+    popfd
+    mov     dword [ss:PATM_INTERRUPTFLAG], 1
+    iretd
+    PATM_INT3
+
+iretring1_fault:
+    popfd
+    mov     dword [ss:PATM_INTERRUPTFLAG], 1
+    PATM_INT3
+
+iretring1_fault1:
+    nop
+    popfd
+    mov     dword [ss:PATM_INTERRUPTFLAG], 1
+    PATM_INT3
+
+iretring1_clearIF:
+    push    dword [esp+4]           ; eip to return to
+    pushfd
+    push    eax
+    push    PATM_FIXUP
+    DB      0E8h                    ; call
+    DD      PATM_IRET_FUNCTION
+    add     esp, 4                  ; pushed address of jump table
+
+    cmp     eax, 0
+    je near iretring1_fault3
+
+    mov     dword [esp+12+4], eax   ; stored eip in iret frame
+    pop     eax
+    popfd
+    add     esp, 4                  ; pushed eip
+
+; This section must *always* be executed (!!)
+; Extract the IOPL from the return flags, save them to our virtual flags and
+; put them back to zero
+    push    eax
+    mov     eax, dword [esp+16]
+    and     eax, X86_EFL_IOPL
+    and     dword [ss:PATM_VMFLAGS], ~X86_EFL_IOPL
+    or      dword [ss:PATM_VMFLAGS], eax
+    pop     eax
+    and     dword [esp+12], ~X86_EFL_IOPL
+
+    ; Clear IF
+    and     dword [ss:PATM_VMFLAGS], ~X86_EFL_IF
+    popfd
+
+    test    dword [esp+8], 1
+    jz      iretring1_clearIF_ring0
+
+    ; ring 1 return change CS & SS RPL to 2 from 1
+    and     dword [esp+8], ~1       ; CS
+    or      dword [esp+8], 2
+
+    and     dword [esp+20], ~1      ; SS
+    or      dword [esp+20], 2
+    ; the patched destination code will set PATM_INTERRUPTFLAG after the return!
+    iretd
+
+iretring1_clearIF_ring0:
+    ; force ring 1 CS RPL
+    or      dword [esp+8], 1
+    ; the patched destination code will set PATM_INTERRUPTFLAG after the return!
+    iretd
+
+iretring1_return_to_v86:
+    test    dword [esp+12], X86_EFL_IF
+    jz      iretring1_fault
+
+    ; Go to our hypervisor trap handler to perform the iret to v86 code
+    mov     dword [ss:PATM_TEMP_EAX], eax
+    mov     dword [ss:PATM_TEMP_ECX], ecx
+    mov     dword [ss:PATM_TEMP_RESTORE_FLAGS], PATM_RESTORE_EAX | PATM_RESTORE_ECX
+    mov     eax, PATM_ACTION_DO_V86_IRET
+    lock    or dword [ss:PATM_PENDINGACTION], eax
+    mov     ecx, PATM_ACTION_MAGIC
+
+    popfd
+
+    db      0fh, 0bh    ; illegal instr (hardcoded assumption in PATMHandleIllegalInstrTrap)
+    ; does not return
+
+
+iretring1_fault3:
+    pop     eax
+    popfd
+    add     esp, 4                  ; pushed eip
+    jmp     iretring1_fault
+
+align 4
+PATMIretRing1Table:
+    DW      PATM_MAX_JUMPTABLE_ENTRIES  ; nrSlots
+    DW      0                           ; ulInsertPos
+    DD      0                           ; cAddresses
+    TIMES PATCHJUMPTABLE_SIZE DB 0      ; lookup slots
+
+PATMIretRing1End:
+ENDPROC PATMIretRing1Replacement
+
+SECTION .data
+; Patch record for 'iretd'
+GLOBALNAME PATMIretRing1Record
+    RTCCPTR_DEF PATMIretRing1Start
+    DD      0
+    DD      0
+    DD      0
+    DD      PATMIretRing1End - PATMIretRing1Start
+%ifdef PATM_LOG_PATCHIRET
+    DD      26
+%else
+    DD      25
+%endif
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+%ifdef PATM_LOG_PATCHIRET
+    DD      PATM_PENDINGACTION
+    DD      0
+%endif
+    DD      PATM_VM_FORCEDACTIONS
+    DD      0
+    DD      PATM_TEMP_EAX
+    DD      0
+    DD      PATM_TEMP_ECX
+    DD      0
+    DD      PATM_TEMP_EDI
+    DD      0
+    DD      PATM_TEMP_RESTORE_FLAGS
+    DD      0
+    DD      PATM_PENDINGACTION
+    DD      0
+    DD      PATM_CURINSTRADDR
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_VMFLAGS
+    DD      0
+    DD      PATM_INHIBITIRQADDR
+    DD      0
+    DD      PATM_CURINSTRADDR
+    DD      0
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+    DD      PATM_INTERRUPTFLAG
+    DD      0
+    DD      PATM_FIXUP
+    DD      PATMIretRing1Table - PATMIretRing1Start
     DD      PATM_IRET_FUNCTION
     DD      0
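The boxed "Abstract" comment above is the specification for the new iret replacement. Rendered as a compilable C sketch for readability (mocked stack-frame fields and stub trap functions; this mirrors the comment, not the emitted patch bytes, and it simplifies the NT/V86 paths, which have their own handlers above):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFL_IF 0x200U

    /* Mock of the iret stack-frame fields named in the abstract. */
    struct IretFrame { unsigned new_cs; bool IF, VM; unsigned IOPL; };

    static void int3(void)  { /* trap back to the hypervisor */ }
    static void iretd(void) { /* execute the real iret */ }

    /* C rendering of the PATMIretRing1Replacement abstract (a sketch). */
    static void patchedIretSketch(bool fEflNT, struct IretFrame *pFrame, uint32_t *puVMFlags)
    {
        if (!fEflNT && !pFrame->VM && pFrame->IOPL == 0)
        {
            if ((pFrame->new_cs & 3) == 0)          /* returning to ring 0? */
            {
                if (pFrame->IF && pFrame->IOPL == 0)
                    pFrame->new_cs |= 1;            /* keep it in shadow ring 1 */
                else
                    int3();
                *puVMFlags &= ~X86_EFL_IF;
                iretd();
            }
            else
                int3();
        }
        else
            int3();                                 /* NT/V86: handled separately above */
    }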
trunk/src/VBox/VMM/VMMR3/PATMPatch.cpp (r44528 → r45276)

@@ -27 +27 @@
 #include <VBox/vmm/cpum.h>
 #include <VBox/vmm/mm.h>
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/trpm.h>
 #include <VBox/param.h>
@@ -436 +437 @@
 
     AssertMsg(fSizeOverride == false, ("operand size override!!\n"));
 
     callInfo.pCurInstrGC = pCurInstrGC;
 
-    size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
+#ifdef VBOX_WITH_RAW_RING1
+    if (EMIsRawRing1Enabled(pVM))
+    {
+        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRing1Record, 0, false, &callInfo);
+    }
+    else
+#endif
+        size = patmPatchGenCode(pVM, pPatch, pPB, &PATMIretRecord, 0, false, &callInfo);
 
     PATCHGEN_EPILOG(pPatch, size);
@@ -1074 +1081 @@
 int patmPatchGenIntEntry(PVM pVM, PPATCHINFO pPatch, RTRCPTR pIntHandlerGC)
 {
-    uint32_t size;
     int rc = VINF_SUCCESS;
 
-    PATCHGEN_PROLOG(pVM, pPatch);
-
-    /* Add lookup record for patch to guest address translation */
-    patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
-
-    /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
-    size = patmPatchGenCode(pVM, pPatch, pPB,
-                            (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
-                            0, false);
-
-    PATCHGEN_EPILOG(pPatch, size);
+#ifdef VBOX_WITH_RAW_RING1
+    if (!EMIsRawRing1Enabled(pVM)) /* direct passthru of interrupts is not allowed in the ring-1 support case as we can't deal with the ring-1/2 ambiguity in the patm asm code and we don't need it either as TRPMForwardTrap takes care of the details. */
+    {
+#endif
+        uint32_t size;
+        PATCHGEN_PROLOG(pVM, pPatch);
+
+        /* Add lookup record for patch to guest address translation */
+        patmR3AddP2GLookupRecord(pVM, pPatch, pPB, pIntHandlerGC, PATM_LOOKUP_PATCH2GUEST);
+
+        /* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
+        size = patmPatchGenCode(pVM, pPatch, pPB,
+                                (pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
+                                0, false);
+
+        PATCHGEN_EPILOG(pPatch, size);
+#ifdef VBOX_WITH_RAW_RING1
+    }
+#endif
 
     // Interrupt gates set IF to 0
@@ -1107 +1121 @@
 {
     uint32_t size;
+
+    Assert(!EMIsRawRing1Enabled(pVM));
 
     PATCHGEN_PROLOG(pVM, pPatch);
trunk/src/VBox/VMM/VMMR3/PATMSSM.cpp (r44528 → r45276)

@@ -810 +810 @@
         patmR3PatchConvertSSM2Mem(pPatchRec, &patch);
 
-        Log(("Restoring patch %RRv -> %RRv\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset));
+        Log(("Restoring patch %RRv -> %RRv state %x\n", pPatchRec->patch.pPrivInstrGC, patmInfo.pPatchMemGC + pPatchRec->patch.pPatchBlockOffset, pPatchRec->patch.uState));
         bool ret = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
         Assert(ret);
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp (r44528 → r45276)

@@ -494 +494 @@
     {
         pgmUnlock(pVM);
+#ifndef DEBUG_sander
        AssertMsgFailed(("Range %#x not found!\n", GCPtr));
+#endif
        return VERR_INVALID_PARAMETER;
     }
trunk/src/VBox/VMM/VMMR3/SELM.cpp (r44528 → r45276)

@@ -80 +80 @@
 #include <iprt/string.h>
 
-
-/**
- * Enable or disable tracking of Shadow GDT/LDT/TSS.
- * @{
- */
-#define SELM_TRACK_SHADOW_GDT_CHANGES
-#define SELM_TRACK_SHADOW_LDT_CHANGES
-#define SELM_TRACK_SHADOW_TSS_CHANGES
-/** @} */
 
@@ -565 +556 @@
      * Uninstall guest GDT/LDT/TSS write access handlers.
      */
-    int rc;
+    int rc = VINF_SUCCESS;
     if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
     {
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
         pVM->selm.s.GuestGdtr.cbGdt = 0;
@@ -576 +569 @@
     if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
     }
     if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
         pVM->selm.s.GCSelTss = RTSEL_MAX;
@@ -619 +616 @@
     if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
     {
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GuestGdtr.pGdt = RTRCPTR_MAX;
         pVM->selm.s.GuestGdtr.cbGdt = 0;
     }
     if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
     }
     if (pVM->selm.s.GCPtrGuestTss != RTRCPTR_MAX)
     {
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
         rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestTss);
         AssertRC(rc);
+#endif
         pVM->selm.s.GCPtrGuestTss = RTRCPTR_MAX;
         pVM->selm.s.GCSelTss = RTSEL_MAX;
@@ -953 +956 @@
     }
 
+#ifdef VBOX_WITH_SAFE_STR
+    /** Use the guest's TR selector to plug the str virtualization hole. */
+    if (CPUMGetGuestTR(pVCpu, NULL) != 0)
+    {
+        Log(("SELM: Use guest TSS selector %x\n", CPUMGetGuestTR(pVCpu, NULL)));
+        aHyperSel[SELM_HYPER_SEL_TSS] = CPUMGetGuestTR(pVCpu, NULL);
+    }
+#endif
+
     /*
      * Work thru the copied GDT entries adjusting them for correct virtualization.
@@ -960 +972 @@
     {
         if (pGDTE->Gen.u1Present)
-            selmGuestToShadowDesc(pGDTE);
+            selmGuestToShadowDesc(pVM, pGDTE);
 
         /* Next GDT entry. */
@@ -990 +1002 @@
         VMR3Relocate(pVM, 0);
     }
-    else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
+    else
+#ifdef VBOX_WITH_SAFE_STR
+    if (    cbEffLimit >= SELM_HYPER_DEFAULT_BASE
+        || CPUMGetGuestTR(pVCpu, NULL) != 0)    /* Our shadow TR entry was overwritten when we synced the guest's GDT. */
+#else
+    if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
+#endif
         /* We overwrote all entries above, so we have to save them again. */
         selmR3SetupHyperGDTSelectors(pVM);
@@ -1011 +1029 @@
     {
         Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
-
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
         /*
          * [Re]Register write virtual handler for guest's GDT.
@@ -1025 +1043 @@
                                          0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
                                          "Guest GDT write access handler");
+# ifdef VBOX_WITH_RAW_RING1
+        /* Some guest OSes (QNX) share code and the GDT on the same page; PGMR3HandlerVirtualRegister doesn't support more than one handler, so we kick out the
+         * PATM handler as this one is more important.
+         * @todo fix this properly in PGMR3HandlerVirtualRegister
+         */
+        if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
+        {
+            LogRel(("selmR3UpdateShadowGdt: Virtual handler conflict %RGv -> kick out PATM handler for the higher priority GDT page monitor\n", GDTR.pGdt));
+            rc = PGMHandlerVirtualDeregister(pVM, GDTR.pGdt & PAGE_BASE_GC_MASK);
+            AssertRC(rc);
+
+            rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
+                                             GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
+                                             0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
+                                             "Guest GDT write access handler");
+        }
+# endif
         if (RT_FAILURE(rc))
             return rc;
-
+#endif
         /* Update saved Guest GDTR. */
         pVM->selm.s.GuestGdtr = GDTR;
@@ -1137 +1172 @@
              pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
 
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
         /*
          * [Re]Register write virtual handler for guest's GDT.
@@ -1146 +1182 @@
             AssertRC(rc);
         }
-#ifdef DEBUG
+# ifdef DEBUG
         if (pDesc->Gen.u1Present)
             Log(("LDT selector marked not present!!\n"));
-#endif
+# endif
         rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
                                          0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler");
@@ -1166 +1202 @@
             return rc;
         }
+#else
+        pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
+#endif
         pVM->selm.s.cbLdtLimit = cbLdt;
     }
@@ -1205 +1243 @@
     /** @todo investigate how intel handle various operations on half present cross page entries. */
     off = GCPtrLdt & (sizeof(X86DESC) - 1);
-    AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
+////    AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
 
     /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
@@ -1239 +1277 @@
     {
         if (pLDTE->Gen.u1Present)
-            selmGuestToShadowDesc(pLDTE);
+            selmGuestToShadowDesc(pVM, pLDTE);
 
         /* Next LDT entry. */
@@ -1438 +1476 @@
 }
 
+#ifdef SELM_TRACK_GUEST_GDT_CHANGES
 /**
  * \#PF Handler callback for virtual access handler ranges.
@@ -1465 +1503 @@
     return VINF_PGM_HANDLER_DO_DEFAULT;
 }
-
-
+#endif
+
+#ifdef SELM_TRACK_GUEST_LDT_CHANGES
 /**
  * \#PF Handler callback for virtual access handler ranges.
@@ -1493 +1532 @@
     return VINF_PGM_HANDLER_DO_DEFAULT;
 }
-
-
+#endif
+
+
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
 /**
  * \#PF Handler callback for virtual access handler ranges.
@@ -1526 +1567 @@
     return VINF_PGM_HANDLER_DO_DEFAULT;
 }
-
+#endif
 
 /**
@@ -1675 +1716 @@
                 selmSetRing1Stack(pVM, Tss.ss0 | 1, Tss.esp0);
                 pVM->selm.s.fSyncTSSRing0Stack = fNoRing1Stack = false;
+
+#ifdef VBOX_WITH_RAW_RING1
+                /* Update our TSS structure for the guest's ring 2 stack */
+                selmSetRing2Stack(pVM, (Tss.ss1 & ~1) | 2, Tss.esp1);
+
+                if (    (pVM->selm.s.Tss.ss2 != ((Tss.ss1 & ~2) | 1))
+                    ||  pVM->selm.s.Tss.esp2 != Tss.esp1)
+                {
+                    Log(("SELMR3SyncTSS: Updating TSS ring 1 stack to %04X:%08X from %04X:%08X\n", Tss.ss1, Tss.esp1, (pVM->selm.s.Tss.ss2 & ~2) | 1, pVM->selm.s.Tss.esp2));
+                }
+#endif
             }
         }
@@ -1711 +1763 @@
     if (cbMonitoredTss != 0)
     {
+#ifdef SELM_TRACK_GUEST_TSS_CHANGES
         rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
                                          0, selmR3GuestTSSWriteHandler,
                                          "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
         if (RT_FAILURE(rc))
         {
+# ifdef VBOX_WITH_RAW_RING1
+            /* Some guest OSes (QNX) share code and the TSS on the same page; PGMR3HandlerVirtualRegister doesn't support more than one handler, so we kick out the
+             * PATM handler as this one is more important.
+             * @todo fix this properly in PGMR3HandlerVirtualRegister
+             */
+            if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
+            {
+                LogRel(("SELMR3SyncTSS: Virtual handler conflict %RGv -> kick out PATM handler for the higher priority TSS page monitor\n", GCPtrTss));
+                rc = PGMHandlerVirtualDeregister(pVM, GCPtrTss & PAGE_BASE_GC_MASK);
+                AssertRC(rc);
+
+                rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrTss, GCPtrTss + cbMonitoredTss - 1,
+                                                 0, selmR3GuestTSSWriteHandler,
+                                                 "selmRCGuestTSSWriteHandler", 0, "Guest TSS write access handler");
+                if (RT_FAILURE(rc))
+                {
+                    STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
+                    return rc;
+                }
+            }
+# else
             STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
             return rc;
+# endif
         }
+#endif
         /* Update saved Guest TSS info. */
         pVM->selm.s.GCPtrGuestTss = GCPtrTss;
@@ -1888 +1963 @@
 VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
 {
-#ifdef VBOX_STRICT
+#if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
     PVMCPU pVCpu = VMMGetCpu(pVM);
@@ -2019 +2094 @@
 #endif /* !VBOX_STRICT */
 }
+
+# ifdef VBOX_WITH_SAFE_STR
+/**
+ * Validates the RawR0 TR shadow GDT entry
+ *
+ * @returns true if it matches.
+ * @returns false and assertions on mismatch..
+ * @param   pVM     Pointer to the VM.
+ */
+VMMR3DECL(bool) SELMR3CheckShadowTR(PVM pVM)
+{
+#  ifdef VBOX_STRICT
+    PX86DESC paGdt = pVM->selm.s.paGdtR3;
+
+    /*
+     * TSS descriptor
+     */
+    PX86DESC pDesc = &paGdt[pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS] >> 3];
+    RTRCPTR RCPtrTSS = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);
+
+    if (    pDesc->Gen.u16BaseLow   != RT_LOWORD(RCPtrTSS)
+        ||  pDesc->Gen.u8BaseHigh1  != RT_BYTE3(RCPtrTSS)
+        ||  pDesc->Gen.u8BaseHigh2  != RT_BYTE4(RCPtrTSS)
+        ||  pDesc->Gen.u16LimitLow  != sizeof(VBOXTSS) - 1
+        ||  pDesc->Gen.u4LimitHigh  != 0
+        ||  (pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL && pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY)
+        ||  pDesc->Gen.u1DescType   != 0 /* system */
+        ||  pDesc->Gen.u2Dpl        != 0 /* supervisor */
+        ||  pDesc->Gen.u1Present    != 1
+        ||  pDesc->Gen.u1Available  != 0
+        ||  pDesc->Gen.u1Long       != 0
+        ||  pDesc->Gen.u1DefBig     != 0
+        ||  pDesc->Gen.u1Granularity != 0 /* byte limit */
+        )
+    {
+        AssertFailed();
+        return false;
+    }
+#  endif
+    return true;
+}
+# endif
 
 #endif /* VBOX_WITH_RAW_MODE */
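Both the GDT and TSS monitors above now recover from VERR_PGM_HANDLER_VIRTUAL_CONFLICT by evicting the competing PATM handler and registering again. Factored out, the retry skeleton looks like this (illustrative helper; the handler function-pointer type name is an assumption, and the sketch presumes the VBox internal headers for the PGM types and calls used in the hunks):

    /* Illustrative sketch of the conflict-retry pattern shared by the GDT and
     * TSS hunks: if registration collides with an existing (PATM) handler on
     * the page, deregister that page's handler and try once more.  The
     * PFNPGMR3VIRTHANDLER type name is an assumption about the PGM headers. */
    static int selmRegisterWriteHandlerEvictingConflict(PVM pVM, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
                                                        PFNPGMR3VIRTHANDLER pfnHandlerR3,
                                                        const char *pszHandlerRC, const char *pszDesc)
    {
        int rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtr, GCPtrLast,
                                             0, pfnHandlerR3, pszHandlerRC, 0, pszDesc);
        if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
        {
            LogRel(("SELM: Virtual handler conflict %RGv -> kick out old handler\n", GCPtr));
            rc = PGMHandlerVirtualDeregister(pVM, GCPtr & PAGE_BASE_GC_MASK);
            if (RT_SUCCESS(rc))
                rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtr, GCPtrLast,
                                                 0, pfnHandlerR3, pszHandlerRC, 0, pszDesc);
        }
        return rc;
    }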
trunk/src/VBox/VMM/VMMR3/TRPM.cpp (r44528 → r45276)

@@ -1330 +1330 @@
     }
 
-    if (EMIsRawRing0Enabled(pVM))
+    if (    EMIsRawRing0Enabled(pVM)
+#ifdef VBOX_WITH_RAW_RING1
+        && !EMIsRawRing1Enabled(pVM)    /* can't deal with the ambiguity of ring 1 & 2 in the patch code. */
+#endif
+       )
     {
         /*
trunk/src/VBox/VMM/VMMR3/VMM.cpp (r45152 → r45276)

@@ -1257 +1257 @@
             EMR3FatalError(pVCpu, VERR_VMM_HYPER_CR3_MISMATCH);
         PGMMapCheck(pVM);
+# ifdef VBOX_WITH_SAFE_STR
+        SELMR3CheckShadowTR(pVM);
+# endif
 #endif
         int rc;