- Timestamp: Jul 3, 2009 12:57:21 PM
- Location: trunk/src/VBox/VMM
- Files: 2 added, 9 edited
trunk/src/VBox/VMM/EM.cpp
r21191 → r21196

The bulk of this change moves the raw-mode and hardware-assisted inner execution loops out of EM.cpp into the two newly added files, leaving EM.cpp with the shared outer logic.

- Includes: VBox/log.h, iprt/thread.h, iprt/assert.h, iprt/asm.h, iprt/semaphore.h, iprt/avl.h, VBox/param.h and VBox/err.h are dropped; VBox/dbgf.h, iprt/string.h and iprt/stream.h remain.
- Internal prototypes: the static declarations of emR3RawResumeHyper, emR3RawStep, emR3RawUpdateForceFlag, emR3RawForcedActions, emR3RawExecute, emR3RawGuestTrap, emR3PatchTrap, emR3RawPrivileged, emR3RawExecuteIOInstruction, emR3RawRingSwitch, emR3Reschedule, emR3HighPriorityPostForcedActions and emR3ForcedActions are removed; emR3HighPriorityPostForcedActions is re-declared with external linkage:

      int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);

  emR3RemStep, emR3RemExecute, emR3SingleStepExecRem and the inline emR3RawExecuteInstruction wrapper keep their declarations.
- The two EMHandleRCTmpl.h instantiations are deleted, since the template header now names its functions itself (see its hunk below):

      #define EMHANDLERC_WITH_PATM
      #define EMHANDLERC_NAME emR3RawHandleRC
      #include "EMHandleRCTmpl.h"

      #define EMHANDLERC_NAME emR3HwaccmHandleRC
      #include "EMHandleRCTmpl.h"

- EMR3RawSetMode() is removed from this file, taking the EMRAW_NONE / EMRAW_RING3_ENABLE / EMRAW_RING3_DISABLE / EMRAW_RING0_ENABLE / EMRAW_RING0_DISABLE switch over pVM->fRawR3Enabled and pVM->fRawR0Enabled with it. The "Raise a fatal error" helper that follows is untouched.
The debug-stepping helpers leave EM.cpp next; the diff shows only old line numbers for them, so they move wholesale:

- emR3RawResumeHyper() resumes hypervisor execution after a debug event, where the current guest state is potentially out of sync: CPUMRawEnter, set X86_EFL_RF on the hyper EFLAGS, VMMR3ResumeHyper, CPUMRawLeave, clear VMCPU_FF_RESUME_GUEST_MASK, then run the status through emR3HighPriorityPostForcedActions, emR3RawHandleRC and emR3RawUpdateForceFlag.
- emR3RawStep() single-steps either the guest or the hypervisor depending on the debug state. For guest code it first processes the vital pre-raw forced actions (ignoring pending interrupts and timers), then sets X86_EFL_TF | X86_EFL_RF, loops VMMR3ResumeHyper or VMMR3RawRunGC while the status is VINF_SUCCESS or VINF_EM_RAW_INTERRUPT, clears TF again ("too bad if the guest is trying to single step too"), and funnels the result through the same three handlers.
- emR3HwAccStep() (DEBUG builds only) is the hardware-assisted twin, driving VMMR3HwAccRunGC and emR3HwaccmHandleRC.
- emR3SingleStepExecRaw() and emR3SingleStepExecHwAcc() are the iteration drivers: per iteration they call DBGFR3PrgStep and DBGFR3DisasInstrCurrentLog("RSS: ") and step until a non-success status; the HWACC variant also stops when HWACCMR3CanExecuteGuest() declines and returns VINF_EM_RESCHEDULE_REM on success.

emR3SingleStepExecRem() stays behind in EM.cpp. The stepping pattern all of these share is distilled below.
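A minimal model of that shared pattern, with stand-in types and status codes rather than the real VMM API:

    #include <stdint.h>

    #define X86_EFL_TF (1u << 8)    /* trap flag: #DB after one instruction */
    #define X86_EFL_RF (1u << 16)   /* resume flag: no #DB on the first one */

    enum { VINF_SUCCESS = 0, VINF_EM_RAW_INTERRUPT = 1, VINF_EM_DBG_STEPPED = 2 };

    typedef struct { uint32_t eflags; } CPUCTX;

    /* Stand-in for VMMR3RawRunGC / VMMR3ResumeHyper. */
    static int runGuest(CPUCTX *pCtx) { (void)pCtx; return VINF_EM_DBG_STEPPED; }

    static int singleStep(CPUCTX *pCtx)
    {
        pCtx->eflags |= X86_EFL_TF | X86_EFL_RF;
        int rc;
        do
            rc = runGuest(pCtx);           /* host interrupts just re-enter */
        while (rc == VINF_SUCCESS || rc == VINF_EM_RAW_INTERRUPT);
        pCtx->eflags &= ~X86_EFL_TF;       /* never leave TF set behind */
        return rc;
    }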
emR3RawExecuteInstructionWorker() and its emR3RawExecuteInstruction() wrapper (which merely discards pszPrefix in non-logging builds) move out too. The worker disassembles and logs when a prefix is supplied, then deals with PATM: an EIP inside patch code goes to PATMR3HandleTrap(), whose result is mapped as follows. VINF_SUCCESS continues at the new EIP, but with IF=1 the last patch instruction must be emulated first ("PATCHIR", since an sti or sysexit there cannot be interrupted), and an iret that set IF with a pending irq/event is emulated as "PATCHIRET"; VINF_PATCH_EMULATE_INSTR emulates a single instruction; VERR_PATCH_DISABLED hands the code to REM, with the same IF=1 caveat; VINF_PATCH_CONTINUE keeps executing the patch, usually due to a write-monitored stack; anything else is VERR_IPE_UNEXPECTED_STATUS. A disabled #if 0 experiment that interpreted MOV/AND/OR/XOR/POP/INC/DEC/XCHG through EMInterpretInstructionCPU goes along. The fallback path takes the REM lock, flushes the recompiler TLB when the VCPU changed (CPUMSetChangedFlags with CPUM_CHANGED_ALL), calls REMR3EmulateInstruction, and notifies HWACCM when built with EM_NOTIFY_HWACCM.

emR3RawExecuteIOInstruction() follows: it disassembles the instruction, routes unprefixed IN/OUT to IOMInterpretIN/IOMInterpretOUT and REP-prefixed INSB/INSWD/OUTSB/OUTSWD to IOMInterpretINS/IOMInterpretOUTS, advances RIP past the instruction on IOM_SUCCESS, converts VINF_EM_RAW_GUEST_TRAP into an emR3RawGuestTrap() call, and otherwise falls back to emR3RawExecuteInstruction("IO: "). A @todo notes that falling back to the recompiler would be better, since I/O instructions tend to come in packages of more than one. A model of the dispatch follows.
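The dispatch itself is a small decision table; the sketch below models it with illustrative opcode and prefix constants, not VBox's DIS values:

    enum Op { OP_IN, OP_OUT, OP_INSB, OP_INSWD, OP_OUTSB, OP_OUTSWD, OP_OTHER };
    enum { PREFIX_REP = 1u << 0, PREFIX_REPNE = 1u << 1 };
    enum { VINF_OK = 0, VINF_EM_RAW_EMULATE_INSTR = 0x2711 };

    struct Insn { enum Op op; unsigned fPrefixes; };

    /* Stand-ins for IOMInterpretIN/OUT/INS/OUTS. */
    static int ioIn(const struct Insn *p)   { (void)p; return VINF_OK; }
    static int ioOut(const struct Insn *p)  { (void)p; return VINF_OK; }
    static int ioIns(const struct Insn *p)  { (void)p; return VINF_OK; }
    static int ioOuts(const struct Insn *p) { (void)p; return VINF_OK; }

    static int executeIOInstruction(const struct Insn *p)
    {
        if (!(p->fPrefixes & (PREFIX_REP | PREFIX_REPNE)))
        {
            if (p->op == OP_IN)  return ioIn(p);
            if (p->op == OP_OUT) return ioOut(p);
        }
        else if (p->fPrefixes & PREFIX_REP)
        {
            if (p->op == OP_INSB  || p->op == OP_INSWD)  return ioIns(p);
            if (p->op == OP_OUTSB || p->op == OP_OUTSWD) return ioOuts(p);
        }
        return VINF_EM_RAW_EMULATE_INSTR;   /* unhandled: full emulation */
    }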
emR3RawGuestTrap() is next. It queries the active trap with TRPMQueryTrapAll() and asserts if there is none. With HWACCM enabled the trap is simply forwarded in hardware (VINF_EM_RESCHEDULE_HWACC). An experimental block ("review, disable if it causes trouble") sends ring-0 traps inside patch code to emR3PatchTrap(), catching cases like #DE that TRPM does not handle specially in RC. For gates still marked unpatched, CSAMR3CheckGates() re-checks them and, when a handler got installed, the trap is forwarded with TRPMForwardTrap() after re-running EMR3CheckRawForcedActions() (the IDT or GDT might be out of sync), yielding VINF_EM_RESCHEDULE_RAW. Kernel code that traps is scanned with CSAMR3CheckCodeEx(), as "we might not get another chance". Trap-specific handling: #UD is disassembled and, when the instruction is MONITOR or MWAIT and CPUID advertises X86_CPUID_FEATURE_ECX_MONITOR, interpreted directly (a sketch of the feature test follows); #GP caused by port I/O resets the trap and emulates the instruction without consulting the TSS I/O bitmap ("it's not like this lazy approach really makes things worse"); trap 14 (#PF) stores uCR2 into pCtx->cr2 in this deliberately delayed fashion; everything else ends in VINF_EM_RESCHEDULE_REM, after a LOG_ENABLED dump of the guest state and the PGMGstGetPage() information for CR2.
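That feature test is CPUID leaf 1, ECX bit 3. A stand-alone sketch, where guestCpuId stands in for CPUMGetGuestCpuId:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CPUID_FEATURE_ECX_MONITOR (1u << 3)   /* MONITOR/MWAIT */

    /* Stand-in for CPUMGetGuestCpuId(pVCpu, 1, ...); only ECX matters here. */
    static void guestCpuId(uint32_t uLeaf, uint32_t *pEcx)
    {
        (void)uLeaf;
        *pEcx = X86_CPUID_FEATURE_ECX_MONITOR;   /* pretend support exists */
    }

    static bool guestHasMonitorMwait(void)
    {
        uint32_t ecx;
        guestCpuId(1, &ecx);
        return (ecx & X86_CPUID_FEATURE_ECX_MONITOR) != 0;
    }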
emR3RawRingSwitch() handles sysenter, syscall and call gates: a sysenter with SysEnter.cs != 0 gets a PATM patch installed at the flattened CS:EIP (SELMToFlat(), with PATMFL_CODE32 for 32-bit code) and reschedules to raw mode on success; with VBOX_WITH_STATISTICS the sysenter/sysexit/syscall/sysret counters are bumped; otherwise the instruction goes to REM ("RSWITCH: ").

emR3PatchTrap() handles #PF and #GP inside patch code. VINF_PATM_PATCH_INT3 and VINF_PATM_PATCH_TRAP_GP arrive without an active trap, so the trap number (3 respectively X86_XCPT_GP) is synthesized with no error code; otherwise the trap is queried and then reset, since the original instruction will be executed again. For traps other than #DB, a LOG_ENABLED block disassembles the patch code and, for IRET, peeks at the return frame on the guest stack, because "iret crashes are bad as we have already changed the flags on the stack" (the frame layout is sketched below). PATMR3HandleTrap() then decides: VINF_SUCCESS executes the faulting instruction at the new EIP, with a Windows XP workaround that removes an int3 patch when a #GP hits an intentionally faulting iret (XP inspects the opcode afterwards, and the overwriting patch would lead to blue screens); VINF_PATCH_EMULATE_INSTR emulates one instruction ("PATCHEMUL: "); VERR_PATCH_DISABLED hands the code to REM, first emulating the last patch instruction if IF=1; VINF_PATCH_CONTINUE forces continued patch execution, usually due to a write-monitored stack; anything else is fatal (VERR_IPE_UNEXPECTED_STATUS).
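The IRET frame walk follows the architectural layout: EIP, CS and EFLAGS are always present, SS:ESP only when returning to V86 mode or ring 3, and the four extra data selectors only for V86. A simplified model, where readGuestStack stands in for PGMPhysSimpleReadGCPtr:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFL_VM  (1u << 17)
    #define X86_SEL_RPL 3u

    /* Stand-in for PGMPhysSimpleReadGCPtr; returns 0 on success. */
    static int readGuestStack(uint32_t gcPtr, uint32_t *pValue)
    {
        (void)gcPtr; *pValue = 0; return 0;
    }

    static void dumpIretFrame(uint32_t esp)
    {
        uint32_t eip, selCS, fl;
        if (   readGuestStack(esp,     &eip)
            || readGuestStack(esp + 4, &selCS)
            || readGuestStack(esp + 8, &fl))
            return;

        if ((fl & X86_EFL_VM) || (selCS & X86_SEL_RPL) == 3)
        {
            uint32_t newEsp, selSS;            /* CPL change: SS:ESP present */
            readGuestStack(esp + 12, &newEsp);
            readGuestStack(esp + 16, &selSS);
            if (fl & X86_EFL_VM)               /* V86 return: ES/DS/FS/GS too */
            {
                uint32_t selES, selDS, selFS, selGS;
                readGuestStack(esp + 20, &selES);
                readGuestStack(esp + 24, &selDS);
                readGuestStack(esp + 28, &selFS);
                readGuestStack(esp + 32, &selGS);
                printf("IRET->VM: DS=%04X ES=%04X FS=%04X GS=%04X\n",
                       selDS, selES, selFS, selGS);
            }
            printf("IRET: %04X:%08X efl=%08X ss:esp=%04X:%08X\n",
                   selCS, eip, fl, selSS, newEsp);
        }
        else
            printf("IRET: %04X:%08X efl=%08X\n", selCS, eip, fl);
    }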
emR3RawPrivileged() also leaves. Executing inside a generated patch jump is a fatal conflict (VERR_EM_RAW_PATCH_CONFLICT); ring-0 code outside patch code gets a PATM patch installed (the same SELMToFlat()/PATMFL_CODE32 recipe as the sysenter case) and returns VINF_SUCCESS on success. With VBOX_WITH_STATISTICS the disassembled instruction is counted: invlpg, iret, cli (also recorded per RIP via emR3RecordCli), sti, the port I/O opcodes (which assert, since a privileged exception from port I/O is unexpected here), mov cr reads and writes per control register, mov drx, lldt/lidt/lgdt, sysenter/sysexit/syscall/sysret, hlt, and a misc bucket. For 32-bit ring-0 code a few instructions are then interpreted in place: CLI clears IF, advances RIP and returns VINF_EM_RESCHEDULE_REM ("must go to the recompiler now!"); STI sets IF, arms the one-instruction interrupt-inhibit window via EMSetInhibitInterruptsPC(rip + opsize) and returns VINF_SUCCESS (both are modeled below); HLT inside patch code is first translated back to the original instruction with PATMR3PatchToGCPtr(), disabling the patch on a PATMTRANS_OVERWRITTEN conflict, then falls through to the MOV CR/DR path; MOV CR/DR goes through EMInterpretInstructionCPU(), and a successful CR write forces VINF_EM_RESCHEDULE since the execution or paging mode may have changed, with an extra recompiler switch when a CR0 write inside patch code clears any of WP/PG/PE. Failed interpretation falls back to emR3PatchTrap() (when inside patch code, as VINF_PATM_PATCH_TRAP_GP) or to emR3RawExecuteInstruction("PRIV").
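The CLI/STI pair is worth isolating for the STI interrupt shadow: interrupts stay blocked for exactly one more instruction, which the real code records through EMSetInhibitInterruptsPC(). A simplified model, not the VMM API:

    #include <stdint.h>

    #define X86_EFL_IF (1u << 9)

    enum { VINF_SUCCESS = 0, VINF_EM_RESCHEDULE_REM = 1 };

    typedef struct
    {
        uint32_t eflags;
        uint64_t rip;
        uint64_t inhibitIntsPC;   /* interrupts blocked while rip == this */
    } CPU;

    static int emulateCli(CPU *pCpu, unsigned cbInstr)
    {
        pCpu->eflags &= ~X86_EFL_IF;
        pCpu->rip    += cbInstr;
        return VINF_EM_RESCHEDULE_REM;   /* IF=0: stop running raw mode */
    }

    static int emulateSti(CPU *pCpu, unsigned cbInstr)
    {
        pCpu->eflags        |= X86_EFL_IF;
        pCpu->inhibitIntsPC  = pCpu->rip + cbInstr;  /* shadow: next insn */
        pCpu->rip           += cbInstr;
        return VINF_SUCCESS;
    }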
emR3RawUpdateForceFlag() moves too: when EIP is inside patch code, reschedule attempts (VINF_EM_RESCHEDULE and VINF_EM_RESCHEDULE_REM) are downgraded to VINF_SUCCESS and fForceRAW is set, because patch code must never be interrupted (cli patches can be big blocks but always run with IF=0, and other patches replace single instructions and should be atomic); outside patch code the flag is cleared. The public EMR3CheckRawForcedActions() wrapper goes with its worker, emR3RawForcedActions(), which processes VM_FF_HIGH_PRIORITY_PRE_RAW_MASK in an order the comment calls vitally important: sync the GDT/LDT selector tables (SELMR3UpdateFromCPUM(), which may itself raise VMCPU_FF_SELM_SYNC_TSS); sync the IDT, preceded by a PGMSyncCR3() when a CR3 sync is pending while raw ring-0 and CSAM are enabled, because CSAMR3CheckGates() may call PGMPrefetchPage() and PGMShwModifyPage(); sync the TSS (SELMR3SyncTSS()); sync the page directory (PGMSyncCR3()) and prefetch the EIP and ESP pages, retrying after another CR3 sync if the prefetch asks for one; allocate handy pages when VM_FF_PGM_NEED_HANDY_PAGES is pending; and finally return VINF_EM_NO_MEMORY if VM_FF_PGM_NO_MEMORY is set, a check that must always follow the handy-page allocation. The ordering contract looks like this in miniature:
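A stand-alone model, with stand-ins for SELMR3UpdateFromCPUM, TRPMR3SyncIDT, SELMR3SyncTSS and PGMSyncCR3:

    enum
    {
        FF_SELM_SYNC_GDT_LDT = 1u << 0,
        FF_TRPM_SYNC_IDT     = 1u << 1,
        FF_SELM_SYNC_TSS     = 1u << 2,
        FF_PGM_SYNC_CR3      = 1u << 3,
    };

    static int syncGdtLdt(void) { return 0; }   /* SELMR3UpdateFromCPUM */
    static int syncIdt(void)    { return 0; }   /* TRPMR3SyncIDT        */
    static int syncTss(void)    { return 0; }   /* SELMR3SyncTSS        */
    static int syncCr3(void)    { return 0; }   /* PGMSyncCR3           */

    static int processForcedActions(unsigned fPending)
    {
        int rc;   /* the order is vitally important */
        if ((fPending & FF_SELM_SYNC_GDT_LDT) && (rc = syncGdtLdt()) != 0)
            return rc;   /* may itself raise FF_SELM_SYNC_TSS */
        if ((fPending & FF_TRPM_SYNC_IDT)     && (rc = syncIdt()) != 0)
            return rc;
        if ((fPending & FF_SELM_SYNC_TSS)     && (rc = syncTss()) != 0)
            return rc;
        if ((fPending & FF_PGM_SYNC_CR3)      && (rc = syncCr3()) != 0)
            return rc;
        return 0;
    }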
emR3RawExecute(), the raw-mode inner loop (the outer loop lives in EMR3ExecuteVM()), is the biggest single move. Each iteration: VBOX_STRICT asserts the preconditions (no pending REM interrupt, an SS RPL of 0 or 3 unless in V86 mode, IF set or PATMShouldUseRawMode(), and no PGM mapping conflicts); processes the high-priority pre-raw forced actions; calls CPUMRawEnter(), which adjusts IF and the SS/CS RPL and stashes state with PATM before ring-0 execution; CSAM-scans supervisor code that is neither user mode, V86 code nor patch code ("don't bother with user mode or V86 code"), re-running the forced actions if the scan raised any; logs the RV86/RR0/RR3 state; runs the guest with VMMR3RawRunGC(); restores the real CPU state with CPUMRawLeave(), clears VMCPU_FF_RESUME_GUEST_MASK and handles the high-priority post-execution forced actions; strict builds then verify TSS consistency (SELMR3CheckTSS()) and log when patch code was interrupted for an unexpected status. Status processing: anything in VINF_EM_FIRST..VINF_EM_LAST breaks out to the outer loop, everything else goes through emR3RawHandleRC() and emR3RawUpdateForceFlag(); the iteration ends with timer polling (VBOX_HIGH_RES_TIMERS_HACK) and emR3ForcedActions() when anything outside the pre-raw mask is pending. Stripped of statistics, logging and strict checks, the loop reduces to the skeleton below.
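All callees in this sketch are stand-ins for the routines named above:

    enum
    {
        VINF_SUCCESS  = 0,
        VINF_EM_FIRST = 0x100, VINF_EM_LAST = 0x1ff,   /* illustrative range */
    };

    static int pendingPreRawFFs(void) { return 0; }
    static int processPreRawFFs(void) { return VINF_SUCCESS; }
    static int rawEnter(void)         { return VINF_SUCCESS; } /* CPUMRawEnter    */
    static int runGuest(void)         { return 0x100; }        /* VMMR3RawRunGC   */
    static int rawLeave(int rc)       { return rc; }           /* CPUMRawLeave    */
    static int postFFs(int rc)        { return rc; }
    static int handleRc(int rc)       { return rc; }           /* emR3RawHandleRC */

    static int rawExecuteLoop(void)
    {
        int rc = VINF_SUCCESS;
        for (;;)
        {
            if (pendingPreRawFFs() && (rc = processPreRawFFs()) != VINF_SUCCESS)
                break;
            if ((rc = rawEnter()) != VINF_SUCCESS)
                break;                     /* could not set up ring-0 state */
            rc = rawLeave(runGuest());     /* run, then restore real state  */
            rc = postFFs(rc);              /* high-priority post-exec work  */
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;                     /* EM status: up to the outer loop */
            rc = handleRc(rc);
            if (rc != VINF_SUCCESS)
                break;
        }
        return rc;
    }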
*/2725 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)2726 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))2727 {2728 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);2729 if (rc != VINF_SUCCESS)2730 break;2731 }2732 2733 #ifdef LOG_ENABLED2734 /*2735 * Log important stuff before entering GC.2736 */2737 if (TRPMHasTrap(pVCpu))2738 Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs, (RTGCPTR)pCtx->rip));2739 2740 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));2741 2742 if (pVM->cCPUs == 1)2743 {2744 if (pCtx->eflags.Bits.u1VM)2745 Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));2746 else if (CPUMIsGuestIn64BitCodeEx(pCtx))2747 Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));2748 else2749 Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));2750 }2751 else2752 {2753 if (pCtx->eflags.Bits.u1VM)2754 Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));2755 else if (CPUMIsGuestIn64BitCodeEx(pCtx))2756 Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));2757 else2758 Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));2759 }2760 #endif /* LOG_ENABLED */2761 2762 /*2763 * Execute the code.2764 */2765 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHwAccEntry, a);2766 STAM_PROFILE_START(&pVCpu->em.s.StatHwAccExec, x);2767 rc = VMMR3HwAccRunGC(pVM, pVCpu);2768 STAM_PROFILE_STOP(&pVCpu->em.s.StatHwAccExec, x);2769 2770 /*2771 * Deal with high priority post execution FFs before doing anything else.2772 */2773 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);2774 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)2775 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))2776 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);2777 2778 /*2779 * Process the returned status code.2780 */2781 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)2782 break;2783 2784 rc = emR3HwaccmHandleRC(pVM, pVCpu, pCtx, rc);2785 if (rc != VINF_SUCCESS)2786 break;2787 2788 /*2789 * Check and execute forced actions.2790 */2791 #ifdef VBOX_HIGH_RES_TIMERS_HACK2792 TMTimerPollVoid(pVM, pVCpu);2793 #endif2794 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK)2795 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_MASK))2796 {2797 rc = emR3ForcedActions(pVM, pVCpu, rc);2798 if ( rc != VINF_SUCCESS2799 && rc != VINF_EM_RESCHEDULE_HWACC)2800 {2801 *pfFFDone = true;2802 break;2803 }2804 }2805 }2806 2807 /*2808 * Return to outer loop.2809 */2810 #if defined(LOG_ENABLED) && defined(DEBUG)2811 RTLogFlush(NULL);2812 #endif2813 return rc;2814 }2815 2816 2817 /**2818 1034 * Decides whether to execute RAW, HWACC or REM. 2819 1035 * … … 2823 1039 * @param pCtx The CPU context. 
2824 1040 */ 2825 staticEMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)1041 EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2826 1042 { 2827 1043 /* … … 2978 1194 * @param rc The current rc. 2979 1195 */ 2980 staticint emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)1196 int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc) 2981 1197 { 2982 1198 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT)) … … 3016 1232 * 3017 1233 */ 3018 staticint emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)1234 int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc) 3019 1235 { 3020 1236 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a); -
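The EM.cpp hunks above do two things: the raw-mode and VT-x/AMD-V inner loops (emR3RawExecute, emR3HwAccExecute) leave this file for the two newly added sources, and the helpers that stay behind (emR3Reschedule, emR3HighPriorityPostForcedActions, emR3ForcedActions) lose their static linkage so the new per-mode files can call back into EM.cpp. A minimal sketch of the resulting dispatch, assuming the outer loop in EMR3ExecuteVM() picks the inner loop from the EMSTATE returned by emR3Reschedule(); the control flow is simplified and the local variable names are invented for the example:

    /* Sketch only: select the inner execution loop for the current state.
     * emR3RawExecute() now lives in EMRaw.cpp and emR3HwAccExecute() in
     * EMHwaccm.cpp; both are declared in EMInternal.h (see below). */
    bool fFFDone = false;
    int  rcLoop;
    switch (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx))
    {
        case EMSTATE_RAW:                       /* software virtualization */
            rcLoop = emR3RawExecute(pVM, pVCpu, &fFFDone);
            break;
        case EMSTATE_HWACC:                     /* VT-x / AMD-V */
            rcLoop = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
            break;
        default:                                /* fall back to the recompiler */
            rcLoop = emR3RemExecute(pVM, pVCpu, &fFFDone);
            break;
    }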
-
trunk/src/VBox/VMM/EMHandleRCTmpl.h
r21192 r21196
 /*
- * Copyright (C) 2006-2007 Sun Microsystems, Inc.
+ * Copyright (C) 2006-2009 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
…
 * additional information or have any questions.
 */
+#ifndef __EMHandleRCTmpl_h__
+#define __EMHandleRCTmpl_h__

 /**
…
 * @param   pCtx    The guest cpu context.
 */
-static int EMHANDLERC_NAME(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
+#ifdef EMHANDLERC_WITH_PATM
+int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
+#elif defined(EMHANDLERC_WITH_HWACCM)
+int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
+#endif
 {
     switch (rc)
…
             break;

+#ifdef EMHANDLERC_WITH_PATM
         /*
          * Privileged instruction.
          */
         case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
-#ifdef EMHANDLERC_WITH_PATM
         case VINF_PATM_PATCH_TRAP_GP:
-#endif
             rc = emR3RawPrivileged(pVM, pVCpu);
             break;

-        /*
-         * Got a trap which needs dispatching.
-         */
         case VINF_EM_RAW_GUEST_TRAP:
-#ifdef EMHANDLERC_WITH_PATM
+            /*
+             * Got a trap which needs dispatching.
+             */
             if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
             {
…
                 break;
             }
-#endif
             rc = emR3RawGuestTrap(pVM, pVCpu);
             break;

-#ifdef EMHANDLERC_WITH_PATM
         /*
          * Trap in patch code.
…
             rc = VINF_EM_RESCHEDULE_REM;
             break;
-#endif /* EMHANDLERC_WITH_PATM */

         /*
…
             rc = emR3RawRingSwitch(pVM, pVCpu);
             break;
+#endif /* EMHANDLERC_WITH_PATM */

         /*
…
             break;

+#ifdef EMHANDLERC_WITH_HWACCM
         /*
          * (MM)IO intensive code block detected; fall back to the recompiler for better performance
…
             rc = HWACCMR3EmulateIoBlock(pVM, pCtx);
             break;
+#endif

 #ifdef EMHANDLERC_WITH_PATM
…
             rc = emR3RawExecuteInstruction(pVM, pVCpu, "PD FAULT: ");
             break;
-#endif
-
         case VINF_EM_RAW_EMULATE_INSTR_HLT:
             /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
             rc = emR3RawPrivileged(pVM, pVCpu);
             break;
+#endif

 #ifdef EMHANDLERC_WITH_PATM
…

         case VINF_PATCH_EMULATE_INSTR:
+#else
+        case VINF_EM_RAW_GUEST_TRAP:
 #endif
         case VINF_EM_RAW_EMULATE_INSTR:
…
             break;

-#ifndef EMHANDLERC_WITH_PATM
+#ifdef EMHANDLERC_WITH_HWACCM
         /*
          * Up a level, after HwAccM have done some release logging.
…
 }

-#undef EMHANDLERC_NAME
-#undef EMHANDLERC_WITH_PATM
-
+#endif
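With EMHANDLERC_NAME gone, the handler's identifier is now baked into the template: defining EMHANDLERC_WITH_PATM emits emR3RawHandleRC(), defining EMHANDLERC_WITH_HWACCM emits emR3HwaccmHandleRC(), and the same macro gates the PATM-only and HWACCM-only cases. A sketch of the presumed instantiation sites in the two newly added files (EMRaw.cpp and EMHwaccm.cpp are not part of this diff, so treat the exact lines as assumptions):

/* EMRaw.cpp (assumed): instantiate the raw-mode handler once. */
#define EMHANDLERC_WITH_PATM
#include "EMHandleRCTmpl.h"     /* expands to emR3RawHandleRC() */

/* EMHwaccm.cpp (assumed): instantiate the VT-x/AMD-V handler once. */
#define EMHANDLERC_WITH_HWACCM
#include "EMHandleRCTmpl.h"     /* expands to emR3HwaccmHandleRC() */

The new __EMHandleRCTmpl_h__ include guard means the template can expand at most once per translation unit, which fits the split: each handler now lives in its own source file, where EM.cpp previously included the template twice with different EMHANDLERC_NAME values.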
-
trunk/src/VBox/VMM/EMInternal.h
r20530 r21196
 /** @} */

+
+int     emR3HwAccExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+int     emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+int     emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
+int     emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
+EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+int     emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
+int     emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
+int     emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
+int     emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
+int     emR3RawStep(PVM pVM, PVMCPU pVCpu);
+
 RT_C_DECLS_END

-
trunk/src/VBox/VMM/Makefile.kmk
r20998 r21196
        DBGFSym.cpp \
        EM.cpp \
+       EMRaw.cpp \
+       EMHwaccm.cpp \
        IOM.cpp \
        GMM.cpp \
-
trunk/src/VBox/VMM/VMM.cpp
r21094 r21196
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitch,     STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitch",      STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetRingSwitchInt,  STAMTYPE_COUNTER, "/VMM/RZRet/RingSwitchInt",   STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_RING_SWITCH_INT returns.");
-    STAM_REG(pVM, &pVM->vmm.s.StatRZRetExceptionPrivilege, STAMTYPE_COUNTER, "/VMM/RZRet/ExceptionPrivilege", STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EXCEPTION_PRIVILEGED returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetStaleSelector,  STAMTYPE_COUNTER, "/VMM/RZRet/StaleSelector",   STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_STALE_SELECTOR returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetIRETTrap,       STAMTYPE_COUNTER, "/VMM/RZRet/IRETTrap",        STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_IRET_TRAP returns.");
…
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPATMDuplicateFn, STAMTYPE_COUNTER, "/VMM/RZRet/PATMDuplicateFn", STAMUNIT_OCCURENCES, "Number of VINF_PATM_DUPLICATE_FUNCTION returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMChangeMode,  STAMTYPE_COUNTER, "/VMM/RZRet/PGMChangeMode",   STAMUNIT_OCCURENCES, "Number of VINF_PGM_CHANGE_MODE returns.");
-    STAM_REG(pVM, &pVM->vmm.s.StatRZRetEmulHlt,        STAMTYPE_COUNTER, "/VMM/RZRet/EmulHlt",         STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_EMULATE_INSTR_HLT returns.");
     STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest",  STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns.");

-
trunk/src/VBox/VMM/VMMInternal.h
r20992 r21196
     STAMCOUNTER StatRZRetRingSwitch;
     STAMCOUNTER StatRZRetRingSwitchInt;
-    STAMCOUNTER StatRZRetExceptionPrivilege;
     STAMCOUNTER StatRZRetStaleSelector;
     STAMCOUNTER StatRZRetIRETTrap;
…
     STAMCOUNTER StatRZRetPATMDuplicateFn;
     STAMCOUNTER StatRZRetPGMChangeMode;
-    STAMCOUNTER StatRZRetEmulHlt;
     STAMCOUNTER StatRZRetPendingRequest;
     STAMCOUNTER StatRZCallPDMLock;
-
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r21144 r21196
         case SVM_EXIT_MWAIT_ARMED:
         case SVM_EXIT_TASK_SWITCH:          /* can change CR3; emulate */
-            rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
+            rc = VERR_EM_INTERPRETER;
             break;

-
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r21144 r21196
     case VMX_EXIT_RSM:                  /* 17 Guest software attempted to execute RSM in SMM. */
         AssertFailed(); /* can't happen. */
-        rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
+        rc = VERR_EM_INTERPRETER;
         break;

…
     case VMX_EXIT_VMXON:                /* 27 Guest software executed VMXON. */
         /** @todo inject #UD immediately */
-        rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
+        rc = VERR_EM_INTERPRETER;
         break;

…
     case VMX_EXIT_RDMSR:                /* 31 RDMSR. Guest software attempted to execute RDMSR. */
     case VMX_EXIT_WRMSR:                /* 32 WRMSR. Guest software attempted to execute WRMSR. */
+    case VMX_EXIT_MONITOR:              /* 39 Guest software attempted to execute MONITOR. */
+    case VMX_EXIT_PAUSE:                /* 40 Guest software attempted to execute PAUSE. */
         /* Note: If we decide to emulate them here, then we must sync the MSRs that could have been changed (sysenter, fs/gs base)!!! */
         rc = VERR_EM_INTERPRETER;
-        break;
-
-    case VMX_EXIT_MONITOR:              /* 39 Guest software attempted to execute MONITOR. */
-    case VMX_EXIT_PAUSE:                /* 40 Guest software attempted to execute PAUSE. */
-        rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
         break;

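Both the AMD-V and VT-x exit handlers stop returning VINF_EM_RAW_EXCEPTION_PRIVILEGED for these MONITOR/MWAIT/PAUSE/RSM/VMX-instruction/task-switch exits and hand back VERR_EM_INTERPRETER instead. That status asks ring 3 to interpret the offending instruction, whereas the privileged-exception path ends in emR3RawPrivileged(), a raw-mode (PATM) service that has no business with hardware-assisted guests — which is also why that case moved under EMHANDLERC_WITH_PATM in the template above. A rough sketch of how the two codes diverge in the ring-3 handler; the fallback helper and prefix string are assumptions, not lines from this changeset:

    /* Illustrative handling of the two status codes in ring 3.
     * VERR_EM_INTERPRETER: emulate the single guest instruction and resume.
     * VINF_EM_RAW_EXCEPTION_PRIVILEGED: raw-mode-only privileged-instruction
     * service; after this changeset no VT-x/AMD-V exit produces it. */
    switch (rc)
    {
        case VERR_EM_INTERPRETER:
            rc = emR3RawExecuteInstruction(pVM, pVCpu, "EMUL: ");   /* assumed */
            break;

        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:                      /* raw mode only */
            rc = emR3RawPrivileged(pVM, pVCpu);
            break;
    }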
-
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r20984 r21196
         case VINF_EM_RAW_RING_SWITCH_INT:
             STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
-            break;
-        case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
-            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetExceptionPrivilege);
             break;
         case VINF_EM_RAW_STALE_SELECTOR:
…
             STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
             break;
-        case VINF_EM_RAW_EMULATE_INSTR_HLT:
-            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulHlt);
-            break;
         case VINF_EM_PENDING_REQUEST:
             STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
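With no ring-0 path returning VINF_EM_RAW_EXCEPTION_PRIVILEGED or VINF_EM_RAW_EMULATE_INSTR_HLT any more, their counting cases go away here, matching the counter members dropped from VMMInternal.h and the registrations dropped from VMM.cpp: a return status and its statistics travel as a triple. For illustration, the full pattern for a single hypothetical status code VINF_EM_RAW_FOO (the name is invented for the example):

/* VMMInternal.h: the counter member. */
STAMCOUNTER StatRZRetFoo;

/* VMM.cpp: register it so it shows up under /VMM/RZRet/ in the statistics. */
STAM_REG(pVM, &pVM->vmm.s.StatRZRetFoo, STAMTYPE_COUNTER, "/VMM/RZRet/Foo",
         STAMUNIT_OCCURENCES, "Number of VINF_EM_RAW_FOO returns.");

/* VMMR0.cpp: bump it each time ring 0 hands the code back to ring 3. */
case VINF_EM_RAW_FOO:
    STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetFoo);
    break;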