Changeset 99897 in vbox for trunk/src/VBox/VMM
Timestamp:
    May 22, 2023 11:43:38 AM (21 months ago)
Location:
    trunk/src/VBox/VMM
Files:
    7 edited
trunk/src/VBox/VMM/VMMR3/DBGF.cpp
r99775 → r99897

@@ 432 @@
         case EMSTATE_IEM:
-        case EMSTATE_RAW:
+        case EMSTATE_RAW_OBSOLETE:
         case EMSTATE_IEM_THEN_REM:
         case EMSTATE_DEBUG_GUEST_IEM:

@@ 439 @@
-        case EMSTATE_REM:
-        case EMSTATE_DEBUG_GUEST_REM:
+        case EMSTATE_RECOMPILER:
+        case EMSTATE_DEBUG_GUEST_RECOMPILER:
            return DBGFEVENTCTX_REM;
trunk/src/VBox/VMM/VMMR3/EM.cpp
r99725 → r99897

@@ 33 @@
  * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
  * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
- * emR3RemExecute).
+ * emR3RmExecute).
  *
  * The interpreted execution is only used to avoid switching between

@@ 78 @@
 #include <iprt/thread.h>
 
+#include "EMInline.h"
+
 

@@ 88 @@
 #endif
 static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
-#if defined(VBOX_WITH_REM) || defined(DEBUG)
-static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
-#endif
-static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
 

@@ 262 @@
     EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
 #ifdef VBOX_WITH_STATISTICS
-    EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu,   "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
     EM_REG_PROFILE(&pVCpu->em.s.StatREMExec,  "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
-    EM_REG_PROFILE(&pVCpu->em.s.StatREMSync,  "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
-    EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
-    EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec,  "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
-    EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail,  "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
-#endif /* VBOX_WITH_STATISTICS */
+#endif
 
     EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
     EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
     EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
-    EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
-    EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
+    EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
 
     EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");

@@ 418 @@
     Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
     Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
-    SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
+    SSMR3PutU32(pSSM,
+                  pVCpu->em.s.enmPrevState == EMSTATE_NONE
+                || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
+                || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
+                ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
 
     /* Save mwait state. */

@@ 447 @@
      * Validate version.
      */
-    if (
-        ||
+    if (    uVersion > EM_SAVED_STATE_VERSION
+        ||  uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
     {
         AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));

@@ 468 @@
     if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
     {
+        /* We are only intereseted in two enmPrevState values for use when
+           EMR3ExecuteVM is called.
+           Since ~r157540. only these two and EMSTATE_NONE are saved. */
         SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
         Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
+        if (   pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
+            && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
+            pVCpu->em.s.enmPrevState = EMSTATE_NONE;
 
         pVCpu->em.s.enmState = EMSTATE_SUSPENDED;

@@ 517 @@
     switch (pArgs->enmPolicy)
     {
-        case EMEXECPOLICY_RECOMPILE_RING0:
-        case EMEXECPOLICY_RECOMPILE_RING3:
-            break;
         case EMEXECPOLICY_IEM_ALL:
             pVM->em.s.fIemExecutesAll = pArgs->fEnforce;

@@ 530 @@
                 switch (pVCpuX->em.s.enmState)
                 {
+                    case EMSTATE_DEBUG_GUEST_RECOMPILER:
+                        if (pVM->em.s.fIemRecompiled)
+                            break;
+                        RT_FALL_THROUGH();
                     case EMSTATE_DEBUG_GUEST_RAW:
                     case EMSTATE_DEBUG_GUEST_HM:
                     case EMSTATE_DEBUG_GUEST_NEM:
-                    case EMSTATE_DEBUG_GUEST_REM:
                         Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
                         pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;

@@ 543 @@
             }
             break;
+
+        case EMEXECPOLICY_IEM_RECOMPILED:
+            pVM->em.s.fIemRecompiled = pArgs->fEnforce;
+            break;
+
         default:
             AssertFailedReturn(VERR_INVALID_PARAMETER);
     }
-    Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
+    Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
+         pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
 
     /*
-     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
+     * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
      */
-    return pVCpu->em.s.enmState == EMSTATE_RAW
-        || pVCpu->em.s.enmState == EMSTATE_HM
+    Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
+    return pVCpu->em.s.enmState == EMSTATE_HM
         || pVCpu->em.s.enmState == EMSTATE_NEM
         || pVCpu->em.s.enmState == EMSTATE_IEM
-        || pVCpu->em.s.enmState == EMSTATE_REM
+        || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
+           /* obsolete stuff: */
         || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
          ? VINF_EM_RESCHEDULE

@@ 607 @@
     switch (enmPolicy)
     {
-        case EMEXECPOLICY_RECOMPILE_RING0:
-        case EMEXECPOLICY_RECOMPILE_RING3:
-            *pfEnforced = false;
-            break;
         case EMEXECPOLICY_IEM_ALL:
             *pfEnforced = pVM->em.s.fIemExecutesAll;
             break;
+        case EMEXECPOLICY_IEM_RECOMPILED:
+            *pfEnforced = pVM->em.s.fIemRecompiled;
+            break;
         default:
             AssertFailedReturn(VERR_INTERNAL_ERROR_2);

@@ 670 @@
     switch (enmState)
     {
-        case EMSTATE_NONE:                  return "EMSTATE_NONE";
-        case EMSTATE_RAW:                   return "EMSTATE_RAW";
-        case EMSTATE_HM:                    return "EMSTATE_HM";
-        case EMSTATE_IEM:                   return "EMSTATE_IEM";
-        case EMSTATE_REM:                   return "EMSTATE_REM";
-        case EMSTATE_HALTED:                return "EMSTATE_HALTED";
-        case EMSTATE_WAIT_SIPI:             return "EMSTATE_WAIT_SIPI";
-        case EMSTATE_SUSPENDED:             return "EMSTATE_SUSPENDED";
-        case EMSTATE_TERMINATING:           return "EMSTATE_TERMINATING";
-        case EMSTATE_DEBUG_GUEST_RAW:       return "EMSTATE_DEBUG_GUEST_RAW";
-        case EMSTATE_DEBUG_GUEST_HM:        return "EMSTATE_DEBUG_GUEST_HM";
-        case EMSTATE_DEBUG_GUEST_IEM:       return "EMSTATE_DEBUG_GUEST_IEM";
-        case EMSTATE_DEBUG_GUEST_REM:       return "EMSTATE_DEBUG_GUEST_REM";
-        case EMSTATE_DEBUG_HYPER:           return "EMSTATE_DEBUG_HYPER";
-        case EMSTATE_GURU_MEDITATION:       return "EMSTATE_GURU_MEDITATION";
-        case EMSTATE_IEM_THEN_REM:          return "EMSTATE_IEM_THEN_REM";
-        case EMSTATE_NEM:                   return "EMSTATE_NEM";
-        case EMSTATE_DEBUG_GUEST_NEM:       return "EMSTATE_DEBUG_GUEST_NEM";
-        default:                            return "Unknown!";
+        case EMSTATE_NONE:                      return "EMSTATE_NONE";
+        case EMSTATE_RAW_OBSOLETE:              return "EMSTATE_RAW_OBSOLETE";
+        case EMSTATE_HM:                        return "EMSTATE_HM";
+        case EMSTATE_IEM:                       return "EMSTATE_IEM";
+        case EMSTATE_RECOMPILER:                return "EMSTATE_RECOMPILER";
+        case EMSTATE_HALTED:                    return "EMSTATE_HALTED";
+        case EMSTATE_WAIT_SIPI:                 return "EMSTATE_WAIT_SIPI";
+        case EMSTATE_SUSPENDED:                 return "EMSTATE_SUSPENDED";
+        case EMSTATE_TERMINATING:               return "EMSTATE_TERMINATING";
+        case EMSTATE_DEBUG_GUEST_RAW:           return "EMSTATE_DEBUG_GUEST_RAW";
+        case EMSTATE_DEBUG_GUEST_HM:            return "EMSTATE_DEBUG_GUEST_HM";
+        case EMSTATE_DEBUG_GUEST_IEM:           return "EMSTATE_DEBUG_GUEST_IEM";
+        case EMSTATE_DEBUG_GUEST_RECOMPILER:    return "EMSTATE_DEBUG_GUEST_RECOMPILER";
+        case EMSTATE_DEBUG_HYPER:               return "EMSTATE_DEBUG_HYPER";
+        case EMSTATE_GURU_MEDITATION:           return "EMSTATE_GURU_MEDITATION";
+        case EMSTATE_IEM_THEN_REM:              return "EMSTATE_IEM_THEN_REM";
+        case EMSTATE_NEM:                       return "EMSTATE_NEM";
+        case EMSTATE_DEBUG_GUEST_NEM:           return "EMSTATE_DEBUG_GUEST_NEM";
+        default:                                return "Unknown!";
     }

@@ 874 @@
         else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
             rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
-#ifdef VBOX_WITH_REM /** @todo fix me? */
-        else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
-            rc = emR3RemStep(pVM, pVCpu);
-#endif
         else
         {

@@ 924 @@
             case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
-                break;
-            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
-                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                 break;
             case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */

@@ 1031 @@
 
 
-#if defined(VBOX_WITH_REM) || defined(DEBUG)
-/**
- * Steps recompiled code.
- *
- * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
- *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
- *
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
-{
-#if defined(VBOX_VMM_TARGET_ARMV8)
-    Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
-#else
-    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
-#endif
-
-    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
-
-#if defined(VBOX_VMM_TARGET_ARMV8)
-    Log3(("emR3RemStep: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
-#else
-    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
-#endif
-    return rc;
-}
-#endif /* VBOX_WITH_REM || DEBUG */
-
-
 /**
  * Executes recompiled code.

@@ 1076 @@
  *
  */
-static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
-{
+static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+    STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
+#ifdef VBOX_VMM_TARGET_ARMV8
+    LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
+#else
+    LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
+#endif
+
+    /*
+     * Loop till we get a forced action which returns anything but VINF_SUCCESS.
+     */
+    *pfFFDone = false;
+    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+    for (;;)
+    {
 #ifdef LOG_ENABLED
 # if defined(VBOX_VMM_TARGET_ARMV8)
-    Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
+        Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
 # else
-    uint32_t cpl = CPUMGetGuestCPL(pVCpu);
-
-    if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
-        Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
-    else
-        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
+        if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
+            Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
+                 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
+                 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
+        else
+            Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
 # endif
 #endif
-    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
-
-    /*
-     * Spin till we get a forced action which returns anything but VINF_SUCCESS
-     * or the REM suggests raw-mode execution.
-     */
-    *pfFFDone = false;
-    uint32_t cLoops = 0;
-    int rc = VINF_SUCCESS;
-    for (;;)
-    {
-        /*
-         * Execute REM.
+
+        /*
+         * Execute.
          */
         if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
         {
             STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
-            rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
+#ifdef VBOX_WITH_IEM_RECOMPILER
+            if (pVM->em.s.fIemRecompiled)
+                rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
+            else
+#endif
+                rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
             STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
         }

@@ 1116 @@
             RTThreadSleep(5);
             STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
-            rc = VINF_SUCCESS;
+            rcStrict = VINF_SUCCESS;
         }
 

@@ 1125 @@
         if (   VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
             || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
-            rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
+            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
 
         /*
          * Process the returned status code.
          */
-        if (rc != VINF_SUCCESS)
-        {
-            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
+        if (rcStrict != VINF_SUCCESS)
+        {
+#if 0
+            if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
                 break;
-            if (rc != VINF_REM_INTERRUPED_FF)
-            {
-                /* Try dodge unimplemented IEM trouble by reschduling. */
-                if (   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
-                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
-                {
-                    EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
-                    if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
-                    {
-                        rc = VINF_EM_RESCHEDULE;
-                        break;
-                    }
-                }
-
-                /*
-                 * Anything which is not known to us means an internal error
-                 * and the termination of the VM!
-                 */
-                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
-                break;
-            }
+            /* Fatal error: */
+#endif
+            break;
         }
 

@@ 1168 @@
 #endif
         AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
-        if (    VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
-            ||  VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
-        {
-            STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
-            rc = emR3ForcedActions(pVM, pVCpu, rc);
-            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
-            STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
-            if (    rc != VINF_SUCCESS
-                &&  rc != VINF_EM_RESCHEDULE_REM)
+        if (   VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
+            || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
+        {
+            rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+            VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
+            if (   rcStrict != VINF_SUCCESS
+                && rcStrict != VINF_EM_RESCHEDULE_REM)
             {
                 *pfFFDone = true;

@@ 1183 @@
         }
 
-        /*
-         * Have to check if we can get back to fast execution mode every so often.
-         */
-        if (!(++cLoops & 7))
-        {
-            EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
-            if (   enmCheck != EMSTATE_REM
-                && enmCheck != EMSTATE_IEM_THEN_REM)
-            {
-                LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
-                STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
-                return VINF_EM_RESCHEDULE;
-            }
-            Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
-        }
-
     } /* The Inner Loop, recompiled execution mode version. */
 
-    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
-    return rc;
-}
-
-
-#ifdef DEBUG
-
-int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
-{
-    EMSTATE enmOldState = pVCpu->em.s.enmState;
-
-    pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
-
-    Log(("Single step BEGIN:\n"));
-    for (uint32_t i = 0; i < cIterations; i++)
-    {
-        DBGFR3PrgStep(pVCpu);
-        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
-        emR3RemStep(pVM, pVCpu);
-        if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
-            break;
-    }
-    Log(("Single step END:\n"));
-#if defined(VBOX_VMM_TARGET_ARMV8)
-    AssertReleaseFailed();
-#else
-    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
-#endif
-    pVCpu->em.s.enmState = enmOldState;
-    return VINF_EM_RESCHEDULE;
-}
-
-#endif /* DEBUG */
+    STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
+    return rcStrict;
+}
 

@@ 1277 @@
 
     EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
-    if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
+    if (enmNewState != EMSTATE_RECOMPILER && enmNewState != EMSTATE_IEM_THEN_REM)
     {
         LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",

@@ 1297 @@
      * Switch to REM.
      */
-    Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
-    pVCpu->em.s.enmState = EMSTATE_REM;
+    Log(("emR3ExecuteIemThenRem: -> EMSTATE_RECOMPILER (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
+    pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
     return VINF_SUCCESS;
 }

@@ 1323 @@
     if (   pVM->em.s.fIemExecutesAll
         || VM_IS_EXEC_ENGINE_IEM(pVM))
+#ifdef VBOX_WITH_IEM_RECOMPILER
+        return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
+#else
         return EMSTATE_IEM;
+#endif
 
 #if !defined(VBOX_VMM_TARGET_ARMV8)

@@ 2171 @@
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
-{
-    uint64_t u64UserTime, u64KernelTime;
-
-    if (    pVM->uCpuExecutionCap != 100
-        &&  RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
-    {
-        uint64_t u64TimeNow = RTTimeMilliTS();
-        if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
+bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
+{
+    Assert(pVM->uCpuExecutionCap != 100);
+    uint64_t cMsUserTime;
+    uint64_t cMsKernelTime;
+    if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
+    {
+        uint64_t const msTimeNow = RTTimeMilliTS();
+        if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < msTimeNow)
         {
             /* New time slice. */
-            pVCpu->em.s.u64TimeSliceStart     = u64TimeNow;
-            pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
+            pVCpu->em.s.u64TimeSliceStart     = msTimeNow;
+            pVCpu->em.s.u64TimeSliceStartExec = cMsKernelTime + cMsUserTime;
             pVCpu->em.s.u64TimeSliceExec      = 0;
         }
-        pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
-
-        Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
-        if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
-            return false;
+        pVCpu->em.s.u64TimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.u64TimeSliceStartExec;
+
+        bool const fRet = pVCpu->em.s.u64TimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
+        Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart,
+              pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
+        return fRet;
     }
     return true;

@@ 2483 @@
                 case VINF_EM_DBG_BREAKPOINT:
                 case VINF_EM_DBG_STEP:
-                    if (enmOldState == EMSTATE_RAW)
-                    {
-                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
-                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
-                    }
-                    else if (enmOldState == EMSTATE_HM)
+                    if (enmOldState == EMSTATE_HM)
                     {
                         Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));

@@ 2498 @@
                         pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
                     }
-                    else if (enmOldState == EMSTATE_REM)
-                    {
-                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
-                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
+                    else if (enmOldState == EMSTATE_RECOMPILER)
+                    {
+                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
+                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
                     }
                     else

@@ 2569 @@
             && (   (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
-            && (   enmNewState == EMSTATE_RAW
-                || enmNewState == EMSTATE_HM
+            && (   enmNewState == EMSTATE_HM
                 || enmNewState == EMSTATE_NEM
-                || enmNewState == EMSTATE_REM
-                || enmNewState == EMSTATE_IEM_THEN_REM
-                || enmNewState == EMSTATE_DEBUG_GUEST_RAW
+                || enmNewState == EMSTATE_RECOMPILER
                 || enmNewState == EMSTATE_DEBUG_GUEST_HM
                 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
                 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
-                || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
+                || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER
+                   /* Obsolete stuff: */
+                || enmNewState == EMSTATE_IEM_THEN_REM ) )
         {
             if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)

@@ 2604 @@
                 {
                     /*
-                     * Execute raw.
-                     */
-                    case EMSTATE_RAW:
-                        AssertLogRelMsgFailed(("%Rrc\n", rc));
-                        rc = VERR_EM_INTERNAL_ERROR;
-                        break;
-
-                    /*
                      * Execute hardware accelerated raw.
                      */

@@ 2632 @@
                      * Execute recompiled.
                      */
-                    case EMSTATE_REM:
-                        rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
-                        Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
+                    case EMSTATE_RECOMPILER:
+                        rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, &fFFDone));
+                        Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
                         break;

@@ 2763 @@
                 case EMSTATE_DEBUG_GUEST_NEM:
                 case EMSTATE_DEBUG_GUEST_IEM:
-                case EMSTATE_DEBUG_GUEST_REM:
+                case EMSTATE_DEBUG_GUEST_RECOMPILER:
                     TMR3NotifySuspend(pVM, pVCpu);
                     rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));

@@ 2818 @@
                  */
                 case EMSTATE_NONE:
+                case EMSTATE_RAW_OBSOLETE:
                 case EMSTATE_TERMINATING:
                 default:
trunk/src/VBox/VMM/VMMR3/EMHM.cpp
r99220 → r99897

@@ 58 @@
 #include <iprt/asm.h>
 
+#include "EMInline.h"
+
 
trunk/src/VBox/VMM/VMMR3/EMR3Dbg.cpp
r99051 → r99897

@@ 44 @@
  * Implements the '.alliem' command. }
  */
-static DECLCALLBACK(int) enmR3DbgCmdAllIem(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
+static DECLCALLBACK(int) emR3DbgCmdAllIem(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM, PCDBGCVAR paArgs, unsigned cArgs)
 {
     int rc;

@@ 76 @@
 {
     {
-        "alliem", 0, 1, &g_BoolArg, 1, 0, enmR3DbgCmdAllIem, "[boolean]",
+        "alliem", 0, 1, &g_BoolArg, 1, 0, emR3DbgCmdAllIem, "[boolean]",
         "Enables or disabled executing ALL code in IEM, if no arguments are given it displays the current status."
     },
trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp
r99220 → r99897

@@ 58 @@
 #include <iprt/asm.h>
 
+#include "EMInline.h"
+
 
trunk/src/VBox/VMM/include/EMInternal.h
r99208 → r99897

@@ 130 @@
     /** Whether IEM executes everything. */
     bool                    fIemExecutesAll;
+    /** Whether IEM execution (pure) is recompiled (true) or interpreted (false). */
+    bool                    fIemRecompiled;
     /** Whether a triple fault triggers a guru. */
     bool                    fGuruOnTripleFault;
     /** Alignment padding. */
-    bool                    afPadding[2];
-
-    /** Id of the VCPU that last executed code in the recompiler. */
-    VMCPUID                 idLastRemCpu;
+    bool                    afPadding[5];
 } EM;
 /** Pointer to EM VM instance data. */

@@ 224 @@
     STAMPROFILEADV          StatNEMEntry;
     STAMPROFILE             StatNEMExec;
-    STAMPROFILE             StatREMEmu;
     STAMPROFILE             StatREMExec;
-    STAMPROFILE             StatREMSync;
-    STAMPROFILEADV          StatREMTotal;
-    STAMPROFILE             StatRAWExec;
-    STAMPROFILEADV          StatRAWEntry;
-    STAMPROFILEADV          StatRAWTail;
-    STAMPROFILEADV          StatRAWTotal;
+    STAMPROFILE             StatREMTotal;
     STAMPROFILEADV          StatTotal;
     /** @} */

@@ 326 @@
 VBOXSTRICTRC    emR3NemSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags);
 
-int             emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations);
-
-bool            emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu);
+bool            emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu);
 
 VBOXSTRICTRC    emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu);
trunk/src/VBox/VMM/testcase/tstAnimate.cpp
r98644 → r99897

@@ 881 @@
     if (RT_SUCCESS(rc))
     {
-        rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_RECOMPILE_RING0, true); AssertReleaseRC(rc);
-        rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_RECOMPILE_RING3, true); AssertReleaseRC(rc);
+        rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true); AssertReleaseRC(rc);
         DBGFR3Info(pUVM, "cpumguest", "verbose", NULL);
         if (fPowerOn)
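For callers outside this changeset, the new EMEXECPOLICY_IEM_RECOMPILED policy is driven through the same EMR3SetExecutionPolicy()/EMR3QueryExecutionPolicy() pair whose switch statements are extended in the EM.cpp hunks above. A minimal usage sketch, mirroring the tstAnimate.cpp call style (illustrative only, not part of the changeset):

    /* Illustrative only: enable the IEM recompiler policy and read it back. */
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_RECOMPILED, true /*fEnforce*/);
    AssertReleaseRC(rc);

    bool fRecompiled = false;
    rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_RECOMPILED, &fRecompiled);
    AssertReleaseRC(rc);
    Log(("IEM recompiler policy enforced: %RTbool\n", fRecompiled));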