Changeset 70979 in vbox for trunk/src/VBox/VMM
Timestamp: Feb 13, 2018 1:38:48 AM
Location:  trunk/src/VBox/VMM
Files:     8 edited, 1 copied
trunk/src/VBox/VMM/Makefile.kmk
(diff r70954:r70979)

@@ -173,4 +173,5 @@
        $(if $(VBOX_WITH_RAW_MODE),VMMR3/EMRaw.cpp) \
        VMMR3/EMHM.cpp \
+       VMMR3/EMR3Nem.cpp \
        VMMR3/FTM.cpp \
        VMMR3/GIM.cpp \
trunk/src/VBox/VMM/VMMR3/EM.cpp
(diff r70953:r70979)

@@ -421 +421 @@
 
     /* these should be considered for release statistics. */
     EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
     EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
-    EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
-    EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
+    EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
+    EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
+    EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
     EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
     EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
+    EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
+    EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
+    EM_REG_PROFILE(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
     EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
     EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
     EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
     EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
     EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
     EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
 
 #endif /* VBOX_WITH_STATISTICS */
 
     EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
     EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
     EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
     EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
     EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
 
     EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
 }
 

@@ -673 +677 @@
 
     /*
-     * Force rescheduling if in RAW, HM, IEM, or REM.
+     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
      */
     return pVCpu->em.s.enmState == EMSTATE_RAW
         || pVCpu->em.s.enmState == EMSTATE_HM
+        || pVCpu->em.s.enmState == EMSTATE_NEM
         || pVCpu->em.s.enmState == EMSTATE_IEM
         || pVCpu->em.s.enmState == EMSTATE_REM

@@ -789 +794 @@
         case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
         case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
+        case EMSTATE_NEM: return "EMSTATE_NEM";
+        case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
         default: return "Unknown!";
     }

@@ -829 +836 @@
     else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
         rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
+    else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
+        rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
 #ifdef VBOX_WITH_REM
     else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)

@@ -2356 +2365 @@
         /*
          * Reschedule - to raw-mode execution.
          */
+        /** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
         case VINF_EM_RESCHEDULE_RAW:
-            Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
             Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
-            pVCpu->em.s.enmState = EMSTATE_RAW;
+            if (VM_IS_RAW_MODE_ENABLED(pVM))
+            {
+                Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
+                pVCpu->em.s.enmState = EMSTATE_RAW;
+            }
+            else
+            {
+                AssertLogRelFailed();
+                pVCpu->em.s.enmState = EMSTATE_NONE;
+            }
             break;
 
         /*
-         * Reschedule - to hardware accelerated raw-mode execution.
+         * Reschedule - to HM or NEM.
          */
         case VINF_EM_RESCHEDULE_HM:
-            Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
             Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
             Assert(!pVCpu->em.s.fForceRAW);
-            pVCpu->em.s.enmState = EMSTATE_HM;
+            if (VM_IS_HM_ENABLED(pVM))
+            {
+                Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
+                pVCpu->em.s.enmState = EMSTATE_HM;
+            }
+            else if (VM_IS_NEM_ENABLED(pVM))
+            {
+                Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
+                pVCpu->em.s.enmState = EMSTATE_NEM;
+            }
+            else
+            {
+                AssertLogRelFailed();
+                pVCpu->em.s.enmState = EMSTATE_NONE;
+            }
             break;

@@ -2536 +2567 @@
             pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
         }
+        else if (enmOldState == EMSTATE_NEM)
+        {
+            Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
+            pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
+        }
         else if (enmOldState == EMSTATE_REM)
         {

@@ -2609 +2645 @@
             && (   enmNewState == EMSTATE_RAW
                 || enmNewState == EMSTATE_HM
+                || enmNewState == EMSTATE_NEM
                 || enmNewState == EMSTATE_REM
                 || enmNewState == EMSTATE_IEM_THEN_REM
                 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
                 || enmNewState == EMSTATE_DEBUG_GUEST_HM
+                || enmNewState == EMSTATE_DEBUG_GUEST_NEM
                 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
                 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )

@@ -2656 +2694 @@
             case EMSTATE_HM:
                 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
+                break;
+
+            /*
+             * Execute hardware accelerated raw.
+             */
+            case EMSTATE_NEM:
+                rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
                 break;

@@ -2769 +2814 @@
             case EMSTATE_DEBUG_GUEST_RAW:
             case EMSTATE_DEBUG_GUEST_HM:
+            case EMSTATE_DEBUG_GUEST_NEM:
             case EMSTATE_DEBUG_GUEST_IEM:
             case EMSTATE_DEBUG_GUEST_REM:
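The VINF_EM_RESCHEDULE_RAW and VINF_EM_RESCHEDULE_HM hunks above all encode the same small decision: switch to raw-mode or HM only when that engine is actually enabled for the VM, fall back to NEM for the HM case, and treat "no engine available" as an internal error (EMSTATE_NONE). The following is only an illustrative, self-contained sketch of that decision; the names in it (pickExecState, EXEC_HM, ...) are invented for the sketch and are not VirtualBox APIs.

    // Simplified model of the engine-selection fallback added in this hunk.
    #include <cassert>
    #include <cstdio>

    enum ExecState { EXEC_HM, EXEC_NEM, EXEC_NONE };

    static ExecState pickExecState(bool fHmEnabled, bool fNemEnabled)
    {
        if (fHmEnabled)
            return EXEC_HM;   // hardware-assisted execution (VT-x / AMD-V)
        if (fNemEnabled)
            return EXEC_NEM;  // native/3rd-party execution manager fallback
        return EXEC_NONE;     // nothing can run the guest -> error state
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    pickExecState(true,  true),    // 0 -> EXEC_HM
                    pickExecState(false, true),    // 1 -> EXEC_NEM
                    pickExecState(false, false));  // 2 -> EXEC_NONE
        assert(pickExecState(false, false) == EXEC_NONE);
        return 0;
    }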
trunk/src/VBox/VMM/VMMR3/EMHM.cpp
(diff r69111:r70979)

@@ -284 +284 @@
 
 /**
- * Process raw-mode specific forced actions.
- *
- * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
+ * Process HM specific forced actions.
+ *
+ * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
+ * or/and VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
  *
  * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other

@@ -378 +379 @@
     *pfFFDone = false;
 
-    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);
+    STAM_COUNTER_INC(&pVCpu->em.s.StatHMExecuteCalled);
 
 #ifdef EM_NOTIFY_HM

@@ -389 +390 @@
     for (;;)
     {
-        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);
+        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHMEntry, a);
 
         /* Check if a forced reschedule is pending. */

@@ -420 +421 @@
 
         uint32_t cpl = CPUMGetGuestCPL(pVCpu);
-
         if (pVM->cCpus == 1)
         {

@@ -444 +444 @@
          * Execute the code.
          */
-        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);
+        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHMEntry, a);
 
         if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
         {
-            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
+            STAM_PROFILE_START(&pVCpu->em.s.StatHMExec, x);
             rc = VMMR3HmRunGC(pVM, pVCpu);
-            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
+            STAM_PROFILE_STOP(&pVCpu->em.s.StatHMExec, x);
         }
         else
trunk/src/VBox/VMM/VMMR3/EMR3Nem.cpp
(diff r70947:r70979)

@@ -1 +1 @@
 /* $Id$ */
 /** @file
- * EM - Execution Monitor / Manager - hardware virtualization
+ * EM - Execution Monitor / Manager - NEM interface.
  */
 
 /*
- * Copyright (C) 2006-2017 Oracle Corporation
+ * Copyright (C) 2006-2018 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as

@@ -28 +28 @@
 #include <VBox/vmm/iem.h>
 #include <VBox/vmm/iom.h>
+#include <VBox/vmm/nem.h>
 #include <VBox/vmm/dbgf.h>
 #include <VBox/vmm/pgm.h>

@@ -39 +40 @@
 #include <VBox/vmm/pdmcritsect.h>
 #include <VBox/vmm/pdmqueue.h>
-#include <VBox/vmm/hm.h>
 #include "EMInternal.h"
 #include <VBox/vmm/vm.h>

@@ -63 +63 @@
 *   Internal Functions                                                                                                           *
 *********************************************************************************************************************************/
-DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
-static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
-static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
-
-#define EMHANDLERC_WITH_HM
-#define emR3ExecuteInstruction emR3HmExecuteInstruction
-#define emR3ExecuteIOInstruction emR3HmExecuteIOInstruction
+DECLINLINE(int) emR3NemExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
+static int emR3NemExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
+static int emR3NemForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+
+#define EMHANDLERC_WITH_NEM
+#define emR3ExecuteInstruction emR3NemExecuteInstruction
+#define emR3ExecuteIOInstruction emR3NemExecuteIOInstruction
 #include "EMHandleRCTmpl.h"

@@ -88 +88 @@
  * @thread  EMT.
  */
-VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
+VBOXSTRICTRC emR3NemSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
 {
     PCPUMCTX pCtx = pVCpu->em.s.pCtx;
     Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));
 
-    if (!HMR3CanExecuteGuest(pVM, pCtx))
+    if (!NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
         return VINF_EM_RESCHEDULE;

@@ -105 +105 @@
         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
     {
-        VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu, pCtx);
+        VBOXSTRICTRC rcStrict = emR3NemForcedActions(pVM, pVCpu, pCtx);
         if (rcStrict != VINF_SUCCESS)
         {
-            Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+            Log(("emR3NemSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
             return rcStrict;
         }

@@ -116 +116 @@
      * Go execute it.
      */
-    bool fOld = HMSetSingleInstruction(pVM, pVCpu, true);
-    VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
-    HMSetSingleInstruction(pVM, pVCpu, fOld);
-    LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    bool fOld = NEMR3SetSingleInstruction(pVM, pVCpu, true);
+    VBOXSTRICTRC rcStrict = NEMR3RunGC(pVM, pVCpu);
+    NEMR3SetSingleInstruction(pVM, pVCpu, fOld);
+    LogFlow(("emR3NemSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 
     /*

@@ -130 +130 @@
     {
         rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
-        LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        LogFlow(("emR3NemSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     }
 
     if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
     {
-        rcStrict = emR3HmHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
-        Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+        rcStrict = emR3NemHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
+        Log(("emR3NemSingleInstruction: emR3NemHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     }

@@ -148 +148 @@
     if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
         rcStrict = VINF_EM_DBG_STEPPED;
-    Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
+    Log(("emR3NemSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
     return rcStrict;
 }

@@ -167 +167 @@
  */
 #if defined(LOG_ENABLED) || defined(DOXYGEN_RUNNING)
-static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
+static int emR3NemExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
 #else
-static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
+static int emR3NemExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
 #endif
 {

@@ -236 +236 @@
  * @param   rcGC    GC return code
  */
-DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
+DECLINLINE(int) emR3NemExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
 {
 #ifdef LOG_ENABLED
-    return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
+    return emR3NemExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
 #else
     RT_NOREF_PV(pszPrefix);
-    return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC);
+    return emR3NemExecuteInstructionWorker(pVM, pVCpu, rcGC);
 #endif
 }

@@ -253 +253 @@
  * @param   pVCpu   The cross context virtual CPU structure.
  */
-static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
-{
+static int emR3NemExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
+{
+    RT_NOREF_PV(pVM);
+    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
+
+#if 0
     PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-
-    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
 
     /*

@@ -271 +273 @@
     AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                     RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));
+#endif
 
     /*
      * Hand it over to the interpreter.
      */
-    rcStrict = IEMExecOne(pVCpu);
-    LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
+    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
+    LogFlow(("emR3NemExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
+
     STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
     return VBOXSTRICTRC_TODO(rcStrict);

@@ -284 +288 @@
 /**
- * Process raw-mode specific forced actions.
- *
- * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
+ * Process NEM specific forced actions.
+ *
+ * This function is called when any FFs in VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
+ * or/and VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
  *
  * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other

@@ -294 +299 @@
  * @param   pCtx    Pointer to the guest CPU context.
  */
-static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    /*
-     * Sync page directory.
+static int emR3NemForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+#ifdef VBOX_WITH_RAW_MODE
+    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
+#endif
+
+    /*
+     * Sync page directory should not happen in NEM mode.
      */
     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     {
-        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
-        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
-        if (RT_FAILURE(rc))
-            return rc;
-
-#ifdef VBOX_WITH_RAW_MODE
-        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
-#endif
-
-        /* Prefetch pages for EIP and ESP. */
-        /** @todo This is rather expensive. Should investigate if it really helps at all. */
-        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
-        if (rc == VINF_SUCCESS)
-            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
-        if (rc != VINF_SUCCESS)
-        {
-            if (rc != VINF_PGM_SYNC_CR3)
-            {
-                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
-                return rc;
-            }
-            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
-            if (RT_FAILURE(rc))
-                return rc;
-        }
-        /** @todo maybe prefetch the supervisor stack page as well */
-#ifdef VBOX_WITH_RAW_MODE
-        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
-#endif
+        Log(("NEM: TODO: Make VMCPU_FF_PGM_SYNC_CR3 / VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL quiet! (%#x)\n", pVCpu->fLocalForcedActions));
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
     }

@@ -352 +334 @@
         return VINF_EM_NO_MEMORY;
 
+    RT_NOREF_PV(pCtx);
     return VINF_SUCCESS;
 }

@@ -370 +353 @@
  *          FFs were done before returning.
  */
-int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
-{
-    int      rc   = VERR_IPE_UNINITIALIZED_STATUS;
-    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-
-    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
+VBOXSTRICTRC emR3NemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
+{
+    VBOXSTRICTRC rcStrict = VERR_IPE_UNINITIALIZED_STATUS;
+    PCPUMCTX     pCtx     = pVCpu->em.s.pCtx;
+
+    LogFlow(("emR3NemExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
     *pfFFDone = false;
 
-    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);
-
-#ifdef EM_NOTIFY_HM
-    HMR3NotifyScheduled(pVCpu);
-#endif
+    STAM_COUNTER_INC(&pVCpu->em.s.StatNEMExecuteCalled);
 
     /*

@@ -389 +368 @@
     for (;;)
     {
-        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);
-
+        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatNEMEntry, a);
+
+#if 0
         /* Check if a forced reschedule is pending. */
-        if (HMR3IsRescheduleRequired(pVM, pCtx))
-        {
-            rc = VINF_EM_RESCHEDULE;
+        if (NEMR3IsRescheduleRequired(pVM, pCtx))
+        {
+            rcStrict = VINF_EM_RESCHEDULE;
             break;
         }
+#endif
 
         /*
         * Process high priority pre-execution raw-mode FFs.
         */
-#ifdef VBOX_WITH_RAW_MODE
-        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
-#endif
-        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
-            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
-        {
-            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
-            if (rc != VINF_SUCCESS)
+        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
+            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
+        {
+            rcStrict = emR3NemForcedActions(pVM, pVCpu, pCtx);
+            if (rcStrict != VINF_SUCCESS)
                 break;
         }

@@ -420 +398 @@
 
         uint32_t cpl = CPUMGetGuestCPL(pVCpu);
-
         if (pVM->cCpus == 1)
         {
             if (pCtx->eflags.Bits.u1VM)
-                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
+                Log(("NEMV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
             else if (CPUMIsGuestIn64BitCodeEx(pCtx))
-                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+                Log(("NEMR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
             else
-                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+                Log(("NEMR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
         }
         else
         {
             if (pCtx->eflags.Bits.u1VM)
-                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
+                Log(("NEMV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
             else if (CPUMIsGuestIn64BitCodeEx(pCtx))
-                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+                Log(("NEMR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
             else
-                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
+                Log(("NEMR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
         }
 #endif /* LOG_ENABLED */

@@ -444 +421 @@
          * Execute the code.
          */
-        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);
+        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatNEMEntry, a);
 
         if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
         {
-            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
-            rc = VMMR3HmRunGC(pVM, pVCpu);
-            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
+            STAM_PROFILE_START(&pVCpu->em.s.StatNEMExec, x);
+            rcStrict = NEMR3RunGC(pVM, pVCpu);
+            STAM_PROFILE_STOP(&pVCpu->em.s.StatNEMExec, x);
         }
         else

@@ -458 +435 @@
             RTThreadSleep(5);
             STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
-            rc = VINF_SUCCESS;
+            rcStrict = VINF_SUCCESS;
         }
 

@@ -468 +445 @@
         if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
-            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
+            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
 
         /*
          * Process the returned status code.
          */
-        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
+        if (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
             break;
 
-        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
-        if (rc != VINF_SUCCESS)
+        rcStrict = emR3NemHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
+        if (rcStrict != VINF_SUCCESS)
             break;

@@ -489 +466 @@
             || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
         {
-            rc = emR3ForcedActions(pVM, pVCpu, rc);
-            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
-            if (    rc != VINF_SUCCESS
-                &&  rc != VINF_EM_RESCHEDULE_HM)
+            rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
+            VBOXVMM_EM_FF_ALL_RET(pVCpu, rcStrict);
+            if (   rcStrict != VINF_SUCCESS
+                && rcStrict != VINF_EM_RESCHEDULE_HM)
             {
                 *pfFFDone = true;

@@ -506 +483 @@
     RTLogFlush(NULL);
 #endif
-    return rc;
-}
-
+    return rcStrict;
+}
+
trunk/src/VBox/VMM/VMMR3/NEMR3.cpp
(diff r70977:r70979)

@@ -236 +236 @@
 
 
+VMMR3_INT_DECL(VBOXSTRICTRC) NEMR3RunGC(PVM pVM, PVMCPU pVCpu)
+{
+    Assert(VM_IS_NEM_ENABLED(pVM));
+    return nemR3NativeRunGC(pVM, pVCpu);
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3CanExecuteGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(VM_IS_NEM_ENABLED(pVM));
+    return nemR3NativeCanExecuteGuest(pVM, pVCpu, pCtx);
+}
+
+
+VMMR3_INT_DECL(bool) NEMR3SetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
+{
+    Assert(VM_IS_NEM_ENABLED(pVM));
+    return nemR3NativeSetSingleInstruction(pVM, pVCpu, fEnable);
+}
+
+
 VMMR3_INT_DECL(int) NEMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb)
 {
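The new NEMR3RunGC, NEMR3CanExecuteGuest and NEMR3SetSingleInstruction entry points follow one pattern: assert that NEM is the active engine, then delegate to the per-platform nemR3Native* implementation declared in NEMInternal.h. A rough standalone sketch of that front-end/back-end split follows; every name in it (runGuest, nativeRunGuest, g_fNemEnabled) is a hypothetical stand-in, and the backend deliberately returns an error code to mirror the still-stubbed Windows backend in the next file.

    #include <cassert>
    #include <cstdio>

    // Per-platform backend hook; in VirtualBox each NEMR3Native-*.cpp supplies
    // its own.  This stub returns an error, mirroring the not-yet-implemented
    // Windows backend added in this changeset.
    static int nativeRunGuest()
    {
        return -1; // placeholder for "internal error / not implemented"
    }

    // Whether the NEM engine was selected for this VM (hypothetical flag).
    static bool g_fNemEnabled = true;

    // Generic front-end: check the engine really is active, then delegate.
    static int runGuest()
    {
        assert(g_fNemEnabled);
        return nativeRunGuest();
    }

    int main()
    {
        std::printf("runGuest() -> %d\n", runGuest());
        return 0;
    }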
trunk/src/VBox/VMM/VMMR3/NEMR3Native-win.cpp
(diff r70977:r70979)

@@ -869 +869 @@
 
 
+VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu)
+{
+    NOREF(pVM); NOREF(pVCpu);
+    return VERR_INTERNAL_ERROR_2;
+}
+
+
+bool nemR3NativeCanExecuteGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);
+    return true;
+}
+
+
+bool nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable)
+{
+    NOREF(pVM); NOREF(pVCpu); NOREF(fEnable);
+    return false;
+}
+
 
 DECLINLINE(int) nemR3NativeGCPhys2R3PtrReadOnly(PVM pVM, RTGCPHYS GCPhys, const void **ppv)
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
(diff r70780:r70979)

@@ -1 +1 @@
 /* $Id$ */
 /** @file
- * EM - emR3[Raw|Hm]HandleRC template.
+ * EM - emR3[Raw|Hm|Nem]HandleRC template.
  */
 
 /*
- * Copyright (C) 2006-2017 Oracle Corporation
+ * Copyright (C) 2006-2018 Oracle Corporation
  *
  * This file is part of VirtualBox Open Source Edition (OSE), as

@@ -19 +19 @@
 #define ___EMHandleRCTmpl_h
 
-#if defined(EMHANDLERC_WITH_PATM) && defined(EMHANDLERC_WITH_HM)
-# error "Only one define"
+#if defined(EMHANDLERC_WITH_PATM) + defined(EMHANDLERC_WITH_HM) + defined(EMHANDLERC_WITH_NEM) != 1
+# error "Exactly one of these must be defined: EMHANDLERC_WITH_PATM, EMHANDLERC_WITH_HM, EMHANDLERC_WITH_NEM"
 #endif
 

@@ -42 +42 @@
 #elif defined(EMHANDLERC_WITH_HM) || defined(DOXYGEN_RUNNING)
 int emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
+#elif defined(EMHANDLERC_WITH_NEM)
+int emR3NemHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
 #endif
 {

@@ -129 +131 @@
 #endif /* EMHANDLERC_WITH_PATM */
 
+#ifndef EMHANDLERC_WITH_NEM
         /*
         * Conflict or out of page tables.

@@ -168 +171 @@
             AssertMsg(RT_FAILURE(rc) || (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST), ("%Rrc\n", rc));
             break;
+#endif /* !EMHANDLERC_WITH_NEM */
 
 #ifdef EMHANDLERC_WITH_PATM

@@ -250 +254 @@
             break;
         }
+#endif
+        /** @todo IEM/REM need to handle VMCALL/VMMCALL, see
+         *        @bugref{7270#c168}. */
+        uint8_t cbInstr = 0;
+        VBOXSTRICTRC rcStrict = GIMExecHypercallInstr(pVCpu, pCtx, &cbInstr);
+        if (rcStrict == VINF_SUCCESS)
+        {
+            Assert(cbInstr);
+            pCtx->rip += cbInstr;
+            /* Update interrupt inhibition. */
+            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
+                && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
+                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+            rc = VINF_SUCCESS;
+        }
+        else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
+            rc = VINF_SUCCESS;
         else
-#endif
-        {
-            /** @todo IEM/REM need to handle VMCALL/VMMCALL, see
-             *        @bugref{7270#c168}. */
-            uint8_t cbInstr = 0;
-            VBOXSTRICTRC rcStrict = GIMExecHypercallInstr(pVCpu, pCtx, &cbInstr);
-            if (rcStrict == VINF_SUCCESS)
-            {
-                Assert(cbInstr);
-                pCtx->rip += cbInstr;
-                /* Update interrupt inhibition. */
-                if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
-                    && pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
-                    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
-                rc = VINF_SUCCESS;
-            }
-            else if (rcStrict == VINF_GIM_HYPERCALL_CONTINUING)
-                rc = VINF_SUCCESS;
-            else
-            {
-                Assert(rcStrict != VINF_GIM_R3_HYPERCALL);
-                rc = VBOXSTRICTRC_VAL(rcStrict);
-            }
-            break;
-        }
+        {
+            Assert(rcStrict != VINF_GIM_R3_HYPERCALL);
+            rc = VBOXSTRICTRC_VAL(rcStrict);
+        }
+        break;
     }
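EMHandleRCTmpl.h is never compiled on its own: EMRaw.cpp, EMHM.cpp and now EMR3Nem.cpp each define exactly one EMHANDLERC_WITH_* selector (plus the emR3ExecuteInstruction / emR3ExecuteIOInstruction name mappings) and then #include the header, so the same status-code handler body is instantiated once per execution engine, which is what the stricter "exactly one define" check above enforces. The two-file sketch below illustrates the same include-as-template technique under stated assumptions: handler_tmpl.h, em_nem.cpp, HANDLER_WITH_* and emNemHandleRc are names invented for the illustration, not the real VirtualBox ones.

    /* File 1 of 2: handler_tmpl.h -- the shared handler body.  The includer
     * must define exactly one engine selector and may rename the function. */
    #if defined(HANDLER_WITH_HM) + defined(HANDLER_WITH_NEM) != 1
    # error "Exactly one of HANDLER_WITH_HM / HANDLER_WITH_NEM must be defined"
    #endif

    static int handleRc(int rc)
    {
        /* Cases shared by every engine would live here. */
    #ifndef HANDLER_WITH_NEM
        /* Cases that only make sense outside NEM (e.g. the page-table sync
         * statuses excluded by #ifndef EMHANDLERC_WITH_NEM in the real
         * template) are compiled out of the NEM instantiation. */
        if (rc == 42)
            rc = 0;
    #endif
        return rc;
    }

    /* File 2 of 2: em_nem.cpp -- instantiates the template under a
     * NEM-specific name, the way EMR3Nem.cpp uses EMHandleRCTmpl.h. */
    #define HANDLER_WITH_NEM
    #define handleRc emNemHandleRc
    #include "handler_tmpl.h"
    #undef handleRc
    #undef HANDLER_WITH_NEM

    #include <cstdio>

    int main()
    {
        std::printf("emNemHandleRc(42) -> %d\n", emNemHandleRc(42)); // 42: NEM build skips the non-NEM case
        return 0;
    }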
trunk/src/VBox/VMM/include/EMInternal.h
(diff r69111:r70979)

@@ -407 +407 @@
     STAMPROFILE     StatHalted;
     STAMPROFILEADV  StatCapped;
-    STAMPROFILEADV  StatHmEntry;
-    STAMPROFILE     StatHmExec;
+    STAMPROFILEADV  StatHMEntry;
+    STAMPROFILE     StatHMExec;
     STAMPROFILE     StatIEMEmu;
     STAMPROFILE     StatIEMThenREM;
+    STAMPROFILEADV  StatNEMEntry;
+    STAMPROFILE     StatNEMExec;
     STAMPROFILE     StatREMEmu;
     STAMPROFILE     StatREMExec;

@@ -426 +428 @@
     /** R3: Profiling of emR3RawPrivileged. */
     STAMPROFILE     StatPrivEmu;
-    /** R3: Number of time emR3HmExecute is called. */
-    STAMCOUNTER     StatHmExecuteEntry;
+    /** R3: Number of times emR3HmExecute is called. */
+    STAMCOUNTER     StatHMExecuteCalled;
+    /** R3: Number of times emR3NEMExecute is called. */
+    STAMCOUNTER     StatNEMExecuteCalled;
 
     /** More statistics (R3). */

@@ -454 +458 @@
 int             emR3InitDbg(PVM pVM);
 
 int             emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+VBOXSTRICTRC    emR3NemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
 int             emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
+
 int             emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
 int             emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
+int             emR3NemHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
+
 EMSTATE         emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
 int             emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
 int             emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
+
 int             emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc);
 int             emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu);
 int             emR3RawStep(PVM pVM, PVMCPU pVCpu);
+
+VBOXSTRICTRC    emR3NemSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags);
+
 int             emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations);
+
 bool            emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu);
trunk/src/VBox/VMM/include/NEMInternal.h
(diff r70977:r70979)

@@ -115 +115 @@
 void    nemR3NativeReset(PVM pVM);
 void    nemR3NativeResetCpu(PVMCPU pVCpu);
+VBOXSTRICTRC    nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu);
+bool            nemR3NativeCanExecuteGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
+bool            nemR3NativeSetSingleInstruction(PVM pVM, PVMCPU pVCpu, bool fEnable);
+
 int     nemR3NativeNotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb);
 int     nemR3NativeNotifyPhysMmioExMap(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, uint32_t fFlags, void *pvMmio2);