Changeset 72642 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 21, 2018, 3:41:14 PM (7 years ago)
- svn:sync-xref-src-repo-rev: 123148
- Location: trunk/src/VBox/VMM
- Files: 1 added, 8 edited
Legend: in the diffs below, added lines are prefixed with `+`, removed lines with `-`, and unmodified context lines with a space.
trunk/src/VBox/VMM/Makefile.kmk (r72503 → r72642)

```diff
 	VMMR0/GMMR0.cpp \
 	VMMR0/GVMMR0.cpp \
+	VMMR0/EMR0.cpp \
 	VMMR0/HMR0.cpp \
 	VMMR0/HMR0A.asm \
```
trunk/src/VBox/VMM/VMMAll/EMAll.cpp (r72636 → r72642)

```diff
 }
 
+#ifndef IN_RC
 
 /**
...
                                              PEMEXITENTRY pHistEntry, uint64_t uExitNo)
 {
+# ifdef IN_RING0
+    /* Disregard the preempt disabled flag. */
+    uFlagsAndType &= ~EMEXIT_F_PREEMPT_DISABLED;
+# endif
+
     /*
      * Work the hash table.
      */
     AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitRecords) == 1024);
-#define EM_EXIT_RECORDS_IDX_MASK 0x3ff
+# define EM_EXIT_RECORDS_IDX_MASK 0x3ff
     uintptr_t  idxSlot  = ((uintptr_t)uFlatPC >> 1) & EM_EXIT_RECORDS_IDX_MASK;
     PEMEXITREC pExitRec = &pVCpu->em.s.aExitRecords[idxSlot];
...
 }
 
+#endif /* !IN_RC */
 
 /**
...
     pHistEntry->idxSlot = UINT32_MAX;
 
+#ifndef IN_RC
     /*
      * If common exit type, we will insert/update the exit into the exit record hash table.
      */
     if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
+# ifdef IN_RING0
+        && pVCpu->em.s.fExitOptimizationEnabledR0
+        && (   !(uFlagsAndType & EMEXIT_F_PREEMPT_DISABLED) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
+# else
         && pVCpu->em.s.fExitOptimizationEnabled
-        && uFlatPC != UINT64_MAX)
+# endif
+        && uFlatPC != UINT64_MAX
+       )
         return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
+#endif
     return NULL;
 }
...
     pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
 
+#ifndef IN_RC
     /*
      * If common exit type, we will insert/update the exit into the exit record hash table.
      */
     if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
+# ifdef IN_RING0
+        && pVCpu->em.s.fExitOptimizationEnabledR0
+        && (   !(uFlagsAndType & EMEXIT_F_PREEMPT_DISABLED) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
+# else
         && pVCpu->em.s.fExitOptimizationEnabled
-        && pHistEntry->uFlatPC != UINT64_MAX)
+# endif
+        && pHistEntry->uFlatPC != UINT64_MAX
+       )
         return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
+#endif
     return NULL;
 }
...
     pHistEntry->uFlatPC = uFlatPC;
 
+#ifndef IN_RC
     /*
      * If common exit type, we will insert/update the exit into the exit record hash table.
      */
     if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
-        && pVCpu->em.s.fExitOptimizationEnabled)
+# ifdef IN_RING0
+        && pVCpu->em.s.fExitOptimizationEnabledR0
+        && (   !(uFlagsAndType & EMEXIT_F_PREEMPT_DISABLED) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
+# else
+        && pVCpu->em.s.fExitOptimizationEnabled
+# endif
+       )
         return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
+#endif
     return NULL;
 }
```
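For orientation: the exit record lookup keys purely on the exit's flattened guest PC. With the 1024-entry aExitRecords table asserted above, the slot is picked by shifting the PC right one bit and masking with 0x3ff. Below is a standalone sketch of that index computation only; the structure and names are simplified stand-ins, not the real EMEXITREC or EMCPU layout.

```c
#include <stdint.h>

#define SKETCH_EXIT_RECORDS      1024                        /* matches the AssertCompile in the diff */
#define SKETCH_EXIT_RECORDS_MASK (SKETCH_EXIT_RECORDS - 1)   /* 0x3ff */

/* Simplified stand-in for one exit record slot. */
typedef struct SKETCHEXITREC
{
    uint64_t uFlatPC;        /* flattened guest PC this record tracks */
    uint64_t uFlagsAndType;  /* exit classification (kind + type flags) */
    uint64_t cHits;          /* number of times this exit has been seen */
} SKETCHEXITREC;

/* Pick the hash slot for a flat PC exactly as the diff's idxSlot line does:
 * drop the lowest address bit, then mask down to the table size. */
static inline uintptr_t sketchExitRecordSlot(uint64_t uFlatPC)
{
    return ((uintptr_t)uFlatPC >> 1) & SKETCH_EXIT_RECORDS_MASK;
}
```

Collision handling, allocation of overflow records and hit counting are all done by emHistoryAddOrUpdateRecord itself and are not sketched here.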
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r72607 → r72642)

```diff
 #endif
 {
-    uint32_t cInstructionSinceLastExit = 0;
+#ifdef IN_RING0
+    bool const fCheckPreemptionPending   = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
+#endif
+    uint32_t   cInstructionSinceLastExit = 0;
 
     /*
...
     {
 #ifdef IN_RING0
-        if (!RTThreadPreemptIsPending(NIL_RTTHREAD))
+        if (   !fCheckPreemptionPending
+            || !RTThreadPreemptIsPending(NIL_RTTHREAD))
 #endif
         {
```
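The ring-0 part of this change hoists the "must we poll for preemption?" decision out of the instruction loop: when the host kernel is fully preemptive and preemption is currently enabled, calling RTThreadPreemptIsPending() on every iteration is unnecessary. A sketch of that pattern follows; the RTThreadPreempt* calls are the IPRT APIs used in the diff, while sketchRing0ExecLoop and executeOneInstruction are hypothetical placeholders.

```c
#include <iprt/thread.h>

/* Placeholder for the per-iteration work (one guest instruction in IEM). */
static void executeOneInstruction(void);

/* Run up to cMaxInstructions, leaving early if the host wants the CPU back. */
static void sketchRing0ExecLoop(uint32_t cMaxInstructions)
{
    /* Decide once: polling is only needed when the kernel cannot simply
     * preempt this thread (non-preemptive kernel, or preemption disabled). */
    bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible()
                                      || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
    for (uint32_t i = 0; i < cMaxInstructions; i++)
    {
        executeOneInstruction();

        if (   fCheckPreemptionPending
            && RTThreadPreemptIsPending(NIL_RTTHREAD))
            break; /* bail out so the thread can be rescheduled */
    }
}
```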
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72639 → r72642)

```diff
     VBOXSTRICTRC rcStrict;
-    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_CPUID),
-                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    PCEMEXITREC pExitRec;
+    pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
+                                                                           EMEXITTYPE_CPUID),
+                                                pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
     if (!pExitRec)
     {
...
                            !fIOString
                            ? !fIOWrite
-                             ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_READ)
-                             : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_WRITE)
+                             ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
+                                                          EMEXITTYPE_IO_PORT_READ)
+                             : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
+                                                          EMEXITTYPE_IO_PORT_WRITE)
                            : !fIOWrite
-                             ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_READ)
-                             : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_IO_PORT_STR_WRITE),
+                             ? EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
+                                                          EMEXITTYPE_IO_PORT_STR_READ)
+                             : EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
+                                                          EMEXITTYPE_IO_PORT_STR_WRITE),
                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
     if (!pExitRec)
     {
...
     VBOXSTRICTRC rcStrict;
-    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu, EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM, EMEXITTYPE_MMIO),
-                                                            pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
+    PCEMEXITREC pExitRec;
+    pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
+                                                EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
+                                                                           EMEXITTYPE_MMIO),
+                                                pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
     if (!pExitRec)
     {
```
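These VT-x exit handlers run with preemption disabled, and the new EMEXIT_F_PREEMPT_DISABLED flag passes that context on to EM so it can apply the /EM/ExitOptimizationEnabledR0PreemptDisabled policy when deciding whether to build a record. A hedged sketch of the caller-side pattern, assuming the VirtualBox VMM headers; the two handler functions are hypothetical placeholders, not VirtualBox APIs.

```c
#include <VBox/vmm/em.h>   /* EMHistoryUpdateFlagsAndTypeAndPC, EMEXIT_*, PCEMEXITREC */

/* Hypothetical placeholders for the two possible handling paths. */
VBOXSTRICTRC sketchHandleCpuidNormally(PVMCPU pVCpu);
VBOXSTRICTRC sketchHandleHotExit(PVMCPU pVCpu, PCEMEXITREC pExitRec);

/* Record a CPUID exit taken with preemption disabled, then pick a path:
 * a NULL return means "not a tracked hot spot, handle it the ordinary way";
 * a non-NULL record means EM considers this exit frequent enough to optimize. */
VBOXSTRICTRC sketchRecordCpuidExit(PVMCPU pVCpu, uint64_t uFlatPC)
{
    PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu,
                                                            EMEXIT_MAKE_FLAGS_AND_TYPE(EMEXIT_F_KIND_EM | EMEXIT_F_PREEMPT_DISABLED,
                                                                                       EMEXITTYPE_CPUID),
                                                            uFlatPC);
    if (!pExitRec)
        return sketchHandleCpuidNormally(pVCpu);
    return sketchHandleHotExit(pVCpu, pExitRec);
}
```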
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp (r72617 → r72642)

```diff
 # include <VBox/vmm/nem.h>
 #endif
+#include <VBox/vmm/em.h>
 #include <VBox/vmm/stam.h>
 #include <VBox/vmm/tm.h>
```

In the ring-0 VM init path, EMR0InitVM is now called before the PciRaw/GIM chain; the pre-existing initialization and bail-out code is re-indented one level under the new check, so only the genuinely new lines are marked below:

```diff
     {
         VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
+        rc = EMR0InitVM(pGVM, pVM);
+        if (RT_SUCCESS(rc))
+        {
+            VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
 #ifdef VBOX_WITH_PCI_PASSTHROUGH
             rc = PciRawR0InitVM(pGVM, pVM);
 #endif
             if (RT_SUCCESS(rc))
             {
                 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
                 rc = GIMR0InitVM(pVM);
                 if (RT_SUCCESS(rc))
                 {
                     VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
                     if (RT_SUCCESS(rc))
                     {
                         GVMMR0DoneInitVM(pGVM);
 
                         /*
                          * Collect a bit of info for the VM release log.
                          */
                         pVM->vmm.s.fIsPreemptPendingApiTrusty = RTThreadPreemptIsPendingTrusty();
                         pVM->vmm.s.fIsPreemptPossible         = RTThreadPreemptIsPossible();;
 
                         VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
                         return rc;
                     }
 
                     /* bail out*/
                     GIMR0TermVM(pVM);
                 }
 #ifdef VBOX_WITH_PCI_PASSTHROUGH
                 PciRawR0TermVM(pGVM, pVM);
 #endif
             }
+        }
     }
```
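The restructuring follows the usual nested init/unwind shape: each ring-0 subsystem is initialized only if the previous one succeeded, and on a later failure each level tears down what it set up, in reverse order (note that in this diff the new EMR0InitVM level has no matching term call in the bail-out path). A generic sketch of the shape; initA/initB/initC and the term functions are hypothetical placeholders standing in for calls like EMR0InitVM, PciRawR0InitVM and GIMR0InitVM.

```c
#include <iprt/err.h>   /* RT_SUCCESS, VINF_SUCCESS */

/* Hypothetical subsystem init/term placeholders. */
int  initA(void);  void termA(void);
int  initB(void);  void termB(void);
int  initC(void);

/* Initialize A, then B, then C; unwind in reverse order on failure. */
static int sketchInitChain(void)
{
    int rc = initA();
    if (RT_SUCCESS(rc))
    {
        rc = initB();
        if (RT_SUCCESS(rc))
        {
            rc = initC();
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;   /* everything is up */

            /* bail out: C failed, undo B ... */
            termB();
        }
        /* ... and finally A. */
        termA();
    }
    return rc;
}
```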
trunk/src/VBox/VMM/VMMR3/EM.cpp (r72634 → r72642)

```diff
     }
 
-    /**
-     * @cfgm{/EM/ExitOptimizationEnabled, bool, true for NEM otherwise false}
-     * Whether to try correlate exit history, detect hot spots and try optimize
-     * these using IEM if there are other exits close by.
-     * @todo enable for HM too.
-     */
+    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
+            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
+
+    /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
+     * Whether to try correlate exit history in any context, detect hot spots and
+     * try optimize these using IEM if there are other exits close by.  This
+     * overrides the context specific settings. */
     bool fExitOptimizationEnabled = true;
-    rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, VM_IS_NEM_ENABLED(pVM));
+    rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
     AssertLogRelRCReturn(rc, rc);
 
+    /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
+     * Whether to optimize exits in ring-0.  Setting this to false will also disable
+     * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting.  Depending on preemption
+     * capabilities of the host kernel, this optimization may be unavailable. */
+    bool fExitOptimizationEnabledR0 = true;
+    rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
+    AssertLogRelRCReturn(rc, rc);
+    fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
+
+    /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
+     * Whether to optimize exits in ring-0 when preemption is disable (or preemption
+     * hooks are in effect). */
+    /** @todo change the default to true here */
+    bool fExitOptimizationEnabledR0PreemptDisabled = true;
+    rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
+    AssertLogRelRCReturn(rc, rc);
+    fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
+
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
-        pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
-
-    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool fExitOptimizationEnabled=%RTbool\n",
-            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault, fExitOptimizationEnabled));
+    {
+        pVM->aCpus[i].em.s.fExitOptimizationEnabled                  = fExitOptimizationEnabled;
+        pVM->aCpus[i].em.s.fExitOptimizationEnabledR0                = fExitOptimizationEnabledR0;
+        pVM->aCpus[i].em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
+    }
 
 #ifdef VBOX_WITH_REM
...
 /**
+ * Called when a VM initialization stage is completed.
+ *
+ * @returns VBox status code.
+ * @param   pVM             The cross context VM structure.
+ * @param   enmWhat         The initialization state that was completed.
+ */
+VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
+{
+    if (enmWhat == VMINITCOMPLETED_RING0)
+        LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
+                pVM->aCpus[0].em.s.fExitOptimizationEnabled, pVM->aCpus[0].em.s.fExitOptimizationEnabledR0,
+                pVM->aCpus[0].em.s.fExitOptimizationEnabledR0PreemptDisabled));
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Applies relocations to data and code managed by this
  * component. This function will be called at init and
```
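The three CFGM keys cascade: /EM/ExitOptimizationEnabled gates /EM/ExitOptimizationEnabledR0, which in turn gates /EM/ExitOptimizationEnabledR0PreemptDisabled (still defaulting to false per the @todo). A minimal sketch of that cascade and the resulting per-VCPU decision, using simplified stand-in types rather than the real EMCPU structure:

```c
#include <stdbool.h>

/* Simplified stand-ins for the per-VCPU flags added to EMInternal.h; the real
 * fields live in the EMCPU structure and are filled in by EMR3Init from CFGM. */
typedef struct EMEXITOPTSKETCH
{
    bool fEnabled;                  /* /EM/ExitOptimizationEnabled */
    bool fEnabledR0;                /* /EM/ExitOptimizationEnabledR0 */
    bool fEnabledR0PreemptDisabled; /* /EM/ExitOptimizationEnabledR0PreemptDisabled */
} EMEXITOPTSKETCH;

/* Mirror of the cascade applied in EMR3Init: the ring-0 switch cannot be set
 * without the general one, and the preempt-disabled variant requires both. */
static void sketchApplyCascade(EMEXITOPTSKETCH *pOpts)
{
    pOpts->fEnabledR0                &= pOpts->fEnabled;
    pOpts->fEnabledR0PreemptDisabled &= pOpts->fEnabledR0;
}

/* Would a ring-0 exit, taken while preemption is disabled, still be recorded? */
static bool sketchRecordR0PreemptDisabledExit(EMEXITOPTSKETCH const *pOpts)
{
    return pOpts->fEnabledR0 && pOpts->fEnabledR0PreemptDisabled;
}
```

If the defaults need overriding on a particular VM, the usual route is CFGM extradata (for example, VBoxManage setextradata with a "VBoxInternal/EM/..." key), though that mechanism is outside this changeset.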
trunk/src/VBox/VMM/VMMR3/VM.cpp (r72343 → r72642)

```diff
     if (RT_SUCCESS(rc))
         rc = CPUMR3InitCompleted(pVM, enmWhat);
+    if (RT_SUCCESS(rc))
+        rc = EMR3InitCompleted(pVM, enmWhat);
     if (enmWhat == VMINITCOMPLETED_RING3)
     {
```
trunk/src/VBox/VMM/include/EMInternal.h (r72634 → r72642)

```diff
      *  wrapped around or not. */
     uint64_t                iNextExit;
-    /** Whether exit optimizations are enabled or not. */
-    bool                    fExitOptimizationEnabled;
+    /** Whether exit optimizations are enabled or not (in general). */
+    bool                    fExitOptimizationEnabled : 1;
+    /** Whether exit optimizations are enabled for ring-0 (in general). */
+    bool                    fExitOptimizationEnabledR0 : 1;
+    /** Whether exit optimizations are enabled for ring-0 when preemption is disabled. */
+    bool                    fExitOptimizationEnabledR0PreemptDisabled : 1;
     /** Explicit padding. */
     bool                    afPadding2[1];
```