Changeset 80024 in vbox
- Timestamp: Jul 28, 2019 1:30:53 PM (6 years ago)
- svn:sync-xref-src-repo-rev: 132459
- Location: trunk
- Files: 15 edited
trunk/include/VBox/vmm/em.h
r78712 → r80024

     /** @} */

-
-/**
- * Checks if raw ring-3 execute mode is enabled.
- *
- * @returns true if enabled.
- * @returns false if disabled.
- * @param   pVM     The cross context VM structure.
- */
-#define EMIsRawRing3Enabled(pVM)            (!(pVM)->fRecompileUser)
-
-/**
- * Checks if raw ring-0 execute mode is enabled.
- *
- * @returns true if enabled.
- * @returns false if disabled.
- * @param   pVM     The cross context VM structure.
- */
-#define EMIsRawRing0Enabled(pVM)            (!(pVM)->fRecompileSupervisor)
-
-#ifdef VBOX_WITH_RAW_RING1
-/**
- * Checks if raw ring-1 execute mode is enabled.
- *
- * @returns true if enabled.
- * @returns false if disabled.
- * @param   pVM     The cross context VM structure.
- */
-# define EMIsRawRing1Enabled(pVM)           ((pVM)->fRawRing1Enabled)
-#else
-# define EMIsRawRing1Enabled(pVM)           false
-#endif
-
-/**
- * Checks if execution with hardware assisted virtualization is enabled.
- *
- * @returns true if enabled.
- * @returns false if disabled.
- * @param   pVM     The cross context VM structure.
- */
-#define EMIsHwVirtExecutionEnabled(pVM)     (!(pVM)->fRecompileSupervisor && !(pVM)->fRecompileUser)
-
-/**
- * Checks if execution of supervisor code should be done in the
- * recompiler or not.
- *
- * @returns true if enabled.
- * @returns false if disabled.
- * @param   pVM     The cross context VM structure.
- */
-#define EMIsSupervisorCodeRecompiled(pVM)   ((pVM)->fRecompileSupervisor)
-
 VMMDECL(void)        EMSetInhibitInterruptsPC(PVMCPU pVCpu, RTGCUINTPTR PC);
 VMMDECL(RTGCUINTPTR) EMGetInhibitInterruptsPC(PVMCPU pVCpu);
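With these macros gone there is no EM-level "is hardware-assisted execution enabled" query any more. A minimal caller-side sketch of an equivalent check using flags that survive in the VM structure; the helper name is hypothetical and not part of this changeset, and it assumes the VM_IS_HM_ENABLED()/VM_IS_NEM_ENABLED() test macros from vm.h (the former appears in the EM.cpp hunk below):

    /* Sketch only, not from the changeset. */
    static bool myIsHwAssistedExecution(PVM pVM)
    {
        /* HM covers VT-x/AMD-V; NEM covers the native API engines (Hyper-V, etc.). */
        return VM_IS_HM_ENABLED(pVM) || VM_IS_NEM_ENABLED(pVM);
    }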
trunk/include/VBox/vmm/vm.h
r80020 → r80024

     uint8_t const               bMainExecutionEngine;

-    /** Whether to recompile user mode code or run it raw/hm/nem.
-     * In non-raw-mode both fRecompileUser and fRecompileSupervisor must be set
-     * to recompiler stuff. */
-    bool                        fRecompileUser;
-    /** Whether to recompile supervisor mode code or run it raw/hm/nem.
-     * In non-raw-mode both fRecompileUser and fRecompileSupervisor must be set
-     * to recompiler stuff. */
-    bool                        fRecompileSupervisor;
-    /** Whether raw mode supports ring-1 code or not.
-     * This will be cleared when not in raw-mode. */
-    bool                        fRawRing1Enabled;
-    /** PATM enabled flag.
-     * This is placed here for performance reasons.
-     * This will be cleared when not in raw-mode. */
-    bool                        fPATMEnabled;
-    /** CSAM enabled flag.
-     * This is placed here for performance reasons.
-     * This will be cleared when not in raw-mode. */
-    bool                        fCSAMEnabled;
-
     /** Hardware VM support is available and enabled.
      * Determined very early during init.
...
     /** Alignment padding. */
-    uint8_t                     uPadding1[2];
+    uint8_t                     uPadding1[7];

     /** @name Debugging
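The padding change is plain layout arithmetic; a sketch of why the new value is 7 (commentary, not part of the diff):

    /* Five 1-byte flags leave the VM structure:
     *   fRecompileUser, fRecompileSupervisor, fRawRing1Enabled,
     *   fPATMEnabled, fCSAMEnabled                         -> 5 bytes
     * old padding                                           -> 2 bytes
     * new padding = 5 + 2 = 7 bytes,
     * so the members following uPadding1 keep their offsets and the vm.mac /
     * tstVMStruct.h hunks below stay in sync with vm.h. */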
trunk/include/VBox/vmm/vm.mac
r79995 → r80024

 %endif
     .bMainExecutionEngine   resb 1
-    .fRecompileUser         resb 1
-    .fRecompileSupervisor   resb 1
-    .fRawRing1Enabled       resb 1
-    .fPATMEnabled           resb 1
-    .fCSAMEnabled           resb 1
     .fHMEnabled             resb 1
     .fHMNeedRawModeCtx      resb 1
...
     .fUseLargePages         resb 1

-    .uPadding1              resb 2
+    .uPadding1              resb 7

     .hTraceBufR3            RTR3PTR_RES 1
trunk/src/VBox/Runtime/testcase/tstLdrObj.cpp
r76553 → r80024

 extern "C" DECLEXPORT(int) Entrypoint(void)
 {
-    g_VM.fRecompileSupervisor = false;
-    g_VM.fRecompileUser = false;
     g_VM.fGlobalForcedActions = 0;
     strcpy(achBss, szStr2);
trunk/src/VBox/VMM/Config.kmk
r78438 → r80024

  VMM_COMMON_DEFS += VBOX_WITH_SAFE_STR
 endif
-ifdef VBOX_WITH_RAW_RING1
- VMM_COMMON_DEFS += VBOX_WITH_RAW_RING1
-endif
 ifdef VBOX_WITH_64ON32_IDT
  VMM_COMMON_DEFS += VBOX_WITH_64ON32_IDT
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r80020 → r80024

     else
     {
-# ifdef VBOX_WITH_RAW_RING1
-        if (   EMIsRawRing1Enabled(pVM)
-            && !pCtx->eflags.Bits.u1VM
-            && (pCtx->ss.Sel & X86_SEL_RPL) == 1)
-        {
-            /* Set CPL to Ring-2. */
-            pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 2;
-            if (pCtx->cs.Sel && (pCtx->cs.Sel & X86_SEL_RPL) == 1)
-                pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 2;
-        }
-# else
         AssertMsg((pCtx->ss.Sel & X86_SEL_RPL) >= 2 || pCtx->eflags.Bits.u1VM,
                   ("ring-1 code not supported\n"));
-# endif
         /*
          * PATM takes care of IOPL and IF flags for Ring-3 and Ring-2 code as well.
...
     if (!pCtx->eflags.Bits.u1VM)
     {
-# ifdef VBOX_WITH_RAW_RING1
-        if (   EMIsRawRing1Enabled(pVM)
-            && (pCtx->ss.Sel & X86_SEL_RPL) == 2)
-        {
-            /* Not quite sure if this is really required, but shouldn't harm (too much anyways). */
-            /** @todo See what happens if we remove this. */
-            if ((pCtx->ds.Sel & X86_SEL_RPL) == 2)
-                pCtx->ds.Sel = (pCtx->ds.Sel & ~X86_SEL_RPL) | 1;
-            if ((pCtx->es.Sel & X86_SEL_RPL) == 2)
-                pCtx->es.Sel = (pCtx->es.Sel & ~X86_SEL_RPL) | 1;
-            if ((pCtx->fs.Sel & X86_SEL_RPL) == 2)
-                pCtx->fs.Sel = (pCtx->fs.Sel & ~X86_SEL_RPL) | 1;
-            if ((pCtx->gs.Sel & X86_SEL_RPL) == 2)
-                pCtx->gs.Sel = (pCtx->gs.Sel & ~X86_SEL_RPL) | 1;
-
-            /*
-             * Ring-2 selector => Ring-1.
-             */
-            pCtx->ss.Sel = (pCtx->ss.Sel & ~X86_SEL_RPL) | 1;
-            if ((pCtx->cs.Sel & X86_SEL_RPL) == 2)
-                pCtx->cs.Sel = (pCtx->cs.Sel & ~X86_SEL_RPL) | 1;
-        }
-        else
-        {
-# endif
-            /** @todo See what happens if we remove this. */
-            if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
-                pCtx->ds.Sel &= ~X86_SEL_RPL;
-            if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
-                pCtx->es.Sel &= ~X86_SEL_RPL;
-            if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
-                pCtx->fs.Sel &= ~X86_SEL_RPL;
-            if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
-                pCtx->gs.Sel &= ~X86_SEL_RPL;
-# ifdef VBOX_WITH_RAW_RING1
-        }
-# endif
+        /** @todo See what happens if we remove this. */
+        if ((pCtx->ds.Sel & X86_SEL_RPL) == 1)
+            pCtx->ds.Sel &= ~X86_SEL_RPL;
+        if ((pCtx->es.Sel & X86_SEL_RPL) == 1)
+            pCtx->es.Sel &= ~X86_SEL_RPL;
+        if ((pCtx->fs.Sel & X86_SEL_RPL) == 1)
+            pCtx->fs.Sel &= ~X86_SEL_RPL;
+        if ((pCtx->gs.Sel & X86_SEL_RPL) == 1)
+            pCtx->gs.Sel &= ~X86_SEL_RPL;
     }
 }
...
     if (pVCpu->cpum.s.fRawEntered)
     {
-        if (   uCpl == 2
-            && EMIsRawRing1Enabled(pVCpu->CTX_SUFF(pVM)))
-            uCpl = 1;
-        else if (uCpl == 1)
+        if (uCpl == 1)
             uCpl = 0;
     }
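For readers unfamiliar with the ring compression these hunks are about: raw mode runs guest ring-0 code at ring 1, so the code that survives here only has to fold RPL 1 selectors back to RPL 0 when leaving raw mode (the RPL 2 handling removed above existed solely for the experimental VBOX_WITH_RAW_RING1 mode). A standalone illustration of that surviving mask operation, with a hypothetical helper name that is not in the tree:

    /* X86_SEL_RPL is the low two selector bits (0x0003). */
    static uint16_t myFoldSelBackToRing0(uint16_t Sel)
    {
        if ((Sel & X86_SEL_RPL) == 1)
            Sel &= ~X86_SEL_RPL;    /* e.g. 0x000f -> 0x000c */
        return Sel;
    }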
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r80020 → r80024

         return iemRaiseGeneralProtectionFaultBySelector(pVCpu, uNewCs);
     }

-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    /* Raw ring-0 and ring-1 compression adjustments for PATM performance tricks and other CS leaks. */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    if (EMIsRawRing0Enabled(pVM) && VM_IS_RAW_MODE_ENABLED(pVM))
-    {
-        if ((uNewCs & X86_SEL_RPL) == 1)
-        {
-            if (   pVCpu->iem.s.uCpl == 0
-                && (   !EMIsRawRing1Enabled(pVM)
-                    || pVCpu->cpum.GstCtx.cs.Sel == (uNewCs & X86_SEL_MASK_OFF_RPL)) )
-            {
-                Log(("iret: Ring-0 compression fix: uNewCS=%#x -> %#x\n", uNewCs, uNewCs & X86_SEL_MASK_OFF_RPL));
-                uNewCs &= X86_SEL_MASK_OFF_RPL;
-            }
-# ifdef LOG_ENABLED
-            else if (pVCpu->iem.s.uCpl <= 1 && EMIsRawRing1Enabled(pVM))
-                Log(("iret: uNewCs=%#x genuine return to ring-1.\n", uNewCs));
-# endif
-        }
-        else if (   (uNewCs & X86_SEL_RPL) == 2
-                 && EMIsRawRing1Enabled(pVM)
-                 && pVCpu->iem.s.uCpl <= 1)
-        {
-            Log(("iret: Ring-1 compression fix: uNewCS=%#x -> %#x\n", uNewCs, (uNewCs & X86_SEL_MASK_OFF_RPL) | 1));
-            uNewCs = (uNewCs & X86_SEL_MASK_OFF_RPL) | 2;
-        }
-    }
-#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
-

     /* Privilege checks. */
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r76553 → r80024

         unsigned iPT = pCur->cPTs;
         while (iPT-- > 0)
-            if (    pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
-                &&  (EMIsRawRing0Enabled(pVM) || pPD->a[iPDE + iPT].n.u1User))
+            if (pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */)
             {
                 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
...
         X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);

-        if (   Pde.n.u1Present
-            && (EMIsRawRing0Enabled(pVM) || Pde.n.u1User))
+        if (Pde.n.u1Present)
         {
             STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
...
         while (iPT-- > 0)
         {
-            if (   pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */
-                && (   EMIsRawRing0Enabled(pVM)
-                    || pPD->a[iPDE + iPT].n.u1User))
+            if (pPD->a[iPDE + iPT].n.u1Present /** @todo PGMGstGetPDE. */)
             {
                 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
...
         X86PDEPAE Pde = pgmGstGetPaePDE(pVCpu, GCPtr);

-        if (   Pde.n.u1Present
-            && (EMIsRawRing0Enabled(pVM) || Pde.n.u1User))
+        if (Pde.n.u1Present)
         {
             STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
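A one-line way to read all four hunks above (commentary only, not part of the diff):

    /* before: Present && (EMIsRawRing0Enabled(pVM) || User)
     * after:  Present
     * With the raw-ring-0 policy flag gone, any present guest PDE in the
     * monitored range counts as a mapping conflict, whether or not it maps a
     * user page. */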
trunk/src/VBox/VMM/VMMR3/EM.cpp
r80020 → r80024

     PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

+    int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
+    AssertLogRelRCReturn(rc, rc);
+
     bool fEnabled;
-    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
-    AssertLogRelRCReturn(rc, rc);
-    pVM->fRecompileUser = !fEnabled;
-
-    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
-    AssertLogRelRCReturn(rc, rc);
-    pVM->fRecompileSupervisor = !fEnabled;
-
-#ifdef VBOX_WITH_RAW_RING1
-    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
-    AssertLogRelRCReturn(rc, rc);
-#else
-    pVM->fRawRing1Enabled = false; /* Disabled by default. */
-#endif
-
-    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
-    AssertLogRelRCReturn(rc, rc);
-
     rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
     AssertLogRelRCReturn(rc, rc);
...
     }

-    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
-            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
+    LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

     /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
...
     {
         case EMEXECPOLICY_RECOMPILE_RING0:
-            pVM->fRecompileSupervisor = pArgs->fEnforce;
-            break;
         case EMEXECPOLICY_RECOMPILE_RING3:
-            pVM->fRecompileUser = pArgs->fEnforce;
             break;
         case EMEXECPOLICY_IEM_ALL:
...
             AssertFailedReturn(VERR_INVALID_PARAMETER);
     }
-    Log(("EM: Set execution policy (fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool)\n",
-         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
+    Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
 }
...
     {
         case EMEXECPOLICY_RECOMPILE_RING0:
-            *pfEnforced = pVM->fRecompileSupervisor;
-            break;
         case EMEXECPOLICY_RECOMPILE_RING3:
-            *pfEnforced = pVM->fRecompileUser;
+            *pfEnforced = false;
             break;
         case EMEXECPOLICY_IEM_ALL:
...
     if (!VM_IS_RAW_MODE_ENABLED(pVM))
     {
-        if (EMIsHwVirtExecutionEnabled(pVM))
-        {
-            if (VM_IS_HM_ENABLED(pVM))
-            {
-                if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
-                    return EMSTATE_HM;
-            }
-            else if (NEMR3CanExecuteGuest(pVM, pVCpu))
-                return EMSTATE_NEM;
-
-            /*
-             * Note! Raw mode and hw accelerated mode are incompatible. The latter
-             *       turns off monitoring features essential for raw mode!
-             */
-            return EMSTATE_IEM_THEN_REM;
-        }
+        if (VM_IS_HM_ENABLED(pVM))
+        {
+            if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
+                return EMSTATE_HM;
+        }
+        else if (NEMR3CanExecuteGuest(pVM, pVCpu))
+            return EMSTATE_NEM;
+
+        /*
+         * Note! Raw mode and hw accelerated mode are incompatible. The latter
+         *       turns off monitoring features essential for raw mode!
+         */
+        return EMSTATE_IEM_THEN_REM;
     }
...
         || (uSS & X86_SEL_RPL) == 3)
     {
-        if (!EMIsRawRing3Enabled(pVM))
-            return EMSTATE_REM;
-
         if (!(EFlags.u32 & X86_EFL_IF))
         {
...
         }

-        if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
+        if (!(u32CR0 & X86_CR0_WP))
         {
             Log2(("raw mode refused: CR0.WP + RawR0\n"));
...
     }
     else
     {
-        if (!EMIsRawRing0Enabled(pVM))
-            return EMSTATE_REM;
-
-        if (EMIsRawRing1Enabled(pVM))
-        {
-            /* Only ring 0 and 1 supervisor code. */
-            if ((uSS & X86_SEL_RPL) == 2)   /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
-            {
-                Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
-                return EMSTATE_REM;
-            }
-        }
         /* Only ring 0 supervisor code. */
-        else if ((uSS & X86_SEL_RPL) != 0)
+        if ((uSS & X86_SEL_RPL) != 0)
         {
             Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
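After this change the ring-0 and ring-3 recompile policies are still accepted by the setter but no longer do anything, and a query always reports them as not enforced. A caller-side sketch of what that looks like through the public API (EMR3QueryExecutionPolicy is the PUVM wrapper around the worker shown above; pUVM here is an assumed valid user-mode VM handle):

    bool fEnforced = true;
    int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_RECOMPILE_RING0, &fEnforced);
    if (RT_SUCCESS(rc))
        Assert(!fEnforced);   /* always false as of this changeset */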
trunk/src/VBox/VMM/VMMR3/VM.cpp
r80013 → r80024

 static int  vmR3InitRing3(PVM pVM, PUVM pUVM);
 static int  vmR3InitRing0(PVM pVM);
-#ifdef VBOX_WITH_RAW_MODE
-static int  vmR3InitRC(PVM pVM);
-#endif
 static int  vmR3InitDoCompleted(PVM pVM, VMINITCOMPLETED enmWhat);
 static void vmR3DestroyUVM(PUVM pUVM, uint32_t cMilliesEMTWait);
...
     AssertRelease(pVM->cCpus == cCpus);
     AssertRelease(pVM->uCpuExecutionCap == 100);
-#ifdef VBOX_WITH_RAW_MODE
-    AssertRelease(pVM->offVMCPU == RT_UOFFSETOF(VM, aCpus));
-#endif
     AssertCompileMemberAlignment(VM, cpum, 64);
     AssertCompileMemberAlignment(VM, tm, 64);
...
 #endif
             /*
-             * Init the Raw-Mode Context components.
+             * Now we can safely set the VM halt method to default.
              */
-#ifdef VBOX_WITH_RAW_MODE
-            rc = vmR3InitRC(pVM);
+            rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
             if (RT_SUCCESS(rc))
-#endif
             {
                 /*
-                 * Now we can safely set the VM halt method to default.
+                 * Set the state and we're done.
                  */
-                rc = vmR3SetHaltMethodU(pUVM, VMHALTMETHOD_DEFAULT);
-                if (RT_SUCCESS(rc))
-                {
-                    /*
-                     * Set the state and we're done.
-                     */
-                    vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
-                    return VINF_SUCCESS;
-                }
+                vmR3SetState(pVM, VMSTATE_CREATED, VMSTATE_CREATING);
+                return VINF_SUCCESS;
             }
 #ifdef VBOX_WITH_DEBUGGER
...
      * Base EM and HM config properties.
      */
-    /** @todo We don't need to read any of this here.  The relevant modules reads
-     *        them again and will be in a better position to set them correctly. */
-    Assert(pVM->fRecompileUser == false); /* ASSUMES all zeros at this point */
-    bool fEnabled;
-    rc = CFGMR3QueryBoolDef(pRoot, "RawR3Enabled", &fEnabled, false);    AssertRCReturn(rc, rc);
-    pVM->fRecompileUser = !fEnabled;
-    rc = CFGMR3QueryBoolDef(pRoot, "RawR0Enabled", &fEnabled, false);    AssertRCReturn(rc, rc);
-    pVM->fRecompileSupervisor = !fEnabled;
-#ifdef VBOX_WITH_RAW_MODE
-# ifdef VBOX_WITH_RAW_RING1
-    rc = CFGMR3QueryBoolDef(pRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
-# endif
-    rc = CFGMR3QueryBoolDef(pRoot, "PATMEnabled", &pVM->fPATMEnabled, true);    AssertRCReturn(rc, rc);
-    rc = CFGMR3QueryBoolDef(pRoot, "CSAMEnabled", &pVM->fCSAMEnabled, true);    AssertRCReturn(rc, rc);
-    rc = CFGMR3QueryBoolDef(pRoot, "HMEnabled", &pVM->fHMEnabled, true);    AssertRCReturn(rc, rc);
-#else
     pVM->fHMEnabled = true;
-#endif
-    LogRel(("VM: fHMEnabled=%RTbool (configured) fRecompileUser=%RTbool fRecompileSupervisor=%RTbool\n"
-            "VM: fRawRing1Enabled=%RTbool CSAM=%RTbool PATM=%RTbool\n",
-            pVM->fHMEnabled, pVM->fRecompileUser, pVM->fRecompileSupervisor,
-            pVM->fRawRing1Enabled, pVM->fCSAMEnabled, pVM->fPATMEnabled));

     /*
trunk/src/VBox/VMM/include/SELMInline.h
r76585 → r80024

         pDesc->Gen.u1Available  = 1;
     }
-# ifdef VBOX_WITH_RAW_RING1
-    else if (   pDesc->Gen.u2Dpl == 1
-             && EMIsRawRing1Enabled(pVM)
-             && (pDesc->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
-                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
-    {
-        pDesc->Gen.u2Dpl        = 2;
-        pDesc->Gen.u1Available  = 1;
-    }
-# endif
     else
         pDesc->Gen.u1Available  = 0;
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r80016 → r80024

     GEN_CHECK_OFF(VM, cbSelf);
     GEN_CHECK_OFF(VM, bMainExecutionEngine);
-    GEN_CHECK_OFF(VM, fRecompileUser);
-    GEN_CHECK_OFF(VM, fRecompileSupervisor);
-    GEN_CHECK_OFF(VM, fPATMEnabled);
-    GEN_CHECK_OFF(VM, fCSAMEnabled);
     GEN_CHECK_OFF(VM, fHMEnabled);
     GEN_CHECK_OFF(VM, fFaultTolerantMaster);
trunk/src/recompiler/Makefile.kmk
r76553 → r80024

 ifdef VBOX_WITH_RAW_MODE
  VBoxRemPrimary_DEFS += VBOX_WITH_RAW_MODE
 endif
-ifdef VBOX_WITH_RAW_RING1
- VBoxRemPrimary_DEFS += VBOX_WITH_RAW_RING1
-endif
 VBoxRemPrimary_DEFS.linux = _GNU_SOURCE
trunk/src/recompiler/VBoxRecompiler.c
r80020 → r80024

     /*
-     * The simple check first...
-     */
-    if (!EMIsHwVirtExecutionEnabled(env->pVM))
-        return false;
-
-    /*
      * Create partial context for HMCanExecuteGuest.
      */
...
     if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
     {
-        if (!EMIsRawRing3Enabled(env->pVM))
-            return false;
-
         if (!(env->eflags & IF_MASK))
         {
...
         }

-        if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
+        if (!(u32CR0 & CR0_WP_MASK))
         {
             STAM_COUNTER_INC(&gStatRefuseWP0);
...
     }
     else
     {
-        if (!EMIsRawRing0Enabled(env->pVM))
-            return false;
-
         // Let's start with pure 32 bits ring 0 code first
         if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
         {
...
         }

-        if (EMIsRawRing1Enabled(env->pVM))
-        {
-            /* Only ring 0 and 1 supervisor code. */
-            if (((fFlags >> HF_CPL_SHIFT) & 3) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
-            {
-                Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
-                return false;
-            }
-        }
         /* Only R0. */
-        else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
+        if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
         {
             STAM_COUNTER_INC(&gStatRefuseRing1or2);
trunk/src/recompiler/target-i386/op_helper.c
r76397 → r80024

             goto return_to_vm86;
         }
-#ifdef VBOX
-        if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
-        {
-            if (   !EMIsRawRing1Enabled(env->pVM)
-                || env->segs[R_CS].selector == (new_cs & 0xfffc))
-            {
-                Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
-                new_cs = new_cs & 0xfffc;
-            }
-            else
-            {
-                /* Ugly assumption: assume a genuine switch to ring-1. */
-                Log(("Genuine switch to ring-1 (iret)\n"));
-            }
-        }
-        else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
-        {
-            Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
-            new_cs = (new_cs & 0xfffc) | 1;
-        }
-#endif
     } else {
         /* 16 bits */