Changeset 73617 in vbox for trunk/src/VBox
- Timestamp: Aug 10, 2018 2:09:55 PM (6 years ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp (r73606 → r73617)

Added (new lines 57-92, ahead of the existing "Queues a guest page for invalidation" function):

/**
 * Checks if the guest is in a suitable state for hardware-assisted execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context created and not necessarily the same as
 *          pVCpu->cpum.GstCtx.
 */
VMMDECL(bool) HMCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));

#ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
    if (   CPUMIsGuestInSvmNestedHwVirtMode(pCtx)
        || CPUMIsGuestVmxEnabled(pCtx))
    {
        LogFunc(("In nested-guest mode - returning false"));
        return false;
    }
#endif

    /* AMD-V supports real & protected mode with or without paging. */
    if (pVM->hm.s.svm.fEnabled)
    {
        pVCpu->hm.s.fActive = true;
        return true;
    }

    return HMVmxCanExecuteGuest(pVCpu, pCtx);
}
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp (r73610 → r73617)

Added #include <VBox/vmm/pdmapi.h> after the existing #include <VBox/vmm/vm.h> (new line 26).

Added (new lines 169-501):

/**
 * Checks if a code selector (CS) is suitable for execution using hardware-assisted
 * VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel        Pointer to the selector to check (CS).
 * @param   uStackDpl   The CPL, aka the DPL of the stack segment.
 */
static bool hmVmxIsCodeSelectorOk(PCCPUMSELREG pSel, unsigned uStackDpl)
{
    /*
     * Segment must be an accessed code segment, it must be present and it must
     * be usable.
     * Note! These are all standard requirements and if CS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P | X86DESCATTR_UNUSABLE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_CODE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u),
                    false);

    /* For conforming segments, CS.DPL must be <= SS.DPL, while CS.DPL
       must equal SS.DPL for non-confroming segments.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(   pSel->Attr.n.u4Type & X86_SEL_TYPE_CONF
                    ?  pSel->Attr.n.u2Dpl <= uStackDpl
                    :  pSel->Attr.n.u2Dpl == uStackDpl,
                    ("u4Type=%#x u2Dpl=%u uStackDpl=%u\n", pSel->Attr.n.u4Type, pSel->Attr.n.u2Dpl, uStackDpl),
                    false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if a data selector (DS/ES/FS/GS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel    Pointer to the selector to check
 *                  (DS/ES/FS/GS).
 */
static bool hmVmxIsDataSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, as
     * but as an alternative we for old saved states and AMD<->VT-x migration
     * we also treat segments with all the attributes cleared as unusable.
     */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /** @todo tighten these checks. Will require CPUM load adjusting. */

    /* Segment must be accessed. */
    if (pSel->Attr.u & X86_SEL_TYPE_ACCESSED)
    {
        /* Code segments must also be readable. */
        if (  !(pSel->Attr.u & X86_SEL_TYPE_CODE)
            || (pSel->Attr.u & X86_SEL_TYPE_READ))
        {
            /* The S bit must be set. */
            if (pSel->Attr.n.u1DescType)
            {
                /* Except for conforming segments, DPL >= RPL. */
                if (   pSel->Attr.n.u2Dpl >= (pSel->Sel & X86_SEL_RPL)
                    || pSel->Attr.n.u4Type >= X86_SEL_TYPE_ER_ACC)
                {
                    /* Segment must be present. */
                    if (pSel->Attr.n.u1Present)
                    {
                        /*
                         * The following two requirements are VT-x specific:
                         *   - G bit must be set if any high limit bits are set.
                         *   - G bit must be clear if any low limit bits are clear.
                         */
                        if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
                            && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
                            return true;
                    }
                }
            }
        }
    }

    return false;
}


/**
 * Checks if the stack selector (SS) is suitable for execution using
 * hardware-assisted VMX when unrestricted execution isn't available.
 *
 * @returns true if selector is suitable for VMX, otherwise
 *        false.
 * @param   pSel    Pointer to the selector to check (SS).
 */
static bool hmVmxIsStackSelectorOk(PCCPUMSELREG pSel)
{
    /*
     * Unusable segments are OK.  These days they should be marked as such, as
     * but as an alternative we for old saved states and AMD<->VT-x migration
     * we also treat segments with all the attributes cleared as unusable.
     */
    /** @todo r=bird: actually all zeroes isn't gonna cut it... SS.DPL == CPL. */
    if (pSel->Attr.n.u1Unusable || !pSel->Attr.u)
        return true;

    /*
     * Segment must be an accessed writable segment, it must be present.
     * Note! These are all standard requirements and if SS holds anything else
     *       we've got buggy code somewhere!
     */
    AssertCompile(X86DESCATTR_TYPE == 0xf);
    AssertMsgReturn(   (pSel->Attr.u & (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P | X86_SEL_TYPE_CODE))
                    == (X86_SEL_TYPE_ACCESSED | X86_SEL_TYPE_WRITE | X86DESCATTR_DT | X86DESCATTR_P),
                    ("%#x\n", pSel->Attr.u), false);

    /* DPL must equal RPL.
       Note! This is also a hard requirement like above. */
    AssertMsgReturn(pSel->Attr.n.u2Dpl == (pSel->Sel & X86_SEL_RPL),
                    ("u2Dpl=%u Sel=%#x\n", pSel->Attr.n.u2Dpl, pSel->Sel), false);

    /*
     * The following two requirements are VT-x specific:
     *   - G bit must be set if any high limit bits are set.
     *   - G bit must be clear if any low limit bits are clear.
     */
    if (   ((pSel->u32Limit & 0xfff00000) == 0x00000000 ||  pSel->Attr.n.u1Granularity)
        && ((pSel->u32Limit & 0x00000fff) == 0x00000fff || !pSel->Attr.n.u1Granularity))
        return true;
    return false;
}


/**
 * Checks if the guest is in a suitable state for hardware-assisted VMX execution.
 *
 * @returns @c true if it is suitable, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 *
 * @remarks @a pCtx can be a partial context and thus may not be necessarily the
 *          same as pVCpu->cpum.GstCtx! Thus don't eliminate the @a pCtx parameter.
 *          Secondly, if additional checks are added that require more of the CPU
 *          state, make sure REM (which supplies a partial state) is updated.
 */
VMM_INT_DECL(bool) HMVmxCanExecuteGuest(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    Assert(HMIsEnabled(pVM));
    Assert(!CPUMIsGuestVmxEnabled(pCtx));
    Assert(   ( pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS)
           || (!pVM->hm.s.vmx.fUnrestrictedGuest &&  pVM->hm.s.vmx.pRealModeTSS));

    pVCpu->hm.s.fActive = false;

    bool const fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVmmDevHeapIsEnabled(pVM);
    if (!pVM->hm.s.vmx.fUnrestrictedGuest)
    {
        /*
         * The VMM device heap is a requirement for emulating real mode or protected mode without paging with the unrestricted
         * guest execution feature is missing (VT-x only).
         */
        if (fSupportsRealMode)
        {
            if (CPUMIsGuestInRealModeEx(pCtx))
            {
                /*
                 * In V86 mode (VT-x or not), the CPU enforces real-mode compatible selector
                 * bases and limits, i.e. limit must be 64K and base must be selector * 16.
                 * If this is not true, we cannot execute real mode as V86 and have to fall
                 * back to emulation.
                 */
                if (   pCtx->cs.Sel != (pCtx->cs.u64Base >> 4)
                    || pCtx->ds.Sel != (pCtx->ds.u64Base >> 4)
                    || pCtx->es.Sel != (pCtx->es.u64Base >> 4)
                    || pCtx->ss.Sel != (pCtx->ss.u64Base >> 4)
                    || pCtx->fs.Sel != (pCtx->fs.u64Base >> 4)
                    || pCtx->gs.Sel != (pCtx->gs.u64Base >> 4))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelBase);
                    return false;
                }
                if (   (pCtx->cs.u32Limit != 0xffff)
                    || (pCtx->ds.u32Limit != 0xffff)
                    || (pCtx->es.u32Limit != 0xffff)
                    || (pCtx->ss.u32Limit != 0xffff)
                    || (pCtx->fs.u32Limit != 0xffff)
                    || (pCtx->gs.u32Limit != 0xffff))
                {
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRmSelLimit);
                    return false;
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckRmOk);
            }
            else
            {
                /*
                 * Verify the requirements for executing code in protected mode. VT-x can't
                 * handle the CPU state right after a switch from real to protected mode
                 * (all sorts of RPL & DPL assumptions).
                 */
                if (pVCpu->hm.s.vmx.fWasInRealMode)
                {
                    /** @todo If guest is in V86 mode, these checks should be different! */
                    if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadRpl);
                        return false;
                    }
                    if (   !hmVmxIsCodeSelectorOk(&pCtx->cs, pCtx->ss.Attr.n.u2Dpl)
                        || !hmVmxIsDataSelectorOk(&pCtx->ds)
                        || !hmVmxIsDataSelectorOk(&pCtx->es)
                        || !hmVmxIsDataSelectorOk(&pCtx->fs)
                        || !hmVmxIsDataSelectorOk(&pCtx->gs)
                        || !hmVmxIsStackSelectorOk(&pCtx->ss))
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
                        return false;
                    }
                }
                /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
                if (pCtx->gdtr.cbGdt)
                {
                    if ((pCtx->tr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadTr);
                        return false;
                    }
                    else if ((pCtx->ldtr.Sel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
                    {
                        STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadLdt);
                        return false;
                    }
                }
                STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckPmOk);
            }
        }
        else
        {
            if (   !CPUMIsGuestInLongModeEx(pCtx)
                && !pVM->hm.s.vmx.fUnrestrictedGuest)
            {
                if (   !pVM->hm.s.fNestedPaging          /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
                    || CPUMIsGuestInRealModeEx(pCtx))    /* Requires a fake TSS for real mode - stored in the VMM device heap */
                    return false;

                /* Too early for VT-x; Solaris guests will fail with a guru meditation otherwise; same for XP. */
                if (pCtx->idtr.pIdt == 0 || pCtx->idtr.cbIdt == 0 || pCtx->tr.Sel == 0)
                    return false;

                /*
                 * The guest is about to complete the switch to protected mode. Wait a bit longer.
                 * Windows XP; switch to protected mode; all selectors are marked not present
                 * in the hidden registers (possible recompiler bug; see load_seg_vm).
                 */
                /** @todo Is this supposed recompiler bug still relevant with IEM? */
                if (pCtx->cs.Attr.n.u1Present == 0)
                    return false;
                if (pCtx->ss.Attr.n.u1Present == 0)
                    return false;

                /*
                 * Windows XP: possible same as above, but new recompiler requires new
                 * heuristics? VT-x doesn't seem to like something about the guest state and
                 * this stuff avoids it.
                 */
                /** @todo This check is actually wrong, it doesn't take the direction of the
                 *        stack segment into account. But, it does the job for now. */
                if (pCtx->rsp >= pCtx->ss.u32Limit)
                    return false;
            }
        }
    }

    if (pVM->hm.s.vmx.fEnabled)
    {
        uint32_t uCr0Mask;

        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
        uCr0Mask = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr0Fixed0;

        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
        uCr0Mask &= ~X86_CR0_NE;

        if (fSupportsRealMode)
        {
            /* We ignore the PE & PG bits here on purpose; we emulate real and protected mode without paging. */
            uCr0Mask &= ~(X86_CR0_PG | X86_CR0_PE);
        }
        else
        {
            /* We support protected mode without paging using identity mapping. */
            uCr0Mask &= ~X86_CR0_PG;
        }
        if ((pCtx->cr0 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr0Fixed1;
        if ((pCtx->cr0 & uCr0Mask) != 0)
            return false;

        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
        uCr0Mask  = (uint32_t)pVM->hm.s.vmx.Msrs.u64Cr4Fixed0;
        uCr0Mask &= ~X86_CR4_VMXE;
        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
            return false;

        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
        uCr0Mask = (uint32_t)~pVM->hm.s.vmx.Msrs.u64Cr4Fixed1;
        if ((pCtx->cr4 & uCr0Mask) != 0)
            return false;

        pVCpu->hm.s.fActive = true;
        return true;
    }

    return false;
}
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r73609 → r73617)

Line 4555 - the comment now names the renamed function:

     Assert(pVM->hm.s.vmx.pRealModeTSS);
-    Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
+    Assert(PDMVmmDevHeapIsEnabled(pVM));    /* Guaranteed by HMCanExecuteGuest() -XXX- what about inner loop changes? */

     /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */

Line 8272 - redundant braces around a single statement dropped:

     if (   !pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest
         && CPUMIsGuestInRealModeEx(&pVCpu->cpum.GstCtx))
-    {
         pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
-    }

New lines 13169-13228 (previously 13171-13476) - the real-on-V86 part of the #GP VM-exit
handler was rewritten. The old code declared "int rc" up front, disassembled the faulting
instruction with EMInterpretDisasCurrent() and hand-emulated CLI, STI, HLT, POPF, PUSHF,
IRET, INT and INTO, falling back to EMInterpretInstructionDisasState() for everything else;
that entire switch (roughly 260 lines, including its trailing AssertMsg() on the expected
return codes) was removed. The reflect-#GP-to-guest path only changes to declare rc at
first use:

     /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
-    rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
+    int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);

The real-mode branch now lets IEM execute the instruction and re-evaluates whether VT-x can
still run the guest afterwards:

     Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);

-    /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
-    rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
+    int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL);
     AssertRCReturn(rc, rc);

+    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
+    if (rcStrict == VINF_SUCCESS)
+    {
+        if (!CPUMIsGuestInRealModeEx(pCtx))
+        {
+            /*
+             * The guest is no longer in real-mode, check if we can continue executing the
+             * guest using hardware-assisted VMX. Otherwise, fall back to emulation.
+             */
+            if (HMVmxCanExecuteGuest(pVCpu, pCtx))
+            {
+                Log4Func(("Mode changed but guest still suitable for executing using VT-x\n"));
+                pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
+                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+            }
+            else
+            {
+                Log4Func(("Mode changed -> VINF_EM_RESCHEDULE\n"));
+                rcStrict = VINF_EM_RESCHEDULE;
+            }
+        }
+        else
+            ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
+    }
+    else if (rcStrict == VINF_IEM_RAISED_XCPT)
+    {
+        rcStrict = VINF_SUCCESS;
+        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_RAISED_XCPT_MASK);
+    }
+    return VBOXSTRICTRC_VAL(rcStrict);
 }
trunk/src/VBox/VMM/VMMR3/EM.cpp (r73606 → r73617)

Line 1453:

     if (VM_IS_HM_ENABLED(pVM))
     {
-        if (HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
+        if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
             return EMSTATE_HM;
     }
trunk/src/VBox/VMM/VMMR3/EMHM.cpp (r73224 → r73617)

Line 88:

     Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));

-    if (!HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
+    if (!HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
         return VINF_EM_RESCHEDULE;
trunk/src/VBox/VMM/VMMR3/HM.cpp (r73606 → r73617)

Removed (old lines 2792-3139): the ring-3-only helpers hmR3IsCodeSelectorOkForVmx(),
hmR3IsDataSelectorOkForVmx() and hmR3IsStackSelectorOkForVmx(), and HMR3CanExecuteGuest().
Their bodies are otherwise unchanged and now live in the all-context code shown above: the
three selector checks become hmVmxIsCodeSelectorOk(), hmVmxIsDataSelectorOk() and
hmVmxIsStackSelectorOk() in HMVMXAll.cpp, while HMR3CanExecuteGuest() is split into
HMCanExecuteGuest() in HMAll.cpp (nested-hwvirt and AMD-V checks) and HMVmxCanExecuteGuest()
in HMVMXAll.cpp (the VT-x real/protected-mode and CR0/CR4 fixed-bit checks). The following
function, "Checks if we need to reschedule due to VMM device heap changes" (now at line 2792),
is left in place.