Changeset 69409 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Oct 27, 2017 4:14:12 AM (7 years ago)
- svn:sync-xref-src-repo-rev: 118738
- File: 1 edited
Legend: unmodified lines are unprefixed; added lines are prefixed with "+", removed lines with "-".
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h
--- trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h  (r69141)
+++ trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h  (r69409)
@@ -72,4 +72,7 @@
      */
     int rc = PGMChangeMode(pVCpu, pCtx->cr0 | X86_CR0_PE, pCtx->cr4, pCtx->msrEFER);
+#ifdef IN_RING3
+    Assert(rc != VINF_PGM_CHANGE_MODE);
+#endif
     AssertRCReturn(rc, rc);
 
@@ -239,5 +242,5 @@
      * Reload the guest's "host state".
      */
-    CPUMSvmVmExitRestoreHostState(pCtx);
+    CPUMSvmVmExitRestoreHostState(pVCpu, pCtx);
 
     /*
@@ -291,4 +294,13 @@
     LogFlow(("iemSvmVmrun\n"));
 
+#ifdef IN_RING0
+    /*
+     * Until PGM can handle switching the guest paging mode in ring-0,
+     * there's no point in trying to emulate VMRUN in ring-0 as we have
+     * to go back to ring-3 anyway, see @bugref{7243#c48}.
+     */
+    return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
+#endif
+
     /*
      * Cache the physical address of the VMCB for #VMEXIT exceptions.
@@ -451,8 +463,12 @@
     /*
      * Continue validating guest-state and controls.
-     */
-    /* EFER, CR0 and CR4. */
+     *
+     * We pass CR0 as 0 to CPUMQueryValidatedGuestEfer below to skip the illegal
+     * EFER.LME bit transition check. We pass the nested-guest's EFER as both the
+     * old and new EFER value to not have any guest EFER bits influence the new
+     * nested-guest EFER.
+     */
     uint64_t uValidEfer;
-    rc = CPUMQueryValidatedGuestEfer(pVM, pVmcbNstGst->u64CR0, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
+    rc = CPUMQueryValidatedGuestEfer(pVM, 0 /* CR0 */, pVmcbNstGst->u64EFER, pVmcbNstGst->u64EFER, &uValidEfer);
     if (RT_FAILURE(rc))
     {
@@ -460,4 +476,6 @@
         return iemSvmVmexit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
+
+    /* Validate paging and CPU mode bits. */
     bool const fSvm = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
     bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);
@@ -546,5 +564,5 @@
     pCtx->rsp = pVmcbNstGst->u64RSP;
     pCtx->rip = pVmcbNstGst->u64RIP;
-    pCtx->msrEFER = uValidEfer;
+    CPUMSetGuestMsrEferNoCheck(pVCpu, pCtx->msrEFER, uValidEfer);
 
     /* Mask DR6, DR7 bits mandatory set/clear bits. */
@@ -569,5 +587,8 @@
     { /* likely */ }
     else if (RT_SUCCESS(rcStrict))
+    {
+        LogFlow(("iemSvmVmrun: iemSvmWorldSwitch returned %Rrc, setting passup status\n", VBOXSTRICTRC_VAL(rcStrict)));
         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
+    }
     else
     {
@@ -634,4 +655,5 @@
             pCtx->cs.Sel, pCtx->rip, pCtx->cr0, pCtx->cr3, pCtx->cr4, pCtx->msrEFER, pCtx->rflags.u64));
 
+    LogFlow(("iemSvmVmrun: returns %d\n", VBOXSTRICTRC_VAL(rcStrict)));
     return rcStrict;
 }
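The most consequential hunk is the new IN_RING0 early return at the top of iemSvmVmrun(): until PGM can switch the guest paging mode from ring-0, VMRUN emulation is refused there so execution drops back to ring-3 and the instruction is retried where the mode switch can be handled. The stand-alone sketch below illustrates only that build-time guard pattern, not the VirtualBox code itself; the names demo_emulate_vmrun, DEMO_SUCCESS and DEMO_ERR_NOT_IMPLEMENTED are hypothetical placeholders standing in for the real function and for VINF_SUCCESS / VERR_IEM_ASPECT_NOT_IMPLEMENTED.

    #include <stdio.h>

    /* Hypothetical status codes; the real code uses VINF_SUCCESS and
     * VERR_IEM_ASPECT_NOT_IMPLEMENTED. */
    #define DEMO_SUCCESS              0
    #define DEMO_ERR_NOT_IMPLEMENTED  (-1)

    /* Sketch of the guard added to iemSvmVmrun(): when compiled for the
     * ring-0 context, bail out immediately so the caller re-dispatches the
     * instruction to ring-3, where the paging-mode switch can be done. */
    static int demo_emulate_vmrun(void)
    {
    #ifdef IN_RING0
        return DEMO_ERR_NOT_IMPLEMENTED;  /* force a round trip to ring-3 */
    #else
        /* Ring-3 build: the full emulation would run here. */
        return DEMO_SUCCESS;
    #endif
    }

    int main(void)
    {
        printf("demo_emulate_vmrun() -> %d\n", demo_emulate_vmrun());
        return 0;
    }

Compiled without -DIN_RING0 the sketch reports success; with -DIN_RING0 it reports the "not implemented" status, mirroring how the ring-0 build of the IEM code now declines VMRUN emulation.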