Changeset 92316 in vbox for trunk/src/VBox/VMM/VMMR3
- Timestamp: Nov 10, 2021 12:58:27 PM (3 years ago)
- File: 1 edited
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
r92220 r92316 68 68 /** MSRs. */ 69 69 SUPHWVIRTMSRS g_HmMsrs; 70 /** VMX: Set if swapping EFER is supported. */ 71 static bool g_fHmVmxSupportsVmcsEfer = false; 70 72 71 73 … … 409 411 } 410 412 411 412 static int nemR3DarwinCopyStateToHv(PVMCC pVM, PVMCPUCC pVCpu) 413 { 414 #define WRITE_GREG(a_GReg, a_Value) \ 415 do \ 416 { \ 417 hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \ 418 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 419 { /* likely */ } \ 420 else \ 421 return VERR_INTERNAL_ERROR; \ 422 } while(0) 423 #define WRITE_VMCS_FIELD(a_Field, a_Value) \ 424 do \ 425 { \ 426 hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), (a_Value)); \ 427 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 428 { /* likely */ } \ 429 else \ 430 return VERR_INTERNAL_ERROR; \ 431 } while(0) 432 433 RT_NOREF(pVM); 434 435 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK); 436 if (!fWhat) 413 #if 0 /* unused */ 414 DECLINLINE(int) nemR3DarwinMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Val) 415 { 416 hv_return_t hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, idMsr, pu64Val); 417 if (RT_LIKELY(hrc == HV_SUCCESS)) 437 418 return VINF_SUCCESS; 438 419 439 hv_return_t hrc; 440 if (fWhat & CPUMCTX_EXTRN_GPRS_MASK) 441 { 442 if (fWhat & CPUMCTX_EXTRN_RAX) 443 WRITE_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax); 444 if (fWhat & CPUMCTX_EXTRN_RCX) 445 WRITE_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx); 446 if (fWhat & CPUMCTX_EXTRN_RDX) 447 WRITE_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx); 448 if (fWhat & CPUMCTX_EXTRN_RBX) 449 WRITE_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx); 450 if (fWhat & CPUMCTX_EXTRN_RSP) 451 WRITE_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp); 452 if (fWhat & CPUMCTX_EXTRN_RBP) 453 WRITE_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp); 454 if (fWhat & CPUMCTX_EXTRN_RSI) 455 WRITE_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi); 456 if (fWhat & CPUMCTX_EXTRN_RDI) 457 WRITE_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi); 458 if (fWhat & CPUMCTX_EXTRN_R8_R15) 459 { 460 WRITE_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8); 461 WRITE_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9); 462 WRITE_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10); 463 WRITE_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11); 464 WRITE_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12); 465 WRITE_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13); 466 WRITE_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14); 467 WRITE_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15); 468 } 469 } 470 471 /* RIP & Flags */ 472 if (fWhat & CPUMCTX_EXTRN_RIP) 473 WRITE_GREG(HV_X86_RIP, pVCpu->cpum.GstCtx.rip); 474 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 475 WRITE_GREG(HV_X86_RFLAGS, pVCpu->cpum.GstCtx.rflags.u); 476 477 /* Segments */ 478 #define ADD_SEG(a_enmName, a_SReg) \ 479 do { \ 480 WRITE_VMCS_FIELD(VMX_VMCS16_GUEST_ ## a_enmName ## _SEL, (a_SReg).Sel); \ 481 WRITE_VMCS_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _LIMIT, (a_SReg).u32Limit); \ 482 WRITE_VMCS_FIELD(VMX_VMCS32_GUEST_ ## a_enmName ## _ACCESS_RIGHTS, (a_SReg).Attr.u); \ 483 WRITE_VMCS_FIELD(VMX_VMCS_GUEST_ ## a_enmName ## _BASE, (a_SReg).u64Base); \ 484 } while (0) 485 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 486 { 487 if (fWhat & CPUMCTX_EXTRN_ES) 488 ADD_SEG(ES, pVCpu->cpum.GstCtx.es); 489 if (fWhat & CPUMCTX_EXTRN_CS) 490 ADD_SEG(CS, pVCpu->cpum.GstCtx.cs); 491 if (fWhat & CPUMCTX_EXTRN_SS) 492 ADD_SEG(SS, pVCpu->cpum.GstCtx.ss); 493 if (fWhat & CPUMCTX_EXTRN_DS) 494 ADD_SEG(DS, pVCpu->cpum.GstCtx.ds); 495 if (fWhat & CPUMCTX_EXTRN_FS) 496 ADD_SEG(FS, pVCpu->cpum.GstCtx.fs); 497 if (fWhat & CPUMCTX_EXTRN_GS) 498 
ADD_SEG(GS, pVCpu->cpum.GstCtx.gs); 499 } 500 501 /* Descriptor tables & task segment. */ 502 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 503 { 504 if (fWhat & CPUMCTX_EXTRN_LDTR) 505 ADD_SEG(LDTR, pVCpu->cpum.GstCtx.ldtr); 506 if (fWhat & CPUMCTX_EXTRN_TR) 507 ADD_SEG(TR, pVCpu->cpum.GstCtx.tr); 508 if (fWhat & CPUMCTX_EXTRN_IDTR) 509 { 510 WRITE_VMCS_FIELD(VMCS_GUEST_IDTR_LIMIT, pVCpu->cpum.GstCtx.idtr.cbIdt); 511 WRITE_VMCS_FIELD(VMCS_GUEST_IDTR_BASE, pVCpu->cpum.GstCtx.idtr.pIdt); 512 } 513 if (fWhat & CPUMCTX_EXTRN_GDTR) 514 { 515 WRITE_VMCS_FIELD(VMCS_GUEST_GDTR_LIMIT, pVCpu->cpum.GstCtx.gdtr.cbGdt); 516 WRITE_VMCS_FIELD(VMCS_GUEST_GDTR_BASE, pVCpu->cpum.GstCtx.gdtr.pGdt); 517 } 518 } 519 520 /* Control registers. */ 521 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 522 { 523 if (fWhat & CPUMCTX_EXTRN_CR0) 524 { 525 uint64_t u64GuestCr0 = pVCpu->cpum.GstCtx.cr0; 526 527 /* Apply the hardware specified CR0 fixed bits and enable caching. */ 528 u64GuestCr0 |= VMX_V_CR0_FIXED0_UX; 529 u64GuestCr0 &= ~0; 530 u64GuestCr0 &= ~(uint64_t)(X86_CR0_CD | X86_CR0_NW); 531 WRITE_GREG(HV_X86_CR0, u64GuestCr0); 532 } 533 if (fWhat & CPUMCTX_EXTRN_CR2) 534 WRITE_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2); 535 if (fWhat & CPUMCTX_EXTRN_CR3) 536 WRITE_GREG(HV_X86_CR3, pVCpu->cpum.GstCtx.cr3); 537 if (fWhat & CPUMCTX_EXTRN_CR4) 538 { 539 uint64_t u64GuestCr4 = pVCpu->cpum.GstCtx.cr4; 540 541 u64GuestCr4 |= VMX_V_CR4_FIXED0; 542 u64GuestCr4 &= ~0; 543 544 WRITE_GREG(HV_X86_CR4, u64GuestCr4); 545 } 546 } 547 if (fWhat & CPUMCTX_EXTRN_APIC_TPR) 548 WRITE_GREG(HV_X86_TPR, CPUMGetGuestCR8(pVCpu)); 549 550 /* Debug registers. */ 551 if (fWhat & CPUMCTX_EXTRN_DR0_DR3) 552 { 553 WRITE_GREG(HV_X86_DR0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu)); 554 WRITE_GREG(HV_X86_DR1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu)); 555 WRITE_GREG(HV_X86_DR2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu)); 556 WRITE_GREG(HV_X86_DR3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu)); 557 } 558 if (fWhat & CPUMCTX_EXTRN_DR6) 559 WRITE_GREG(HV_X86_DR6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu)); 560 if (fWhat & CPUMCTX_EXTRN_DR7) 561 WRITE_GREG(HV_X86_DR7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu)); 562 563 /* MSRs */ 564 // WHvX64RegisterTsc - don't touch 565 if (fWhat & CPUMCTX_EXTRN_EFER) 566 WRITE_VMCS_FIELD(VMCS_GUEST_IA32_EFER, pVCpu->cpum.GstCtx.msrEFER); 567 #if 0 568 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 569 ADD_REG64(WHvX64RegisterKernelGsBase, pVCpu->cpum.GstCtx.msrKERNELGSBASE); 570 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 571 { 572 ADD_REG64(WHvX64RegisterSysenterCs, pVCpu->cpum.GstCtx.SysEnter.cs); 573 ADD_REG64(WHvX64RegisterSysenterEip, pVCpu->cpum.GstCtx.SysEnter.eip); 574 ADD_REG64(WHvX64RegisterSysenterEsp, pVCpu->cpum.GstCtx.SysEnter.esp); 575 } 576 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 577 { 578 ADD_REG64(WHvX64RegisterStar, pVCpu->cpum.GstCtx.msrSTAR); 579 ADD_REG64(WHvX64RegisterLstar, pVCpu->cpum.GstCtx.msrLSTAR); 580 ADD_REG64(WHvX64RegisterCstar, pVCpu->cpum.GstCtx.msrCSTAR); 581 ADD_REG64(WHvX64RegisterSfmask, pVCpu->cpum.GstCtx.msrSFMASK); 582 } 583 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 584 { 585 ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu)); 586 ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT); 587 #if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... 
*/ 588 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu)); 420 return nemR3DarwinHvSts2Rc(hrc); 421 } 422 423 424 DECLINLINE(int) nemR3DarwinMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Val) 425 { 426 hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, idMsr, u64Val); 427 if (RT_LIKELY(hrc == HV_SUCCESS)) 428 return VINF_SUCCESS; 429 430 return nemR3DarwinHvSts2Rc(hrc); 431 } 589 432 #endif 590 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);591 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);592 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);593 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);594 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);595 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);596 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);597 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);598 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);599 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);600 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);601 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);602 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);603 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);604 #if 0 /** @todo these registers aren't available? Might explain something.. .*/605 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM);606 if (enmCpuVendor != CPUMCPUVENDOR_AMD)607 {608 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);609 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));610 }611 #endif612 }613 #endif614 615 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR0_MASK, 0x60000000);616 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR0_READ_SHADOW, 0x00000000);617 618 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR4_MASK, VMX_V_CR4_FIXED0);619 WRITE_VMCS_FIELD(VMX_VMCS_CTRL_CR4_READ_SHADOW, 0);620 621 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, MSR_IA32_DEBUGCTL_LBR);622 623 #if 0 /** @todo */624 WRITE_GREG(HV_X86_TSS_BASE, );625 WRITE_GREG(HV_X86_TSS_LIMIT, );626 WRITE_GREG(HV_X86_TSS_AR, );627 WRITE_GREG(HV_X86_XCR0, );628 #endif629 630 hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId);631 hv_vcpu_flush(pVCpu->nem.s.hVCpuId);632 633 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;634 return VINF_SUCCESS;635 #undef WRITE_GREG636 #undef WRITE_VMCS_FIELD637 }638 433 639 434 … … 678 473 return VERR_INTERNAL_ERROR; \ 679 474 } while(0) 475 #define READ_MSR(a_Msr, a_Value) \ 476 do \ 477 { \ 478 hrc = hv_vcpu_read_msr(pVCpu->nem.s.hVCpuId, (a_Msr), &(a_Value)); \ 479 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 480 { /* likely */ } \ 481 else \ 482 AssertFailedReturn(VERR_INTERNAL_ERROR); \ 483 } while(0) 680 484 681 485 RT_NOREF(pVM); … … 863 667 } 864 668 865 #if 0866 /* Floating point state. 
*/867 if (fWhat & CPUMCTX_EXTRN_X87)868 {869 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);870 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);871 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);872 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);873 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);874 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);875 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);876 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[0], pVCpu->cpum.GstCtx.XState.x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);877 878 Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);879 pVCpu->cpum.GstCtx.XState.x87.FCW = aValues[iReg].FpControlStatus.FpControl;880 pVCpu->cpum.GstCtx.XState.x87.FSW = aValues[iReg].FpControlStatus.FpStatus;881 pVCpu->cpum.GstCtx.XState.x87.FTW = aValues[iReg].FpControlStatus.FpTag882 /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;883 pVCpu->cpum.GstCtx.XState.x87.FOP = aValues[iReg].FpControlStatus.LastFpOp;884 pVCpu->cpum.GstCtx.XState.x87.FPUIP = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;885 pVCpu->cpum.GstCtx.XState.x87.CS = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);886 pVCpu->cpum.GstCtx.XState.x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);887 iReg++;888 }889 890 669 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX)) 891 670 { 892 Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus); 893 if (fWhat & CPUMCTX_EXTRN_X87) 894 { 895 pVCpu->cpum.GstCtx.XState.x87.FPUDP = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp; 896 pVCpu->cpum.GstCtx.XState.x87.DS = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32); 897 pVCpu->cpum.GstCtx.XState.x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48); 898 } 899 pVCpu->cpum.GstCtx.XState.x87.MXCSR = aValues[iReg].XmmControlStatus.XmmStatusControl; 900 pVCpu->cpum.GstCtx.XState.x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */ 901 iReg++; 902 } 903 904 /* Vector state. 
*/ 905 if (fWhat & CPUMCTX_EXTRN_SSE_AVX) 906 { 907 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0); 908 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1); 909 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2); 910 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3); 911 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4); 912 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5); 913 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6); 914 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7); 915 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8); 916 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9); 917 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10); 918 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11); 919 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12); 920 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13); 921 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14); 922 GET_REG128(pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Lo, pVCpu->cpum.GstCtx.XState.x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15); 923 } 924 #endif 671 hrc = hv_vcpu_read_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState)); 672 if (hrc == HV_SUCCESS) 673 { /* likely */ } 674 else 675 return nemR3DarwinHvSts2Rc(hrc); 676 } 925 677 926 678 /* MSRs */ 927 // WHvX64RegisterTsc - don't touch928 679 if (fWhat & CPUMCTX_EXTRN_EFER) 929 680 { … … 940 691 } 941 692 } 693 694 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 695 READ_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE); 696 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 697 { 698 uint64_t u64Tmp; 699 READ_MSR(MSR_IA32_SYSENTER_EIP, u64Tmp); 700 pVCpu->cpum.GstCtx.SysEnter.eip = u64Tmp; 701 READ_MSR(MSR_IA32_SYSENTER_ESP, u64Tmp); 702 pVCpu->cpum.GstCtx.SysEnter.esp = u64Tmp; 703 READ_MSR(MSR_IA32_SYSENTER_CS, u64Tmp); 704 pVCpu->cpum.GstCtx.SysEnter.cs = u64Tmp; 705 } 706 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 707 { 708 READ_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR); 709 READ_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR); 710 READ_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR); 711 READ_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK); 712 } 942 713 #if 0 943 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)944 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");945 if (fWhat & 
CPUMCTX_EXTRN_SYSENTER_MSRS)946 {947 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.cs, WHvX64RegisterSysenterCs, "MSR SYSENTER.CS");948 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");949 GET_REG64_LOG7(pVCpu->cpum.GstCtx.SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");950 }951 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)952 {953 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSTAR, WHvX64RegisterStar, "MSR STAR");954 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrLSTAR, WHvX64RegisterLstar, "MSR LSTAR");955 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrCSTAR, WHvX64RegisterCstar, "MSR CSTAR");956 GET_REG64_LOG7(pVCpu->cpum.GstCtx.msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");957 }958 714 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 959 715 { … … 989 745 /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */ 990 746 } 991 992 /* Interruptibility. */993 if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))994 {995 Assert(aenmNames[iReg] == WHvRegisterInterruptState);996 Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);997 998 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))999 {1000 pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;1001 if (aValues[iReg].InterruptState.InterruptShadow)1002 EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);1003 else1004 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);1005 }1006 1007 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))1008 {1009 if (aValues[iReg].InterruptState.NmiMasked)1010 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);1011 else1012 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);1013 }1014 1015 fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;1016 iReg += 2;1017 }1018 747 #endif 1019 748 1020 749 /* Almost done, just update extrn flags and maybe change PGM mode. */ 1021 750 pVCpu->cpum.GstCtx.fExtrn &= ~fWhat; 1022 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))751 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)) 1023 752 pVCpu->cpum.GstCtx.fExtrn = 0; 1024 753 … … 1050 779 #undef READ_VMCS32_FIELD 1051 780 #undef READ_SEG 1052 } 1053 1054 1055 /** 1056 * Wrapper around nemR3DarwinCopyStateFromHv. 1057 * 1058 * Unlike the wrapped APIs, this checks whether it's necessary. 1059 * 1060 * @returns VBox strict status code. 1061 * @param pVCpu The cross context per CPU structure. 1062 * @param fWhat What to import. 
1063 */ 1064 DECLINLINE(VBOXSTRICTRC) nemR3DarwinImportStateIfNeededStrict(PVMCPUCC pVCpu, uint64_t fWhat) 1065 { 1066 if (pVCpu->cpum.GstCtx.fExtrn & fWhat) 1067 { 1068 int rc = nemR3DarwinCopyStateFromHv(pVCpu->pVMR3, pVCpu, fWhat); 1069 AssertRCReturn(rc, rc); 1070 } 1071 return VINF_SUCCESS; 781 #undef READ_MSR 1072 782 } 1073 783 … … 1190 900 1191 901 902 DECL_FORCE_INLINE(bool) vmxHCShouldSwapEferMsr(PCVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient) 903 { 904 RT_NOREF(pVCpu, pVmxTransient); 905 return true; 906 } 907 908 909 DECL_FORCE_INLINE(bool) nemR3DarwinIsUnrestrictedGuest(PCVMCC pVM) 910 { 911 RT_NOREF(pVM); 912 return true; 913 } 914 915 916 DECL_FORCE_INLINE(bool) nemR3DarwinIsNestedPaging(PCVMCC pVM) 917 { 918 RT_NOREF(pVM); 919 return true; 920 } 921 922 923 DECL_FORCE_INLINE(bool) nemR3DarwinIsPreemptTimerUsed(PCVMCC pVM) 924 { 925 RT_NOREF(pVM); 926 return false; 927 } 928 929 930 DECL_FORCE_INLINE(bool) nemR3DarwinIsVmxLbr(PCVMCC pVM) 931 { 932 RT_NOREF(pVM); 933 return false; 934 } 935 936 1192 937 /* 1193 938 * Instantiate the code we share with ring-0. 1194 939 */ 1195 #define HMVMX_ALWAYS_TRAP_ALL_XCPTS 1196 #define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s 940 //#define HMVMX_ALWAYS_TRAP_ALL_XCPTS 941 #define HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE 942 #define VCPU_2_VMXSTATE(a_pVCpu) (a_pVCpu)->nem.s 943 #define VM_IS_VMX_UNRESTRICTED_GUEST(a_pVM) nemR3DarwinIsUnrestrictedGuest((a_pVM)) 944 #define VM_IS_VMX_NESTED_PAGING(a_pVM) nemR3DarwinIsNestedPaging((a_pVM)) 945 #define VM_IS_VMX_PREEMPT_TIMER_USED(a_pVM) nemR3DarwinIsPreemptTimerUsed((a_pVM)) 946 #define VM_IS_VMX_LBR(a_pVM) nemR3DarwinIsVmxLbr((a_pVM)) 1197 947 1198 948 #define VMX_VMCS_WRITE_16(a_pVCpu, a_FieldEnc, a_Val) nemR3DarwinWriteVmcs16((a_pVCpu), (a_FieldEnc), (a_Val)) … … 1218 968 #undef VMX_VMCS_READ_NW 1219 969 970 #undef VM_IS_VMX_PREEMPT_TIMER_USED 971 #undef VM_IS_VMX_NESTED_PAGING 972 #undef VM_IS_VMX_UNRESTRICTED_GUEST 973 #undef VCPU_2_VMXSTATE 974 975 976 /** 977 * Exports the guest GP registers to HV for execution. 978 * 979 * @returns VBox status code. 980 * @param pVCpu The cross context virtual CPU structure of the 981 * calling EMT. 
982 */ 983 static int nemR3DarwinExportGuestGprs(PVMCPUCC pVCpu) 984 { 985 #define WRITE_GREG(a_GReg, a_Value) \ 986 do \ 987 { \ 988 hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \ 989 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 990 { /* likely */ } \ 991 else \ 992 return VERR_INTERNAL_ERROR; \ 993 } while(0) 994 995 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->nem.s.fCtxChanged); 996 if (fCtxChanged & HM_CHANGED_GUEST_GPRS_MASK) 997 { 998 if (fCtxChanged & HM_CHANGED_GUEST_RAX) 999 WRITE_GREG(HV_X86_RAX, pVCpu->cpum.GstCtx.rax); 1000 if (fCtxChanged & HM_CHANGED_GUEST_RCX) 1001 WRITE_GREG(HV_X86_RCX, pVCpu->cpum.GstCtx.rcx); 1002 if (fCtxChanged & HM_CHANGED_GUEST_RDX) 1003 WRITE_GREG(HV_X86_RDX, pVCpu->cpum.GstCtx.rdx); 1004 if (fCtxChanged & HM_CHANGED_GUEST_RBX) 1005 WRITE_GREG(HV_X86_RBX, pVCpu->cpum.GstCtx.rbx); 1006 if (fCtxChanged & HM_CHANGED_GUEST_RSP) 1007 WRITE_GREG(HV_X86_RSP, pVCpu->cpum.GstCtx.rsp); 1008 if (fCtxChanged & HM_CHANGED_GUEST_RBP) 1009 WRITE_GREG(HV_X86_RBP, pVCpu->cpum.GstCtx.rbp); 1010 if (fCtxChanged & HM_CHANGED_GUEST_RSI) 1011 WRITE_GREG(HV_X86_RSI, pVCpu->cpum.GstCtx.rsi); 1012 if (fCtxChanged & HM_CHANGED_GUEST_RDI) 1013 WRITE_GREG(HV_X86_RDI, pVCpu->cpum.GstCtx.rdi); 1014 if (fCtxChanged & HM_CHANGED_GUEST_R8_R15) 1015 { 1016 WRITE_GREG(HV_X86_R8, pVCpu->cpum.GstCtx.r8); 1017 WRITE_GREG(HV_X86_R9, pVCpu->cpum.GstCtx.r9); 1018 WRITE_GREG(HV_X86_R10, pVCpu->cpum.GstCtx.r10); 1019 WRITE_GREG(HV_X86_R11, pVCpu->cpum.GstCtx.r11); 1020 WRITE_GREG(HV_X86_R12, pVCpu->cpum.GstCtx.r12); 1021 WRITE_GREG(HV_X86_R13, pVCpu->cpum.GstCtx.r13); 1022 WRITE_GREG(HV_X86_R14, pVCpu->cpum.GstCtx.r14); 1023 WRITE_GREG(HV_X86_R15, pVCpu->cpum.GstCtx.r15); 1024 } 1025 1026 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_GPRS_MASK); 1027 } 1028 1029 if (fCtxChanged & HM_CHANGED_GUEST_CR2) 1030 { 1031 WRITE_GREG(HV_X86_CR2, pVCpu->cpum.GstCtx.cr2); 1032 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~HM_CHANGED_GUEST_CR2); 1033 } 1034 1035 return VINF_SUCCESS; 1036 #undef WRITE_GREG 1037 } 1038 1039 1040 /** 1041 * Converts the given CPUM externalized bitmask to the appropriate HM changed bitmask. 1042 * 1043 * @returns Bitmask of HM changed flags. 1044 * @param fCpumExtrn The CPUM extern bitmask. 1045 */ 1046 static uint64_t nemR3DarwinCpumExtrnToHmChanged(uint64_t fCpumExtrn) 1047 { 1048 uint64_t fHmChanged = 0; 1049 1050 /* Invert to gt a mask of things which are kept in CPUM. 
*/ 1051 uint64_t fCpumIntern = ~fCpumExtrn; 1052 1053 if (fCpumIntern & CPUMCTX_EXTRN_GPRS_MASK) 1054 { 1055 if (fCpumIntern & CPUMCTX_EXTRN_RAX) 1056 fHmChanged |= HM_CHANGED_GUEST_RAX; 1057 if (fCpumIntern & CPUMCTX_EXTRN_RCX) 1058 fHmChanged |= HM_CHANGED_GUEST_RCX; 1059 if (fCpumIntern & CPUMCTX_EXTRN_RDX) 1060 fHmChanged |= HM_CHANGED_GUEST_RDX; 1061 if (fCpumIntern & CPUMCTX_EXTRN_RBX) 1062 fHmChanged |= HM_CHANGED_GUEST_RBX; 1063 if (fCpumIntern & CPUMCTX_EXTRN_RSP) 1064 fHmChanged |= HM_CHANGED_GUEST_RSP; 1065 if (fCpumIntern & CPUMCTX_EXTRN_RBP) 1066 fHmChanged |= HM_CHANGED_GUEST_RBP; 1067 if (fCpumIntern & CPUMCTX_EXTRN_RSI) 1068 fHmChanged |= HM_CHANGED_GUEST_RSI; 1069 if (fCpumIntern & CPUMCTX_EXTRN_RDI) 1070 fHmChanged |= HM_CHANGED_GUEST_RDI; 1071 if (fCpumIntern & CPUMCTX_EXTRN_R8_R15) 1072 fHmChanged |= HM_CHANGED_GUEST_R8_R15; 1073 } 1074 1075 /* RIP & Flags */ 1076 if (fCpumIntern & CPUMCTX_EXTRN_RIP) 1077 fHmChanged |= HM_CHANGED_GUEST_RIP; 1078 if (fCpumIntern & CPUMCTX_EXTRN_RFLAGS) 1079 fHmChanged |= HM_CHANGED_GUEST_RFLAGS; 1080 1081 /* Segments */ 1082 if (fCpumIntern & CPUMCTX_EXTRN_SREG_MASK) 1083 { 1084 if (fCpumIntern & CPUMCTX_EXTRN_ES) 1085 fHmChanged |= HM_CHANGED_GUEST_ES; 1086 if (fCpumIntern & CPUMCTX_EXTRN_CS) 1087 fHmChanged |= HM_CHANGED_GUEST_CS; 1088 if (fCpumIntern & CPUMCTX_EXTRN_SS) 1089 fHmChanged |= HM_CHANGED_GUEST_SS; 1090 if (fCpumIntern & CPUMCTX_EXTRN_DS) 1091 fHmChanged |= HM_CHANGED_GUEST_DS; 1092 if (fCpumIntern & CPUMCTX_EXTRN_FS) 1093 fHmChanged |= HM_CHANGED_GUEST_FS; 1094 if (fCpumIntern & CPUMCTX_EXTRN_GS) 1095 fHmChanged |= HM_CHANGED_GUEST_GS; 1096 } 1097 1098 /* Descriptor tables & task segment. */ 1099 if (fCpumIntern & CPUMCTX_EXTRN_TABLE_MASK) 1100 { 1101 if (fCpumIntern & CPUMCTX_EXTRN_LDTR) 1102 fHmChanged |= HM_CHANGED_GUEST_LDTR; 1103 if (fCpumIntern & CPUMCTX_EXTRN_TR) 1104 fHmChanged |= HM_CHANGED_GUEST_TR; 1105 if (fCpumIntern & CPUMCTX_EXTRN_IDTR) 1106 fHmChanged |= HM_CHANGED_GUEST_IDTR; 1107 if (fCpumIntern & CPUMCTX_EXTRN_GDTR) 1108 fHmChanged |= HM_CHANGED_GUEST_GDTR; 1109 } 1110 1111 /* Control registers. */ 1112 if (fCpumIntern & CPUMCTX_EXTRN_CR_MASK) 1113 { 1114 if (fCpumIntern & CPUMCTX_EXTRN_CR0) 1115 fHmChanged |= HM_CHANGED_GUEST_CR0; 1116 if (fCpumIntern & CPUMCTX_EXTRN_CR2) 1117 fHmChanged |= HM_CHANGED_GUEST_CR2; 1118 if (fCpumIntern & CPUMCTX_EXTRN_CR3) 1119 fHmChanged |= HM_CHANGED_GUEST_CR3; 1120 if (fCpumIntern & CPUMCTX_EXTRN_CR4) 1121 fHmChanged |= HM_CHANGED_GUEST_CR4; 1122 } 1123 if (fCpumIntern & CPUMCTX_EXTRN_APIC_TPR) 1124 fHmChanged |= HM_CHANGED_GUEST_APIC_TPR; 1125 1126 /* Debug registers. */ 1127 if (fCpumIntern & CPUMCTX_EXTRN_DR0_DR3) 1128 fHmChanged |= HM_CHANGED_GUEST_DR0_DR3; 1129 if (fCpumIntern & CPUMCTX_EXTRN_DR6) 1130 fHmChanged |= HM_CHANGED_GUEST_DR6; 1131 if (fCpumIntern & CPUMCTX_EXTRN_DR7) 1132 fHmChanged |= HM_CHANGED_GUEST_DR7; 1133 1134 /* Floating point state. 
*/ 1135 if (fCpumIntern & CPUMCTX_EXTRN_X87) 1136 fHmChanged |= HM_CHANGED_GUEST_X87; 1137 if (fCpumIntern & CPUMCTX_EXTRN_SSE_AVX) 1138 fHmChanged |= HM_CHANGED_GUEST_SSE_AVX; 1139 if (fCpumIntern & CPUMCTX_EXTRN_OTHER_XSAVE) 1140 fHmChanged |= HM_CHANGED_GUEST_OTHER_XSAVE; 1141 if (fCpumIntern & CPUMCTX_EXTRN_XCRx) 1142 fHmChanged |= HM_CHANGED_GUEST_XCRx; 1143 1144 /* MSRs */ 1145 if (fCpumIntern & CPUMCTX_EXTRN_EFER) 1146 fHmChanged |= HM_CHANGED_GUEST_EFER_MSR; 1147 if (fCpumIntern & CPUMCTX_EXTRN_KERNEL_GS_BASE) 1148 fHmChanged |= HM_CHANGED_GUEST_KERNEL_GS_BASE; 1149 if (fCpumIntern & CPUMCTX_EXTRN_SYSENTER_MSRS) 1150 fHmChanged |= HM_CHANGED_GUEST_SYSENTER_MSR_MASK; 1151 if (fCpumIntern & CPUMCTX_EXTRN_SYSCALL_MSRS) 1152 fHmChanged |= HM_CHANGED_GUEST_SYSCALL_MSRS; 1153 if (fCpumIntern & CPUMCTX_EXTRN_TSC_AUX) 1154 fHmChanged |= HM_CHANGED_GUEST_TSC_AUX; 1155 if (fCpumIntern & CPUMCTX_EXTRN_OTHER_MSRS) 1156 fHmChanged |= HM_CHANGED_GUEST_OTHER_MSRS; 1157 1158 return fHmChanged; 1159 } 1160 1161 1162 /** 1163 * Exports the guest state to HV for execution. 1164 * 1165 * @returns VBox status code. 1166 * @param pVM The cross context VM structure. 1167 * @param pVCpu The cross context virtual CPU structure of the 1168 * calling EMT. 1169 * @param pVmxTransient The transient VMX structure. 1170 */ 1171 static int nemR3DarwinExportGuestState(PVMCC pVM, PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient) 1172 { 1173 #define WRITE_GREG(a_GReg, a_Value) \ 1174 do \ 1175 { \ 1176 hv_return_t hrc = hv_vcpu_write_register(pVCpu->nem.s.hVCpuId, (a_GReg), (a_Value)); \ 1177 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 1178 { /* likely */ } \ 1179 else \ 1180 return VERR_INTERNAL_ERROR; \ 1181 } while(0) 1182 #define WRITE_VMCS_FIELD(a_Field, a_Value) \ 1183 do \ 1184 { \ 1185 hv_return_t hrc = hv_vmx_vcpu_write_vmcs(pVCpu->nem.s.hVCpuId, (a_Field), (a_Value)); \ 1186 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 1187 { /* likely */ } \ 1188 else \ 1189 return VERR_INTERNAL_ERROR; \ 1190 } while(0) 1191 #define WRITE_MSR(a_Msr, a_Value) \ 1192 do \ 1193 { \ 1194 hv_return_t hrc = hv_vcpu_write_msr(pVCpu->nem.s.hVCpuId, (a_Msr), (a_Value)); \ 1195 if (RT_LIKELY(hrc == HV_SUCCESS)) \ 1196 { /* likely */ } \ 1197 else \ 1198 AssertFailedReturn(VERR_INTERNAL_ERROR); \ 1199 } while(0) 1200 1201 RT_NOREF(pVM); 1202 1203 uint64_t const fWhat = ~pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL; 1204 if (!fWhat) 1205 return VINF_SUCCESS; 1206 1207 pVCpu->nem.s.fCtxChanged |= nemR3DarwinCpumExtrnToHmChanged(pVCpu->cpum.GstCtx.fExtrn); 1208 1209 int rc = vmxHCExportGuestEntryExitCtls(pVCpu, pVmxTransient); 1210 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 1211 1212 rc = nemR3DarwinExportGuestGprs(pVCpu); 1213 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 1214 1215 rc = vmxHCExportGuestCR0(pVCpu, pVmxTransient); 1216 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 1217 1218 VBOXSTRICTRC rcStrict = vmxHCExportGuestCR3AndCR4(pVCpu, pVmxTransient); 1219 if (rcStrict == VINF_SUCCESS) 1220 { /* likely */ } 1221 else 1222 { 1223 Assert(rcStrict == VINF_EM_RESCHEDULE_REM || RT_FAILURE_NP(rcStrict)); 1224 return VBOXSTRICTRC_VAL(rcStrict); 1225 } 1226 1227 vmxHCExportGuestXcptIntercepts(pVCpu, pVmxTransient); 1228 vmxHCExportGuestRip(pVCpu); 1229 //vmxHCExportGuestRsp(pVCpu); 1230 vmxHCExportGuestRflags(pVCpu, pVmxTransient); 1231 1232 rc = vmxHCExportGuestSegRegsXdtr(pVCpu, pVmxTransient); 1233 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 1234 1235 if (fWhat & CPUMCTX_EXTRN_APIC_TPR) 1236 WRITE_GREG(HV_X86_TPR, 
CPUMGetGuestCR8(pVCpu)); 1237 1238 /* Debug registers. */ 1239 if (fWhat & CPUMCTX_EXTRN_DR0_DR3) 1240 { 1241 WRITE_GREG(HV_X86_DR0, pVCpu->cpum.GstCtx.dr[0]); // CPUMGetHyperDR0(pVCpu)); 1242 WRITE_GREG(HV_X86_DR1, pVCpu->cpum.GstCtx.dr[1]); // CPUMGetHyperDR1(pVCpu)); 1243 WRITE_GREG(HV_X86_DR2, pVCpu->cpum.GstCtx.dr[2]); // CPUMGetHyperDR2(pVCpu)); 1244 WRITE_GREG(HV_X86_DR3, pVCpu->cpum.GstCtx.dr[3]); // CPUMGetHyperDR3(pVCpu)); 1245 } 1246 if (fWhat & CPUMCTX_EXTRN_DR6) 1247 WRITE_GREG(HV_X86_DR6, pVCpu->cpum.GstCtx.dr[6]); // CPUMGetHyperDR6(pVCpu)); 1248 if (fWhat & CPUMCTX_EXTRN_DR7) 1249 WRITE_GREG(HV_X86_DR7, pVCpu->cpum.GstCtx.dr[7]); // CPUMGetHyperDR7(pVCpu)); 1250 1251 if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX)) 1252 { 1253 hv_return_t hrc = hv_vcpu_write_fpstate(pVCpu->nem.s.hVCpuId, &pVCpu->cpum.GstCtx.XState, sizeof(pVCpu->cpum.GstCtx.XState)); 1254 if (hrc == HV_SUCCESS) 1255 { /* likely */ } 1256 else 1257 return nemR3DarwinHvSts2Rc(hrc); 1258 } 1259 1260 /* MSRs */ 1261 if (fWhat & CPUMCTX_EXTRN_EFER) 1262 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_EFER_FULL, pVCpu->cpum.GstCtx.msrEFER); 1263 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 1264 WRITE_MSR(MSR_K8_KERNEL_GS_BASE, pVCpu->cpum.GstCtx.msrKERNELGSBASE); 1265 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 1266 { 1267 WRITE_MSR(MSR_IA32_SYSENTER_CS, pVCpu->cpum.GstCtx.SysEnter.cs); 1268 WRITE_MSR(MSR_IA32_SYSENTER_EIP, pVCpu->cpum.GstCtx.SysEnter.eip); 1269 WRITE_MSR(MSR_IA32_SYSENTER_ESP, pVCpu->cpum.GstCtx.SysEnter.esp); 1270 } 1271 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 1272 { 1273 WRITE_MSR(MSR_K6_STAR, pVCpu->cpum.GstCtx.msrSTAR); 1274 WRITE_MSR(MSR_K8_LSTAR, pVCpu->cpum.GstCtx.msrLSTAR); 1275 WRITE_MSR(MSR_K8_CSTAR, pVCpu->cpum.GstCtx.msrCSTAR); 1276 WRITE_MSR(MSR_K8_SF_MASK, pVCpu->cpum.GstCtx.msrSFMASK); 1277 } 1278 if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS) 1279 { 1280 #if 0 1281 hv_return_t hrc = hv_vmx_vcpu_set_apic_address(pVCpu->nem.s.hVCpuId, APICGetBaseMsrNoCheck(pVCpu)); 1282 if (RT_UNLIKELY(hrc != HV_SUCCESS)) 1283 return nemR3DarwinHvSts2Rc(hrc); 1284 #endif 1285 1286 #if 0 1287 ADD_REG64(WHvX64RegisterPat, pVCpu->cpum.GstCtx.msrPAT); 1288 #if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */ 1289 ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu)); 1290 #endif 1291 PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu); 1292 ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType); 1293 ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000); 1294 ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000); 1295 ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000); 1296 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000); 1297 ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000); 1298 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000); 1299 ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000); 1300 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000); 1301 ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000); 1302 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000); 1303 ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000); 1304 ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux); 1305 #if 0 /** @todo these registers aren't available? Might explain something.. 
.*/ 1306 const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pVM); 1307 if (enmCpuVendor != CPUMCPUVENDOR_AMD) 1308 { 1309 ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable); 1310 ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu)); 1311 } 1312 #endif 1313 #endif 1314 } 1315 1316 WRITE_VMCS_FIELD(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0 /*MSR_IA32_DEBUGCTL_LBR*/); 1317 1318 #if 0 /** @todo */ 1319 WRITE_GREG(HV_X86_TSS_BASE, ); 1320 WRITE_GREG(HV_X86_TSS_LIMIT, ); 1321 WRITE_GREG(HV_X86_TSS_AR, ); 1322 WRITE_GREG(HV_X86_XCR0, ); 1323 #endif 1324 1325 hv_vcpu_invalidate_tlb(pVCpu->nem.s.hVCpuId); 1326 hv_vcpu_flush(pVCpu->nem.s.hVCpuId); 1327 1328 pVCpu->cpum.GstCtx.fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_NEM; 1329 1330 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */ 1331 ASMAtomicUoAndU64(&pVCpu->nem.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP) 1332 | HM_CHANGED_GUEST_CR2 1333 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7) 1334 | HM_CHANGED_GUEST_X87 1335 | HM_CHANGED_GUEST_SSE_AVX 1336 | HM_CHANGED_GUEST_OTHER_XSAVE 1337 | HM_CHANGED_GUEST_XCRx 1338 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */ 1339 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */ 1340 | HM_CHANGED_GUEST_TSC_AUX 1341 | HM_CHANGED_GUEST_OTHER_MSRS 1342 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK))); 1343 1344 return VINF_SUCCESS; 1345 #undef WRITE_GREG 1346 #undef WRITE_VMCS_FIELD 1347 } 1348 1220 1349 1221 1350 /** … … 1226 1355 * @param pVCpu The cross context virtual CPU structure of the 1227 1356 * calling EMT. 1228 */ 1229 static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu) 1230 { 1231 VMXTRANSIENT VmxTransient; 1232 RT_ZERO(VmxTransient); 1233 1357 * @param pVmxTransient The transient VMX structure. 
1358 */ 1359 static VBOXSTRICTRC nemR3DarwinHandleExit(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 1360 { 1234 1361 uint32_t uExitReason; 1235 1362 int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason); 1236 1363 AssertRC(rc); 1237 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo; 1238 VmxTransient.uExitReason = VMX_EXIT_REASON_BASIC(uExitReason); 1239 VmxTransient.fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason); 1240 1241 if (RT_UNLIKELY(VmxTransient.fVMEntryFailed)) 1364 pVmxTransient->fVmcsFieldsRead = 0; 1365 pVmxTransient->fIsNestedGuest = false; 1366 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason); 1367 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason); 1368 1369 if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed)) 1242 1370 AssertLogRelMsgFailedReturn(("Running guest failed for CPU #%u: %#x %u\n", 1243 pVCpu->idCpu, VmxTransient.uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)),1371 pVCpu->idCpu, pVmxTransient->uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)), 1244 1372 VERR_NEM_IPE_0); 1245 1373 … … 1251 1379 1252 1380 #ifndef HMVMX_USE_FUNCTION_TABLE 1253 return vmxHCHandleExit(pVCpu, &VmxTransient);1381 return vmxHCHandleExit(pVCpu, pVmxTransient); 1254 1382 #else 1255 return g_aVMExitHandlers[ VmxTransient.uExitReason].pfn(pVCpu, &VmxTransient);1383 return g_aVMExitHandlers[pVmxTransient->uExitReason].pfn(pVCpu, pVmxTransient); 1256 1384 #endif 1257 1385 } … … 1302 1430 hrc = hv_vmx_read_capability(HV_VMX_CAP_TRUE_EXIT, &g_HmMsrs.u.vmx.TrueExitCtls.u); 1303 1431 } 1432 #else /** @todo Not available with the current SDK used (available with 11.0+) but required for setting the CRx values properly. */ 1433 g_HmMsrs.u.vmx.u64Cr0Fixed0 = 0x80000021; 1434 g_HmMsrs.u.vmx.u64Cr0Fixed1 = 0xffffffff; 1435 g_HmMsrs.u.vmx.u64Cr4Fixed0 = 0x2000; 1436 g_HmMsrs.u.vmx.u64Cr4Fixed1 = 0x1767ff; 1304 1437 #endif 1305 1438 … … 1317 1450 } 1318 1451 1452 if (hrc == HV_SUCCESS) 1453 { 1454 /* 1455 * Check for EFER swapping support. 1456 */ 1457 g_fHmVmxSupportsVmcsEfer = true; //(g_HmMsrs.u.vmx.EntryCtls.n.allowed1 & VMX_ENTRY_CTLS_LOAD_EFER_MSR) 1458 //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_LOAD_EFER_MSR) 1459 //&& (g_HmMsrs.u.vmx.ExitCtls.n.allowed1 & VMX_EXIT_CTLS_SAVE_EFER_MSR); 1460 } 1461 1319 1462 return nemR3DarwinHvSts2Rc(hrc); 1320 1463 } … … 1419 1562 hmR0VmxSetupVmcsApicAccessAddr(pVCpu); 1420 1563 } 1564 #endif 1421 1565 1422 1566 /* Enable the RDTSCP instruction if we expose it to the guest and is supported … … 1426 1570 fVal |= VMX_PROC_CTLS2_RDTSCP; 1427 1571 1572 #if 0 1428 1573 /* Enable Pause-Loop exiting. */ 1429 1574 if ( (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_PAUSE_LOOP_EXIT) … … 1456 1601 1457 1602 /** 1458 * Sets up processor-based VM-execution controls in the VMCS. 1603 * Enables native access for the given MSR. 1604 * 1605 * @returns VBox status code. 1606 * @param pVCpu The cross context virtual CPU structure. 1607 * @param idMsr The MSR to enable native access for. 1608 */ 1609 static int nemR3DarwinMsrSetNative(PVMCPUCC pVCpu, uint32_t idMsr) 1610 { 1611 hv_return_t hrc = hv_vcpu_enable_native_msr(pVCpu->nem.s.hVCpuId, idMsr, true /*enable*/); 1612 if (hrc == HV_SUCCESS) 1613 return VINF_SUCCESS; 1614 1615 return nemR3DarwinHvSts2Rc(hrc); 1616 } 1617 1618 1619 /** 1620 * Sets up the MSR permissions which don't change through the lifetime of the VM. 1459 1621 * 1460 1622 * @returns VBox status code. 
… … 1462 1624 * @param pVmcsInfo The VMCS info. object. 1463 1625 */ 1626 static int nemR3DarwinSetupVmcsMsrPermissions(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 1627 { 1628 RT_NOREF(pVmcsInfo); 1629 1630 /* 1631 * The guest can access the following MSRs (read, write) without causing 1632 * VM-exits; they are loaded/stored automatically using fields in the VMCS. 1633 */ 1634 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1635 int rc; 1636 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_CS); AssertRCReturn(rc, rc); 1637 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_ESP); AssertRCReturn(rc, rc); 1638 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SYSENTER_EIP); AssertRCReturn(rc, rc); 1639 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_GS_BASE); AssertRCReturn(rc, rc); 1640 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_FS_BASE); AssertRCReturn(rc, rc); 1641 1642 /* 1643 * The IA32_PRED_CMD and IA32_FLUSH_CMD MSRs are write-only and has no state 1644 * associated with then. We never need to intercept access (writes need to be 1645 * executed without causing a VM-exit, reads will #GP fault anyway). 1646 * 1647 * The IA32_SPEC_CTRL MSR is read/write and has state. We allow the guest to 1648 * read/write them. We swap the guest/host MSR value using the 1649 * auto-load/store MSR area. 1650 */ 1651 if (pVM->cpum.ro.GuestFeatures.fIbpb) 1652 { 1653 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_PRED_CMD); 1654 AssertRCReturn(rc, rc); 1655 } 1656 #if 0 /* Doesn't work. */ 1657 if (pVM->cpum.ro.GuestFeatures.fFlushCmd) 1658 { 1659 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_FLUSH_CMD); 1660 AssertRCReturn(rc, rc); 1661 } 1662 #endif 1663 if (pVM->cpum.ro.GuestFeatures.fIbrs) 1664 { 1665 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_IA32_SPEC_CTRL); 1666 AssertRCReturn(rc, rc); 1667 } 1668 1669 /* 1670 * Allow full read/write access for the following MSRs (mandatory for VT-x) 1671 * required for 64-bit guests. 1672 */ 1673 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_LSTAR); AssertRCReturn(rc, rc); 1674 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K6_STAR); AssertRCReturn(rc, rc); 1675 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_SF_MASK); AssertRCReturn(rc, rc); 1676 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_KERNEL_GS_BASE); AssertRCReturn(rc, rc); 1677 1678 /* Required for enabling the RDTSCP instruction. */ 1679 rc = nemR3DarwinMsrSetNative(pVCpu, MSR_K8_TSC_AUX); AssertRCReturn(rc, rc); 1680 1681 return VINF_SUCCESS; 1682 } 1683 1684 1685 /** 1686 * Sets up processor-based VM-execution controls in the VMCS. 1687 * 1688 * @returns VBox status code. 1689 * @param pVCpu The cross context virtual CPU structure. 1690 * @param pVmcsInfo The VMCS info. object. 1691 */ 1464 1692 static int nemR3DarwinVmxSetupVmcsProcCtls(PVMCPUCC pVCpu, PVMXVMCSINFO pVmcsInfo) 1465 1693 { 1466 PVMCC pVM = pVCpu->CTX_SUFF(pVM);1694 //PVMCC pVM = pVCpu->CTX_SUFF(pVM); 1467 1695 uint32_t fVal = g_HmMsrs.u.vmx.ProcCtls.n.allowed0; /* Bits set here must be set in the VMCS. */ 1468 1696 uint32_t const fZap = g_HmMsrs.u.vmx.ProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ … … 1502 1730 } 1503 1731 1504 #if 0 /** @todo */1505 /* Use MSR-bitmaps if supported by the CPU. */1506 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_MSR_BITMAPS)1507 {1508 fVal |= VMX_PROC_CTLS_USE_MSR_BITMAPS;1509 hmR0VmxSetupVmcsMsrBitmapAddr(pVmcsInfo);1510 }1511 #endif1512 1513 1732 /* Use the secondary processor-based VM-execution controls if supported by the CPU. 
*/ 1514 1733 if (g_HmMsrs.u.vmx.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS) … … 1528 1747 pVmcsInfo->u32ProcCtls = fVal; 1529 1748 1530 #if 01531 1749 /* Set up MSR permissions that don't change through the lifetime of the VM. */ 1532 if (pVmcsInfo->u32ProcCtls & VMX_PROC_CTLS_USE_MSR_BITMAPS) 1533 hmR0VmxSetupVmcsMsrPermissions(pVCpu, pVmcsInfo); 1534 #endif 1750 rc = nemR3DarwinSetupVmcsMsrPermissions(pVCpu, pVmcsInfo); 1751 AssertRCReturn(rc, rc); 1535 1752 1536 1753 /* … … 1631 1848 if (RT_SUCCESS(rc)) 1632 1849 { 1633 nemR3DarwinVmxSetupVmcsXcptBitmap(pVCpu, &pVCpu->nem.s.VmcsInfo); 1634 return VINF_SUCCESS; 1850 rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_ENTRY, &pVCpu->nem.s.VmcsInfo.u32EntryCtls); 1851 if (RT_SUCCESS(rc)) 1852 { 1853 rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_CTRL_EXIT, &pVCpu->nem.s.VmcsInfo.u32ExitCtls); 1854 if (RT_SUCCESS(rc)) 1855 { 1856 nemR3DarwinVmxSetupVmcsXcptBitmap(pVCpu, &pVCpu->nem.s.VmcsInfo); 1857 return VINF_SUCCESS; 1858 } 1859 else 1860 LogRelFunc(("Failed to read the exit controls. rc=%Rrc\n", rc)); 1861 } 1862 else 1863 LogRelFunc(("Failed to read the entry controls. rc=%Rrc\n", rc)); 1635 1864 } 1636 1865 else … … 1763 1992 AssertRCReturn(rc, rc); 1764 1993 1994 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 1995 1765 1996 return VINF_SUCCESS; 1766 1997 } … … 1884 2115 void nemR3NativeResetCpu(PVMCPU pVCpu, bool fInitIpi) 1885 2116 { 1886 RT_NOREF(pVCpu, fInitIpi); 2117 RT_NOREF(fInitIpi); 2118 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 1887 2119 } 1888 2120 … … 1914 2146 * everything every time. This will be optimized later. 1915 2147 */ 2148 2149 VMXTRANSIENT VmxTransient; 2150 RT_ZERO(VmxTransient); 2151 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo; 2152 1916 2153 const bool fSingleStepping = DBGFIsStepping(pVCpu); 1917 2154 VBOXSTRICTRC rcStrict = VINF_SUCCESS; … … 1960 2197 /** @todo Only copy the state selectively. */ 1961 2198 { 1962 int rc 2 = nemR3DarwinCopyStateToHv(pVM, pVCpu);1963 AssertRCReturn(rc 2, rc2);2199 int rc = nemR3DarwinCopyStateToHv(pVM, pVCpu, &VmxTransient); 2200 AssertRCReturn(rc, rc); 1964 2201 } 1965 2202 … … 1985 2222 * Deal with the message. 1986 2223 */ 1987 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu );2224 rcStrict = nemR3DarwinHandleExit(pVM, pVCpu, &VmxTransient); 1988 2225 if (rcStrict == VINF_SUCCESS) 1989 2226 { /* hopefully likely */ } … … 1997 2234 else 1998 2235 { 1999 AssertLogRelMsgFailedReturn(("hv_vcpu_run failed for CPU #%u: %#x\n", pVCpu->idCpu, hrc), 2236 AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x %u\n", 2237 pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)), 2000 2238 VERR_NEM_IPE_0); 2001 2239 } … … 2049 2287 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED); 2050 2288 2051 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))2289 if (pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL)) 2052 2290 { 2053 2291 /* Try anticipate what we might need. 
*/ 2054 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;2292 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK; 2055 2293 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) 2056 2294 || RT_FAILURE(rcStrict)) 2057 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);2295 fImport = CPUMCTX_EXTRN_ALL; 2058 2296 else if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC 2059 2297 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI)) 2060 2298 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK; 2061 2299 2062 fImport = CPUMCTX_EXTRN_ALL;2063 2300 if (pVCpu->cpum.GstCtx.fExtrn & fImport) 2064 2301 { 2065 2302 /* Only import what is external currently. */ 2066 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);2303 int rc2 = nemR3DarwinCopyStateFromHv(pVM, pVCpu, fImport); 2067 2304 if (RT_SUCCESS(rc2)) 2068 2305 pVCpu->cpum.GstCtx.fExtrn &= ~fImport; 2069 2306 else if (RT_SUCCESS(rcStrict)) 2070 2307 rcStrict = rc2; 2071 if (!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))) 2308 if (!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL)) 2309 { 2072 2310 pVCpu->cpum.GstCtx.fExtrn = 0; 2311 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 2312 } 2073 2313 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn); 2074 2314 } 2075 2315 else 2076 {2077 2316 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped); 2078 pVCpu->cpum.GstCtx.fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;2079 }2080 2317 } 2081 2318 else … … 2083 2320 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped); 2084 2321 pVCpu->cpum.GstCtx.fExtrn = 0; 2322 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 2085 2323 } 2086 2324