VirtualBox

Changeset 45496 in vbox for trunk


Ignore:
Timestamp:
Apr 11, 2013 8:08:37 PM
Author:
vboxsync
Message:

VMMR0/HMVMXR0: leaner, less parameter passing, part 1.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45487 r45496  
    47514751 *
    47524752 * @returns VBox status code.
    4753  * @param   pVM         Pointer to the VM.
    47544753 * @param   pVCpu       Pointer to the VMCPU.
    47554754 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    47594758 * @remarks No-long-jump zone!!!
    47604759 */
    4761 DECLINLINE(int) hmR0VmxSaveGuestCR0(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4760DECLINLINE(int) hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    47624761{
    47634762    int rc = VINF_SUCCESS;
     
    47824781 *
    47834782 * @returns VBox status code.
    4784  * @param   pVM         Pointer to the VM.
    47854783 * @param   pVCpu       Pointer to the VMCPU.
    47864784 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    47904788 * @remarks No-long-jump zone!!!
    47914789 */
    4792 DECLINLINE(int) hmR0VmxSaveGuestCR4(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4790DECLINLINE(int) hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    47934791{
    47944792    int rc = VINF_SUCCESS;
     
    48124810 *
    48134811 * @returns VBox status code.
    4814  * @param   pVM         Pointer to the VM.
    48154812 * @param   pVCpu       Pointer to the VMCPU.
    48164813 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    48204817 * @remarks No-long-jump zone!!!
    48214818 */
    4822 DECLINLINE(int) hmR0VmxSaveGuestRip(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4819DECLINLINE(int) hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    48234820{
    48244821    if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RIP)
     
    48384835 *
    48394836 * @returns VBox status code.
    4840  * @param   pVM         Pointer to the VM.
    48414837 * @param   pVCpu       Pointer to the VMCPU.
    48424838 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    48464842 * @remarks No-long-jump zone!!!
    48474843 */
    4848 DECLINLINE(int) hmR0VmxSaveGuestRsp(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4844DECLINLINE(int) hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    48494845{
    48504846    if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RSP)
     
    48644860 *
    48654861 * @returns VBox status code.
    4866  * @param   pVM         Pointer to the VM.
    48674862 * @param   pVCpu       Pointer to the VMCPU.
    48684863 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    48724867 * @remarks No-long-jump zone!!!
    48734868 */
    4874 DECLINLINE(int) hmR0VmxSaveGuestRflags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4869DECLINLINE(int) hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    48754870{
    48764871    if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_RFLAGS)
     
    48854880    if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    48864881    {
     4882        PVM pVM = pVCpu->CTX_SUFF(pVM);
    48874883        Assert(pVM->hm.s.vmx.pRealModeTSS);
    48884884        Log(("Saving real-mode RFLAGS VT-x view=%#RX64\n", pMixedCtx->rflags.u64));
     
    49004896 * guest-CPU context.
    49014897 */
    4902 static int hmR0VmxSaveGuestGprs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    4903 {
    4904     int rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    4905     rc    |= hmR0VmxSaveGuestRsp(pVM, pVCpu, pMixedCtx);
    4906     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
     4898static int hmR0VmxSaveGuestGprs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4899{
     4900    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     4901    rc    |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
     4902    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    49074903    return rc;
    49084904}
     
    49134909 * from the guest-state area in the VMCS.
    49144910 *
    4915  * @param   pVM         Pointer to the VM.
    49164911 * @param   pVCpu       Pointer to the VMCPU.
    49174912 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    49214916 * @remarks No-long-jump zone!!!
    49224917 */
    4923 DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVM pVM,  PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4918DECLINLINE(void) hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    49244919{
    49254920    uint32_t uIntrState = 0;
     
    49334928        Assert(   uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
    49344929               || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
    4935         rc  = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    4936         rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);    /* for hmR0VmxLoadGuestIntrState(). */
     4930        rc  = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     4931        rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);    /* for hmR0VmxLoadGuestIntrState(). */
    49374932        AssertRC(rc);
    49384933        EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
     
    49464941 *
    49474942 * @returns VBox status code.
    4948  * @param   pVM         Pointer to the VM.
    49494943 * @param   pVCpu       Pointer to the VMCPU.
    49504944 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    49544948 * @remarks No-long-jump zone!!!
    49554949 */
    4956 DECLINLINE(int) hmR0VmxSaveGuestActivityState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4950DECLINLINE(int) hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    49574951{
    49584952    /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
     
    49674961 *
    49684962 * @returns VBox status code.
    4969  * @param   pVM         Pointer to the VM.
    49704963 * @param   pVCpu       Pointer to the VMCPU.
    49714964 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    49754968 * @remarks No-long-jump zone!!!
    49764969 */
    4977 DECLINLINE(int) hmR0VmxSaveGuestSysenterMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     4970DECLINLINE(int) hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    49784971{
    49794972    int rc = VINF_SUCCESS;
     
    50085001 *
    50095002 * @returns VBox status code.
    5010  * @param   pVM         Pointer to the VM.
    50115003 * @param   pVCpu       Pointer to the VMCPU.
    50125004 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    50165008 * @remarks No-long-jump zone!!!
    50175009 */
    5018 DECLINLINE(int) hmR0VmxSaveGuestFSBaseMsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5010DECLINLINE(int) hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    50195011{
    50205012    RTGCUINTREG uVal = 0;
     
    50355027 *
    50365028 * @returns VBox status code.
    5037  * @param   pVM         Pointer to the VM.
    50385029 * @param   pVCpu       Pointer to the VMCPU.
    50395030 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    50435034 * @remarks No-long-jump zone!!!
    50445035 */
    5045 DECLINLINE(int) hmR0VmxSaveGuestGSBaseMsr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5036DECLINLINE(int) hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    50465037{
    50475038    RTGCUINTREG uVal = 0;
     
    50625053 *
    50635054 * @returns VBox status code.
    5064  * @param   pVM         Pointer to the VM.
    50655055 * @param   pVCpu       Pointer to the VMCPU.
    50665056 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    50705060 * @remarks No-long-jump zone!!!
    50715061 */
    5072 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5062static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    50735063{
    50745064    if (pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
     
    51075097 *
    51085098 * @returns VBox status code.
    5109  * @param   pVM         Pointer to the VM.
    51105099 * @param   pVCpu       Pointer to the VMCPU.
    51115100 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    51155104 * @remarks No-long-jump zone!!!
    51165105 */
    5117 DECLINLINE(int) hmR0VmxSaveGuestControlRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5106DECLINLINE(int) hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    51185107{
    51195108    RTGCUINTREG uVal    = 0;
     
    51225111
    51235112    /* Guest CR0. Guest FPU. */
    5124     rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     5113    rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    51255114
    51265115    /* Guest CR4. */
    5127     rc |= hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
     5116    rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
    51285117    AssertRCReturn(rc, rc);
    51295118
     
    51315120    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_CR3))
    51325121    {
     5122        PVM pVM = pVCpu->CTX_SUFF(pVM);
    51335123        if (   pVM->hm.s.fNestedPaging
    51345124            && CPUMIsGuestPagingEnabledEx(pMixedCtx))
     
    51435133
    51445134            /* We require EFER to check PAE mode. */
    5145             rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
     5135            rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    51465136
    51475137            /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
     
    52175207 *
    52185208 * @returns VBox status code.
    5219  * @param   pVM         Pointer to the VM.
    52205209 * @param   pVCpu       Pointer to the VMCPU.
    52215210 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    52255214 * @remarks No-long-jump zone!!!
    52265215 */
    5227 static int hmR0VmxSaveGuestSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5216static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    52285217{
    52295218#ifdef VMX_USE_CACHED_VMCS_ACCESSES
     
    52425231    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_SEGMENT_REGS))
    52435232    {
    5244         rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     5233        rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    52455234        rc |= VMXLOCAL_READ_SEG(CS, cs);
    52465235        rc |= VMXLOCAL_READ_SEG(SS, ss);
     
    52975286    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & VMX_UPDATED_GUEST_TR))
    52985287    {
    5299         rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     5288        rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    53005289
    53015290        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */
     
    53145303 *
    53155304 * @returns VBox status code.
    5316  * @param   pVM         Pointer to the VM.
    53175305 * @param   pVCpu       Pointer to the VMCPU.
    53185306 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    53225310 * @remarks No-long-jump zone!!!
    53235311 */
    5324 DECLINLINE(int) hmR0VmxSaveGuestDebugRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5312DECLINLINE(int) hmR0VmxSaveGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    53255313{
    53265314    int rc = VINF_SUCCESS;
     
    53415329 *
    53425330 * @returns VBox status code.
    5343  * @param   pVM         Pointer to the VM.
    53445331 * @param   pVCpu       Pointer to the VMCPU.
    53455332 * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
     
    53495336 * @remarks No-long-jump zone!!!
    53505337 */
    5351 DECLINLINE(int) hmR0VmxSaveGuestApicState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5338DECLINLINE(int) hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    53525339{
    53535340    /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
     
    53625349 *
    53635350 * @returns VBox status code.
    5364  * @param   pVM         Pointer to the VM.
    53655351 * @param   pVCpu       Pointer to the VMCPU.
    53665352 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
     
    53685354 *                      before using them.
    53695355 */
    5370 static int hmR0VmxSaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    5371 {
    5372     Assert(pVM);
     5356static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5357{
    53735358    Assert(pVCpu);
    53745359    Assert(pMixedCtx);
     
    53795364    VMMRZCallRing3Disable(pVCpu);
    53805365
    5381     int rc = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
    5382     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGprs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5383 
    5384     rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
    5385     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5386 
    5387     rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
    5388     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5389 
    5390     rc = hmR0VmxSaveGuestDebugRegs(pVM, pVCpu, pMixedCtx);
    5391     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5392 
    5393     rc = hmR0VmxSaveGuestSysenterMsrs(pVM, pVCpu, pMixedCtx);
    5394     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5395 
    5396     rc = hmR0VmxSaveGuestFSBaseMsr(pVM, pVCpu, pMixedCtx);
    5397     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5398 
    5399     rc = hmR0VmxSaveGuestGSBaseMsr(pVM, pVCpu, pMixedCtx);
    5400     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5401 
    5402     rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
    5403     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5404 
    5405     rc = hmR0VmxSaveGuestActivityState(pVM, pVCpu, pMixedCtx);
    5406     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    5407 
    5408     rc = hmR0VmxSaveGuestApicState(pVM, pVCpu, pMixedCtx);
    5409     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     5366    int rc = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
     5367    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGprs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5368
     5369    rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     5370    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5371
     5372    rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     5373    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5374
     5375    rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);
     5376    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5377
     5378    rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
     5379    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5380
     5381    rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
     5382    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5383
     5384    rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
     5385    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5386
     5387    rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     5388    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5389
     5390    rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
     5391    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
     5392
     5393    rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
     5394    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDebugRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    54105395
    54115396    AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == VMX_UPDATED_GUEST_ALL,
     
    54495434    {
    54505435        /* We need the control registers now, make sure the guest-CPU context is updated. */
    5451         rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
     5436        rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    54525437        AssertRCReturn(rc, rc);
    54535438
     
    55905575    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    55915576
    5592     int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     5577    int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    55935578    Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == VMX_UPDATED_GUEST_ALL);
    55945579    AssertRC(rc);
     
    56395624    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    56405625
    5641     /* We want to see what the guest-state was before VM-entry, don't resync here, as we will never continue guest execution.*/
    5642     if (rcExit == VERR_VMX_INVALID_GUEST_STATE)
     5626    if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
     5627    {
     5628        /* We want to see what the guest-state was before VM-entry, don't resync here, as we won't continue guest execution. */
    56435629        return;
     5630    }
     5631    else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
     5632    {
     5633        VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
     5634        pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
     5635        pVCpu->hm.s.vmx.lasterror.idEnteredCpu    = pVCpu->hm.s.idEnteredCpu;
     5636        pVCpu->hm.s.vmx.lasterror.idCurrentCpu    = RTMpCpuId();
     5637        return;
     5638    }
    56445639
    56455640    /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     
    58125807
    58135808    /* We need the guests's RFLAGS for sure from this point on, make sure it is updated. */
    5814     int rc = hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
     5809    int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    58155810    AssertRCReturn(rc, rc);
    58165811
     
    60526047
    60536048    /* We require CR0 to check if the guest is in real-mode. */
    6054     int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     6049    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    60556050    AssertRCReturn(rc, rc);
    60566051
     
    60726067
    60736068            /* Save the required guest state bits from the VMCS. */
    6074             rc  = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
    6075             rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
     6069            rc  = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     6070            rc |= hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
    60766071            AssertRCReturn(rc, rc);
    60776072
     
    64916486
    64926487        /* Need guest's LSTAR MSR (which is part of the auto load/store MSRs in the VMCS), ensure we have the updated one. */
    6493         rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
     6488        rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    64946489        AssertRC(rc);
    64956490
     
    65476542    if (pVM->hm.s.fTPRPatchingActive)
    65486543    {
    6549         int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);
     6544        int rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    65506545        AssertRC(rc);
    65516546        pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);           /* MSR_K8_LSTAR contains the guest TPR. */
     
    65776572    {
    65786573        /* Update the guest interruptibility-state from the VMCS. */
    6579         hmR0VmxSaveGuestIntrState(pVM, pVCpu, pMixedCtx);
     6574        hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
    65806575
    65816576        /*
     
    67846779
    67856780
     6781/**
     6782 * Advances the guest RIP after reading it from the VMCS.
     6783 *
     6784 * @returns VBox status code.
     6785 * @param   pVCpu           Pointer to the VMCPU.
     6786 * @param   pMixedCtx       Pointer to the guest-CPU context. The data maybe
     6787 *                          out-of-sync. Make sure to update the required fields
     6788 *                          before using them.
     6789 * @param   pVmxTransient   Pointer to the VMX transient structure.
     6790 *
     6791 * @remarks No-long-jump zone!!!
     6792 */
     6793DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
     6794{
     6795    int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     6796    rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     6797    AssertRCReturn(rc, rc);
     6798
     6799    pMixedCtx->rip += pVmxTransient->cbInstr;
     6800    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     6801    return rc;
     6802}
     6803
     6804
    67866805/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
    67876806/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
     
    68526871                default:
    68536872                {
    6854                     rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     6873                    rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    68556874                    AssertRCReturn(rc, rc);
    68566875
     
    69276946{
    69286947    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    6929     int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    6930     rc    |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    6931     AssertRCReturn(rc, rc);
    6932 
    6933     pMixedCtx->rip += pVmxTransient->cbInstr;
    6934     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    6935 
    69366948    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
    6937     return VINF_SUCCESS;
     6949    return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    69386950}
    69396951
     
    69456957{
    69466958    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    6947     int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    6948     rc    |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    6949     AssertRCReturn(rc, rc);
    6950 
    6951     pMixedCtx->rip += pVmxTransient->cbInstr;
    6952     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    6953 
    69546959    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
    6955     return VINF_SUCCESS;
     6960    return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    69566961}
    69576962
     
    69666971    if (RT_LIKELY(rc == VINF_SUCCESS))
    69676972    {
    6968         rc  = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    6969         rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    6970         AssertRCReturn(rc, rc);
     6973        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    69716974        Assert(pVmxTransient->cbInstr == 2);
    6972 
    6973         Log(("hmR0VmxExitCpuid: RIP=%#RX64\n", pMixedCtx->rip));
    6974         pMixedCtx->rip += pVmxTransient->cbInstr;
    6975         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    69766975    }
    69776976    else
     
    69916990{
    69926991    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    6993     int rc  = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);
     6992    int rc  = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
    69946993    AssertRCReturn(rc, rc);
    69956994
     
    70087007{
    70097008    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7010     int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
     7009    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
    70117010    AssertRCReturn(rc, rc);
    70127011
     
    70147013    if (RT_LIKELY(rc == VINF_SUCCESS))
    70157014    {
    7016         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7017         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7018         AssertRCReturn(rc, rc);
     7015        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    70197016        Assert(pVmxTransient->cbInstr == 2);
    7020 
    7021         pMixedCtx->rip += pVmxTransient->cbInstr;
    7022         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    7023 
    70247017        /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
    70257018        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
     
    70427035{
    70437036    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7044     int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
    7045     rc    |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVM, pVCpu, pMixedCtx);  /* For MSR_K8_TSC_AUX */
     7037    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);                /** @todo review if CR4 is really required by EM. */
     7038    rc    |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);  /* For MSR_K8_TSC_AUX */
    70467039    AssertRCReturn(rc, rc);
    70477040
     
    70497042    if (RT_LIKELY(rc == VINF_SUCCESS))
    70507043    {
    7051         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7052         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7053         AssertRCReturn(rc, rc);
     7044        rc  = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    70547045        Assert(pVmxTransient->cbInstr == 3);
    7055 
    7056         pMixedCtx->rip += pVmxTransient->cbInstr;
    7057         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    7058 
    70597046        /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
    70607047        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TSC_OFFSETTING)
     
    70777064{
    70787065    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7079     int rc = hmR0VmxSaveGuestCR4(pVM, pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
    7080     rc    |= hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);    /** @todo review if CR0 is really required by EM. */
     7066    int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);    /** @todo review if CR4 is really required by EM. */
     7067    rc    |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);    /** @todo review if CR0 is really required by EM. */
    70817068    AssertRCReturn(rc, rc);
    70827069
     
    70847071    if (RT_LIKELY(rc == VINF_SUCCESS))
    70857072    {
    7086         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7087         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7088         AssertRCReturn(rc, rc);
     7073        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    70897074        Assert(pVmxTransient->cbInstr == 2);
    7090 
    7091         pMixedCtx->rip += pVmxTransient->cbInstr;
    7092         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    70937075    }
    70947076    else
     
    71097091    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    71107092    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    7111     rc    |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
     7093    rc    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    71127094    AssertRCReturn(rc, rc);
    71137095
     
    71157097    rc = VBOXSTRICTRC_VAL(rc2);
    71167098    if (RT_LIKELY(rc == VINF_SUCCESS))
    7117     {
    7118         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7119         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7120         AssertRCReturn(rc, rc);
    7121 
    7122         pMixedCtx->rip += pVmxTransient->cbInstr;
    7123         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    7124     }
     7099        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    71257100    else
    71267101    {
     
    71407115{
    71417116    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7142     int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
    7143     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
    7144     rc    |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7117    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     7118    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     7119    rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    71457120    AssertRCReturn(rc, rc);
    71467121
    71477122    rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
    71487123    if (RT_LIKELY(rc == VINF_SUCCESS))
    7149     {
    7150         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7151         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7152         AssertRCReturn(rc, rc);
    7153 
    7154         pMixedCtx->rip += pVmxTransient->cbInstr;
    7155         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    7156     }
     7124        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    71577125    else
    71587126    {
     
    71717139{
    71727140    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7173     int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
    7174     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
    7175     rc    |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7141    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     7142    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     7143    rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    71767144    AssertRCReturn(rc, rc);
    71777145
     
    71817149                  || rc == VINF_EM_HALT))
    71827150    {
    7183         int rc3  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7184         rc3     |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
     7151        int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    71857152        AssertRCReturn(rc3, rc3);
    7186 
    7187         pMixedCtx->rip += pVmxTransient->cbInstr;
    7188         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    71897153
    71907154        if (   rc == VINF_EM_HALT
     
    72977261    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    72987262    Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT);
    7299     int rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7300     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
     7263    int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     7264    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    73017265    AssertRCReturn(rc, rc);
    73027266
     
    73797343    rc     |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
    73807344    rc     |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
    7381     rc     |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     7345    rc     |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    73827346    AssertRCReturn(rc, rc);
    73837347
     
    74797443    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    74807444    /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
    7481     int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
    7482     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
    7483     rc    |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7445    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     7446    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     7447    rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    74847448    AssertRCReturn(rc, rc);
    74857449
     
    74927456    if (RT_LIKELY(rc == VINF_SUCCESS))
    74937457    {
    7494         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7495         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7496         AssertRCReturn(rc, rc);
    7497 
     7458        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    74987459        Assert(pVmxTransient->cbInstr == 2);
    7499         pMixedCtx->rip += pVmxTransient->cbInstr;
    7500         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    75017460    }
    75027461    return rc;
     
    75107469{
    75117470    VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    7512     int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7513     AssertRCReturn(rc, rc);
    7514     Assert(pVmxTransient->cbInstr == 2);
    7515 
     7471    int rc = VINF_SUCCESS;
    75167472    /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
    75177473    if (   pVM->hm.s.fTPRPatchingActive
     
    75257481        }
    75267482
    7527         rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7528         AssertRCReturn(rc, rc);
    7529         pMixedCtx->rip += pVmxTransient->cbInstr;
    7530         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     7483        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
     7484        Assert(pVmxTransient->cbInstr == 2);
    75317485        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
    75327486        return VINF_SUCCESS;
     
    75697523
    75707524    /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
    7571     rc  = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
    7572     rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
    7573     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7525    rc  = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
     7526    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
     7527    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    75747528    AssertRCReturn(rc, rc);
    75757529
     
    75817535    if (RT_LIKELY(rc == VINF_SUCCESS))
    75827536    {
    7583         rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7584         AssertRCReturn(rc, rc);
    7585 
    7586         pMixedCtx->rip += pVmxTransient->cbInstr;
    7587         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
     7537        rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    75887538
    75897539        /* If this is an X2APIC WRMSR access, update the APIC state as well. */
     
    76577607#if 0
    76587608            /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
    7659             rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     7609            rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    76607610#else
    7661             rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
    7662             rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
    7663             rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7611            rc  = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
     7612            rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     7613            rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    76647614#endif
    76657615            AssertRCReturn(rc, rc);
     
    76757625                    Log(("CR0 write rc=%d\n", rc));
    76767626                    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
     7627                    break;
     7628                case 2: /* C2 **/
     7629                    /* Nothing to do here, CR2 it's not part of the VMCS. */
    76777630                    break;
    76787631                case 3: /* CR3 */
     
    77027655        {
    77037656            /* EMInterpretCRxRead() requires EFER MSR, CS. */
    7704             rc = hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7657            rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    77057658            AssertRCReturn(rc, rc);
    77067659            Assert(   !pVM->hm.s.fNestedPaging
     
    77237676        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:        /* CLTS (Clear Task-Switch Flag in CR0) */
    77247677        {
    7725             rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     7678            rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    77267679            AssertRCReturn(rc, rc);
    77277680            rc = EMInterpretCLTS(pVM, pVCpu);
     
    77357688        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:        /* LMSW (Load Machine-Status Word into CR0) */
    77367689        {
    7737             rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     7690            rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    77387691            AssertRCReturn(rc, rc);
    77397692            rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
     
    77567709    if (RT_SUCCESS(rc))
    77577710    {
    7758         int rc2  = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7759         rc2     |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     7711        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    77607712        AssertRCReturn(rc2, rc2);
    7761         pMixedCtx->rip += pVmxTransient->cbInstr;
    7762         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    77637713    }
    77647714
     
    77777727    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    77787728    rc    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    7779     rc    |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    7780     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
    7781     rc    |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);    /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
    7782     rc    |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);    /* SELM checks in EMInterpretDisasCurrent(). */
     7729    rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
     7730    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
     7731    rc    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);    /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
     7732    rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);    /* SELM checks in EMInterpretDisasCurrent(). */
    77837733    /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
    77847734    AssertRCReturn(rc, rc);
     
    78667816        if (RT_LIKELY(rc == VINF_SUCCESS))
    78677817        {
    7868             rc = hmR0VmxSaveGuestDebugRegs(pVM, pVCpu, pMixedCtx);      /* For DR7. */
     7818            rc = hmR0VmxSaveGuestDebugRegs(pVCpu, pMixedCtx);      /* For DR7. */
    78697819            AssertRCReturn(rc, rc);
    78707820
     
    80107960    /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
    80117961     *   just sync the whole thing. */
    8012     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     7962    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    80137963#else
    80147964    /* Aggressive state sync. for now. */
    8015     rc  = hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
    8016     rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
    8017     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     7965    rc  = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
     7966    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     7967    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    80187968#endif
    80197969    AssertRCReturn(rc, rc);
     
    81118061     */
    81128062    rc  = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    8113     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     8063    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    81148064    AssertRCReturn(rc, rc);
    81158065
     
    81348084    if (RT_SUCCESS(rc))
    81358085    {
    8136         int rc2  = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
    8137         rc2     |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     8086        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
    81388087        AssertRCReturn(rc2, rc2);
    8139         pMixedCtx->rip += pVmxTransient->cbInstr;
    8140         pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
    81418088    }
    81428089    return rc;
     
    81648111
    81658112#if 0
    8166     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better?  */
     8113    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);     /** @todo Can we do better?  */
    81678114#else
    81688115    /* Aggressive state sync. for now. */
    8169     rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
    8170     rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
    8171     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     8116    rc |= hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
     8117    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     8118    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    81728119#endif
    81738120    AssertRCReturn(rc, rc);
     
    82158162    rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    82168163#if 0
    8217     rc |= hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);     /** @todo Can we do better?  */
     8164    rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);     /** @todo Can we do better?  */
    82188165#else
    82198166    /* Aggressive state sync. for now. */
    8220     rc |= hmR0VmxSaveGuestGprs(pVM, pVCpu, pMixedCtx);
    8221     rc |= hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
    8222     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
     8167    rc |= hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
     8168    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     8169    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    82238170#endif
    82248171    AssertRCReturn(rc, rc);
     
    82738220    VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    82748221
    8275     int rc = hmR0VmxSaveGuestCR0(pVM, pVCpu, pMixedCtx);
     8222    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    82768223    AssertRCReturn(rc, rc);
    82778224    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
     
    83008247    /** @todo Try optimize this by not saving the entire guest state unless
    83018248     *        really needed. */
    8302     int rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     8249    int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    83038250    AssertRCReturn(rc, rc);
    83048251    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
     
    83318278
    83328279    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    8333     rc    |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
    8334     rc    |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
     8280    rc    |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     8281    rc    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    83358282    AssertRCReturn(rc, rc);
    83368283
     
    83878334
    83888335    /* We require CR0 and EFER. EFER is always up-to-date. */
    8389     int rc = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
     8336    int rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    83908337    AssertRCReturn(rc, rc);
    83918338
     
    84318378        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
    84328379        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    8433         rc |= hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
     8380        rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    84348381        Log(("#GP Gst: RIP %#RX64\n", pMixedCtx->rip));
    84358382        rc |= hmR0VmxInjectEventVmcs(pVM, pVCpu, pMixedCtx,
     
    84498396
    84508397    /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
    8451     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     8398    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    84528399    AssertRCReturn(rc, rc);
    84538400
     
    87108657
    87118658#ifdef VBOX_HM_WITH_GUEST_PATCHING
    8712     rc  = hmR0VmxSaveGuestControlRegs(pVM, pVCpu, pMixedCtx);
    8713     rc |= hmR0VmxSaveGuestSegmentRegs(pVM, pVCpu, pMixedCtx);
    8714     rc |= hmR0VmxSaveGuestRflags(pVM, pVCpu, pMixedCtx);
     8659    rc  = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
     8660    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     8661    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    87158662    AssertRCReturn(rc, rc);
    87168663    /* Shortcut for APIC TPR access, only for 32-bit guests. */
     
    87298676            &&  GCPhys == GCPhysApicBase)
    87308677        {
    8731             rc = hmR0VmxSaveGuestRip(pVM, pVCpu, pMixedCtx);
     8678            rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    87328679            AssertRCReturn(rc, rc);
    87338680
     
    87448691    TRPMSetErrorCode(pVCpu, pVmxTransient->uExitIntrErrorCode);
    87458692
    8746     rc = hmR0VmxSaveGuestState(pVM, pVCpu, pMixedCtx);
     8693    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    87478694    AssertRCReturn(rc, rc);
    87488695
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette