VirtualBox

Changeset 45701 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Apr 24, 2013 2:21:09 PM (12 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
85247
Message:

VMM: SELM and VMM early HM init changes.

Location:
trunk/src/VBox/VMM
Files:
2 added
17 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/Makefile.kmk

    r45529 r45701  
    241241        VMMSwitcher/PAETo32Bit.asm \
    242242        VMMSwitcher/PAEToAMD64.asm \
    243         VMMSwitcher/PAEToPAE.asm
     243        VMMSwitcher/PAEToPAE.asm \
     244        VMMSwitcher/X86Stub.asm
    244245 VBoxVMM_SOURCES.amd64 = \
    245246        VMMSwitcher/AMD64To32Bit.asm \
    246         VMMSwitcher/AMD64ToPAE.asm
     247        VMMSwitcher/AMD64ToPAE.asm \
     248        VMMSwitcher/AMD64Stub.asm
    247249 VBoxVMM_SOURCES.darwin.x86 += \
    248250        VMMSwitcher/AMD64ToPAE.asm
  • trunk/src/VBox/VMM/VMMAll/EMAll.cpp

    r45620 r45701  
    15381538# endif
    15391539# ifdef VBOX_WITH_RAW_MODE
    1540         if ((val ^ oldval) & X86_CR4_VME)
     1540        if (((val ^ oldval) & X86_CR4_VME) && !HMIsEnabled(pVM))
    15411541            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    15421542# endif
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r45618 r45701  
    3838
    3939/**
    40  * Query HM state (enabled/disabled)
    41  *
    42  * @returns @c false if disabled, @c true if enabled.
     40 * Checks whether HM (VT-x/AMD-V) is being used by this VM.
     41 *
     42 * @retval  @c true if used.
     43 * @retval  @c false if software virtualization (raw-mode) is used.
    4344 * @param   pVM         The cross context VM structure.
    4445 * @sa      HMIsEnabled, HMR3IsEnabled
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r45531 r45701  
    8787#include <VBox/vmm/iom.h>
    8888#include <VBox/vmm/em.h>
     89#include <VBox/vmm/hm.h>
    8990#include <VBox/vmm/tm.h>
    9091#include <VBox/vmm/dbgf.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r45533 r45701  
    34173417                         RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
    34183418#ifdef VBOX_WITH_RAW_MODE
    3419                     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     3419                    if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
     3420                        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    34203421#endif
    34213422                }
  • trunk/src/VBox/VMM/VMMAll/SELMAll.cpp

    r45485 r45701  
    2525#include <VBox/vmm/em.h>
    2626#include <VBox/vmm/mm.h>
     27#include <VBox/vmm/hm.h>
    2728#include <VBox/vmm/pgm.h>
    2829#include <VBox/vmm/hm.h>
     
    6465{
    6566    Assert(pVM->cCpus == 1 && !CPUMIsGuestInLongMode(VMMGetCpu(pVM)));    /* DON'T USE! */
     67    Assert(!HMIsEnabled(pVM));
    6668
    6769    /** @todo check the limit. */
     
    322324{
    323325    Assert(!CPUMIsGuestInLongMode(pVCpu));    /* DON'T USE! (Accessing shadow GDT/LDT.) */
     326    Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
    324327
    325328    /*
     
    488491                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
    489492{
     493    Assert(!HMIsEnabled(pVCpu->CTX_SUFF(pVM)));
     494
    490495    /*
    491496     * Try read the entry.
     
    541546    PVM pVM = pVCpu->CTX_SUFF(pVM);
    542547    Assert(pVM->cCpus == 1);
     548    Assert(!HMIsEnabled(pVM));
    543549
    544550
     
    634640{
    635641    NOREF(pVCpu);
     642    Assert(!HMIsEnabled(pVM));
     643
    636644    /** @todo validate limit! */
    637645    X86DESC    Desc;
     
    836844void selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
    837845{
     846    Assert(!HMIsEnabled(pVM));
    838847    Assert((ss & 1) || esp == 0);
    839848    pVM->selm.s.Tss.ss1  = ss;
     
    852861void selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp)
    853862{
     863    Assert(!HMIsEnabled(pVM));
    854864    Assert((ss & 3) == 2 || esp == 0);
    855865    pVM->selm.s.Tss.ss2  = ss;
     
    858868#endif
    859869
    860 
    861870#ifdef VBOX_WITH_RAW_MODE_NOT_R0
     871
    862872/**
    863873 * Gets ss:esp for ring1 in main Hypervisor's TSS.
     
    872882VMMDECL(int) SELMGetRing1Stack(PVM pVM, uint32_t *pSS, PRTGCPTR32 pEsp)
    873883{
     884    Assert(!HMIsEnabled(pVM));
    874885    Assert(pVM->cCpus == 1);
    875886    PVMCPU pVCpu = &pVM->aCpus[0];
     
    944955    return VINF_SUCCESS;
    945956}
    946 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
    947 
    948 
    949 /**
    950  * Returns Guest TSS pointer
    951  *
    952  * @returns Pointer to the guest TSS, RTRCPTR_MAX if not being monitored.
    953  * @param   pVM     Pointer to the VM.
    954  */
    955 VMMDECL(RTGCPTR) SELMGetGuestTSS(PVM pVM)
    956 {
    957     return (RTGCPTR)pVM->selm.s.GCPtrGuestTss;
    958 }
    959 
    960 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     957
    961958
    962959/**
     
    10791076VMMDECL(void) SELMShadowCR3Changed(PVM pVM, PVMCPU pVCpu)
    10801077{
    1081     /** @todo SMP support!! */
     1078    /** @todo SMP support!! (64-bit guest scenario, primarily) */
    10821079    pVM->selm.s.Tss.cr3       = PGMGetHyperCR3(pVCpu);
    10831080    pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu);
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r44528 r45701  
    660660        case VMMR0_DO_RAW_RUN:
    661661        {
     662#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    662663            /* Some safety precautions first. */
    663 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    664             if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled /* hm */
    665                           && pVM->cCpus == 1               /* !smp */
    666                           && PGMGetHyperCR3(pVCpu)))
    667 #else
    668             if (RT_LIKELY(   !pVM->vmm.s.fSwitcherDisabled
    669                           && pVM->cCpus == 1))
    670 #endif
     664            if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
    671665            {
    672                 /* Disable preemption and update the periodic preemption timer. */
    673                 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    674                 RTThreadPreemptDisable(&PreemptState);
    675                 RTCPUID idHostCpu = RTMpCpuId();
     666                pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
     667                break;
     668            }
     669#endif
     670
     671            /* Disable preemption and update the periodic preemption timer. */
     672            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
     673            RTThreadPreemptDisable(&PreemptState);
     674            RTCPUID idHostCpu = RTMpCpuId();
    676675#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    677                 CPUMR0SetLApic(pVM, idHostCpu);
    678 #endif
    679                 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
    680                 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
    681                     GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
    682 
    683                 /* We might need to disable VT-x if the active switcher turns off paging. */
    684                 bool fVTxDisabled;
    685                 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
    686                 if (RT_SUCCESS(rc))
     676            CPUMR0SetLApic(pVM, idHostCpu);
     677#endif
     678            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
     679            if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
     680                GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
     681
     682            /* We might need to disable VT-x if the active switcher turns off paging. */
     683            bool fVTxDisabled;
     684            int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
     685            if (RT_SUCCESS(rc))
     686            {
     687                RTCCUINTREG uFlags = ASMIntDisableFlags();
     688
     689                for (;;)
    687690                {
    688                     RTCCUINTREG uFlags = ASMIntDisableFlags();
    689 
    690                     for (;;)
    691                     {
    692                         VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
    693                         TMNotifyStartOfExecution(pVCpu);
    694 
    695                         rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
    696                         pVCpu->vmm.s.iLastGZRc = rc;
    697 
    698                         TMNotifyEndOfExecution(pVCpu);
    699                         VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
    700 
    701                         if (rc != VINF_VMM_CALL_TRACER)
    702                             break;
    703                         SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
    704                     }
    705 
    706                     /* Re-enable VT-x if previously turned off. */
    707                     HMR0LeaveSwitcher(pVM, fVTxDisabled);
    708 
    709                     if (    rc == VINF_EM_RAW_INTERRUPT
    710                         ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
    711                         TRPMR0DispatchHostInterrupt(pVM);
    712 
    713                     ASMSetFlags(uFlags);
     691                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
     692                    TMNotifyStartOfExecution(pVCpu);
     693
     694                    rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
     695                    pVCpu->vmm.s.iLastGZRc = rc;
     696
     697                    TMNotifyEndOfExecution(pVCpu);
     698                    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
     699
     700                    if (rc != VINF_VMM_CALL_TRACER)
     701                        break;
     702                    SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
     703                }
     704
     705                /* Re-enable VT-x if previously turned off. */
     706                HMR0LeaveSwitcher(pVM, fVTxDisabled);
     707
     708                if (    rc == VINF_EM_RAW_INTERRUPT
     709                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
     710                    TRPMR0DispatchHostInterrupt(pVM);
     711
     712                ASMSetFlags(uFlags);
    714713
    715714#ifdef VBOX_WITH_STATISTICS
    716                     STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
    717                     vmmR0RecordRC(pVM, pVCpu, rc);
    718 #endif
    719                 }
    720                 else
    721                     pVCpu->vmm.s.iLastGZRc = rc;
    722                 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
    723                 RTThreadPreemptRestore(&PreemptState);
     715                STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
     716                vmmR0RecordRC(pVM, pVCpu, rc);
     717#endif
    724718            }
    725719            else
    726             {
    727                 Assert(!pVM->vmm.s.fSwitcherDisabled);
    728                 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
    729                 if (pVM->cCpus != 1)
    730                     pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_INVALID_SMP;
    731 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    732                 if (!PGMGetHyperCR3(pVCpu))
    733                     pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
    734 #endif
    735             }
     720                pVCpu->vmm.s.iLastGZRc = rc;
     721            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
     722            RTThreadPreemptRestore(&PreemptState);
    736723            break;
    737724        }
     
    980967            int rc;
    981968            bool fVTxDisabled;
    982 
    983             /* Safety precaution as HM can disable the switcher. */
    984             Assert(!pVM->vmm.s.fSwitcherDisabled);
    985             if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled))
    986                 return VERR_NOT_SUPPORTED;
    987969
    988970#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
  • trunk/src/VBox/VMM/VMMR3/DBGF.cpp

    r45692 r45701  
    360360
    361361        /*
    362          * Commands?
     362         * Command pending? Process it.
    363363         */
    364364        if (pVM->dbgf.s.enmVMMCmd != DBGFCMD_NO_COMMAND)
    365365        {
    366 #ifdef VBOX_WITH_RAW_MODE
    367             /** @todo stupid GDT/LDT sync hack. go away! */
    368             SELMR3UpdateFromCPUM(pVM, pVCpu);
    369 #endif
    370 
    371             /*
    372              * Process the command.
    373              */
    374366            bool            fResumeExecution;
    375367            DBGFCMDDATA     CmdData = pVM->dbgf.s.VMMCmdData;
     
    672664
    673665    LogFlow(("dbgfR3VMMWait:\n"));
    674 
    675 #ifdef VBOX_WITH_RAW_MODE
    676     /** @todo stupid GDT/LDT sync hack. go away! */
    677     SELMR3UpdateFromCPUM(pVM, pVCpu);
    678 #endif
    679666    int rcRet = VINF_SUCCESS;
    680667
  • trunk/src/VBox/VMM/VMMR3/EMHM.cpp

    r45533 r45701  
    8484    int         rc;
    8585    PCPUMCTX    pCtx   = pVCpu->em.s.pCtx;
    86     VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
     86# ifdef VBOX_WITH_RAW_MODE
     87    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
     88# endif
    8789
    8890    /*
     
    501503        /** @todo change this FF hack into an assertion, they simply SHALL NOT be set in
    502504         *        HM mode. */
     505        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
    503506        VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HM mode; shouldn't be set really. */
    504507#endif
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r45665 r45701  
    487487                AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
    488488                                            VERR_INTERNAL_ERROR_5);
     489
     490            /*
     491             * Do we require a little bit of raw-mode for 64-bit guest execution?
     492             */
     493            pVM->fHMNeedRawModeCtx = HC_ARCH_BITS == 32
     494                                  && pVM->fHMEnabled
     495                                  && pVM->hm.s.fAllow64BitGuests;
    489496        }
    490497        else
     
    831838{
    832839#ifdef VBOX_WITH_RAW_MODE
    833     /* Disable PATM & CSAM. */
    834     PATMR3AllowPatching(pVM->pUVM, false);
    835     CSAMDisableScanning(pVM);
    836 
    837840    /* Turn off IDT/LDT/GDT and TSS monitoring and syncing. */
    838841    SELMR3DisableMonitoring(pVM);
     
    840843#endif
    841844
    842     /* Disable the switcher code (safety precaution). */
    843     VMMR3DisableSwitcher(pVM);
    844 
    845845    /* Disable mapping of the hypervisor into the shadow page table. */
    846846    PGMR3MappingsDisable(pVM);
    847 
    848     /* Disable the switcher */
    849     VMMR3DisableSwitcher(pVM);
    850847
    851848    /* Reinit the paging mode to force the new shadow mode. */
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r45618 r45701  
    33793379
    33803380#ifdef VBOX_WITH_RAW_MODE
    3381     if (enmSwitcher != VMMSWITCHER_INVALID)
     3381    if (   enmSwitcher != VMMSWITCHER_INVALID
     3382        && !HMIsEnabled(pVM))
    33823383    {
    33833384        /*
  • trunk/src/VBox/VMM/VMMR3/SELM.cpp

    r45618 r45701  
    123123VMMR3DECL(int) SELMR3Init(PVM pVM)
    124124{
     125    int rc;
    125126    LogFlow(("SELMR3Init\n"));
    126127
     
    149150    pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;
    150151
    151     /*
    152      * Allocate GDT table.
    153      */
    154     int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
     152    if (HMIsRawModeCtxNeeded(pVM))
     153    {
     154        /*
     155         * Allocate GDT table.
     156         */
     157        rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
    155158                                     PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtR3);
    156     AssertRCReturn(rc, rc);
    157 
    158     /*
    159      * Allocate LDT area.
    160      */
    161     rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
    162     AssertRCReturn(rc, rc);
     159        AssertRCReturn(rc, rc);
     160
     161        /*
     162         * Allocate LDT area.
     163         */
     164        rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
     165        AssertRCReturn(rc, rc);
     166    }
    163167
    164168    /*
     
    178182    pVM->selm.s.fSyncTSSRing0Stack = false;
    179183
    180     /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
    181      * for I/O operations. */
     184    /* The I/O bitmap starts right after the virtual interrupt redirection
     185       bitmap. Outside the TSS on purpose; the CPU will not check it for
     186       I/O operations. */
    182187    pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
    183188    /* bit set to 1 means no redirection */
     
    197202     * Statistics.
    198203     */
    199     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled,     STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt",  STAMUNIT_OCCURENCES,     "The number of handled writes to the Guest GDT.");
    200     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled,   STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu",  STAMUNIT_OCCURENCES,     "The number of unhandled writes to the Guest GDT.");
    201     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT,            STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT",     STAMUNIT_OCCURENCES,     "The number of writes to the Guest LDT was detected.");
    202     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled,     STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt",  STAMUNIT_OCCURENCES,     "The number of handled writes to the Guest TSS.");
    203     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir,       STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES,     "The number of handled redir bitmap writes to the Guest TSS.");
    204     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
    205     STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled,   STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu",  STAMUNIT_OCCURENCES,     "The number of unhandled writes to the Guest TSS.");
    206     STAM_REG(pVM, &pVM->selm.s.StatTSSSync,                    STAMTYPE_PROFILE, "/PROF/SELM/TSSSync",           STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
    207     STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM,             STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM",    STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
    208 
    209     STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged,       STAMTYPE_COUNTER, "/SELM/HyperSels/Changed",      STAMUNIT_OCCURENCES,     "The number of times we had to relocate our hypervisor selectors.");
    210     STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels,       STAMTYPE_COUNTER, "/SELM/HyperSels/Scan",         STAMUNIT_OCCURENCES,     "The number of times we had find free hypervisor selectors.");
    211 
    212     STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM.");
    213     STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM.");
    214     STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM.");
    215     STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM.");
    216     STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM.");
    217     STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM.");
    218 
    219     STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM.");
    220     STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM.");
    221     STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM.");
    222     STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM.");
    223     STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM.");
    224     STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM.");
    225 
    226     STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg,              STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM.");
    227 
    228     STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM.");
    229     STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM.");
    230     STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM.");
    231     STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM.");
    232     STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM.");
    233     STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM.");
     204    if (!HMIsEnabled(pVM))
     205    {
     206        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled,     STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt",  STAMUNIT_OCCURENCES,     "The number of handled writes to the Guest GDT.");
     207        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled,   STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu",  STAMUNIT_OCCURENCES,     "The number of unhandled writes to the Guest GDT.");
     208        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT,            STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT",     STAMUNIT_OCCURENCES,     "The number of writes to the Guest LDT was detected.");
     209        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled,     STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt",  STAMUNIT_OCCURENCES,     "The number of handled writes to the Guest TSS.");
     210        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir,       STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir",STAMUNIT_OCCURENCES,     "The number of handled redir bitmap writes to the Guest TSS.");
     211        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged,STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
     212        STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled,   STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu",  STAMUNIT_OCCURENCES,     "The number of unhandled writes to the Guest TSS.");
     213        STAM_REG(pVM, &pVM->selm.s.StatTSSSync,                    STAMTYPE_PROFILE, "/PROF/SELM/TSSSync",           STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
     214        STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM,             STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM",    STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
     215
     216        STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged,       STAMTYPE_COUNTER, "/SELM/HyperSels/Changed",      STAMUNIT_OCCURENCES,     "The number of times we had to relocate our hypervisor selectors.");
     217        STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels,       STAMTYPE_COUNTER, "/SELM/HyperSels/Scan",         STAMUNIT_OCCURENCES,     "The number of times we had find free hypervisor selectors.");
     218
     219        STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM.");
     220        STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM.");
     221        STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM.");
     222        STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM.");
     223        STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM.");
     224        STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM.");
     225
     226        STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM.");
     227        STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM.");
     228        STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM.");
     229        STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM.");
     230        STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM.");
     231        STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM.");
     232
     233        STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg,              STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM.");
     234
     235        STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM.");
     236        STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM.");
     237        STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM.");
     238        STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM.");
     239        STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM.");
     240        STAM_REG(    pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS],  STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM.");
     241    }
    234242
    235243    STAM_REG(    pVM, &pVM->selm.s.StatLoadHidSelGst,              STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedGuest",   STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from guest tables.");
     
    242250     * Default action when entering raw mode for the first time
    243251     */
    244     PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
    245     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    246     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    247     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     252    if (!HMIsEnabled(pVM))
     253    {
     254        PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies one VCPU */
     255        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     256        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     257        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     258    }
    248259#endif
    249260
     
    251262     * Register info handlers.
    252263     */
    253     DBGFR3InfoRegisterInternal(pVM, "gdt",      "Displays the shadow GDT. No arguments.",   &selmR3InfoGdt);
     264    if (HMIsRawModeCtxNeeded(pVM))
     265    {
     266        DBGFR3InfoRegisterInternal(pVM, "gdt",      "Displays the shadow GDT. No arguments.",   &selmR3InfoGdt);
     267        DBGFR3InfoRegisterInternal(pVM, "ldt",      "Displays the shadow LDT. No arguments.",   &selmR3InfoLdt);
     268        //DBGFR3InfoRegisterInternal(pVM, "tss",      "Displays the shadow TSS. No arguments.",   &selmR3InfoTss);
     269    }
    254270    DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.",    &selmR3InfoGdtGuest);
    255     DBGFR3InfoRegisterInternal(pVM, "ldt",      "Displays the shadow LDT. No arguments.",   &selmR3InfoLdt);
    256271    DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.",    &selmR3InfoLdtGuest);
    257     //DBGFR3InfoRegisterInternal(pVM, "tss",      "Displays the shadow TSS. No arguments.",   &selmR3InfoTss);
    258272    //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.",    &selmR3InfoTssGuest);
    259273
     
    281295     */
    282296    bool f;
    283 #if defined(DEBUG_bird)
     297# if defined(DEBUG_bird)
    284298    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, true);
    285 #else
     299# else
    286300    int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, false);
    287 #endif
     301# endif
    288302    AssertLogRelRCReturn(rc, rc);
    289     if (f)
     303    if (f && HMIsRawModeCtxNeeded(pVM))
    290304    {
    291305        PX86DESC paGdt = pVM->selm.s.paGdtR3;
     
    419433    LogFlow(("SELMR3Relocate\n"));
    420434
    421     for (VMCPUID i = 0; i < pVM->cCpus; i++)
    422     {
    423         PVMCPU pVCpu = &pVM->aCpus[i];
    424 
    425         /*
    426          * Update GDTR and selector.
    427          */
    428         CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
    429 
    430         /** @todo selector relocations should be a separate operation? */
    431         CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
    432         CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    433         CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    434         CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
    435         CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
    436     }
    437 
    438     selmR3SetupHyperGDTSelectors(pVM);
     435    if (HMIsRawModeCtxNeeded(pVM))
     436    {
     437        for (VMCPUID i = 0; i < pVM->cCpus; i++)
     438        {
     439            PVMCPU pVCpu = &pVM->aCpus[i];
     440
     441            /*
     442             * Update GDTR and selector.
     443             */
     444            CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);
     445
     446            /** @todo selector relocations should be a separate operation? */
     447            CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
     448            CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
     449            CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
     450            CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
     451            CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
     452        }
     453
     454        selmR3SetupHyperGDTSelectors(pVM);
    439455
    440456/** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
    441457/** @todo PGM knows the proper CR3 values these days, not CPUM. */
    442     /*
    443      * Update the TSSes.
    444      */
    445     /* Only applies to raw mode which supports only 1 VCPU */
    446     PVMCPU pVCpu = &pVM->aCpus[0];
    447 
    448     /* Current TSS */
    449     pVM->selm.s.Tss.cr3     = PGMGetHyperCR3(pVCpu);
    450     pVM->selm.s.Tss.ss0     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    451     pVM->selm.s.Tss.esp0    = VMMGetStackRC(pVCpu);
    452     pVM->selm.s.Tss.cs      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
    453     pVM->selm.s.Tss.ds      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    454     pVM->selm.s.Tss.es      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    455     pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
    456 
    457     /* trap 08 */
    458     pVM->selm.s.TssTrap08.cr3    = PGMGetInterRCCR3(pVM, pVCpu);                   /* this should give use better survival chances. */
    459     pVM->selm.s.TssTrap08.ss0    = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    460     pVM->selm.s.TssTrap08.ss     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    461     pVM->selm.s.TssTrap08.esp0   = VMMGetStackRC(pVCpu) - PAGE_SIZE / 2;  /* upper half can be analysed this way. */
    462     pVM->selm.s.TssTrap08.esp    = pVM->selm.s.TssTrap08.esp0;
    463     pVM->selm.s.TssTrap08.ebp    = pVM->selm.s.TssTrap08.esp0;
    464     pVM->selm.s.TssTrap08.cs     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
    465     pVM->selm.s.TssTrap08.ds     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    466     pVM->selm.s.TssTrap08.es     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
    467     pVM->selm.s.TssTrap08.fs     = 0;
    468     pVM->selm.s.TssTrap08.gs     = 0;
    469     pVM->selm.s.TssTrap08.selLdt = 0;
    470     pVM->selm.s.TssTrap08.eflags = 0x2;    /* all cleared */
    471     pVM->selm.s.TssTrap08.ecx    = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);       /* setup ecx to normal Hypervisor TSS address. */
    472     pVM->selm.s.TssTrap08.edi    = pVM->selm.s.TssTrap08.ecx;
    473     pVM->selm.s.TssTrap08.eax    = pVM->selm.s.TssTrap08.ecx;
    474     pVM->selm.s.TssTrap08.edx    = VM_RC_ADDR(pVM, pVM);                    /* setup edx VM address. */
    475     pVM->selm.s.TssTrap08.edi    = pVM->selm.s.TssTrap08.edx;
    476     pVM->selm.s.TssTrap08.ebx    = pVM->selm.s.TssTrap08.edx;
    477     pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
    478     /* TRPM will be updating the eip */
    479 
    480     if (    !pVM->selm.s.fDisableMonitoring
    481         &&  !HMIsEnabled(pVM))
     458        /*
     459         * Update the TSSes.
     460         */
     461        /* Only applies to raw mode which supports only 1 VCPU */
     462        PVMCPU pVCpu = &pVM->aCpus[0];
     463
     464        /* Current TSS */
     465        pVM->selm.s.Tss.cr3     = PGMGetHyperCR3(pVCpu);
     466        pVM->selm.s.Tss.ss0     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     467        pVM->selm.s.Tss.esp0    = VMMGetStackRC(pVCpu);
     468        pVM->selm.s.Tss.cs      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
     469        pVM->selm.s.Tss.ds      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     470        pVM->selm.s.Tss.es      = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     471        pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
     472
     473        /* trap 08 */
      474        pVM->selm.s.TssTrap08.cr3    = PGMGetInterRCCR3(pVM, pVCpu);                   /* this should give us better survival chances. */
     475        pVM->selm.s.TssTrap08.ss0    = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     476        pVM->selm.s.TssTrap08.ss     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     477        pVM->selm.s.TssTrap08.esp0   = VMMGetStackRC(pVCpu) - PAGE_SIZE / 2;  /* upper half can be analysed this way. */
     478        pVM->selm.s.TssTrap08.esp    = pVM->selm.s.TssTrap08.esp0;
     479        pVM->selm.s.TssTrap08.ebp    = pVM->selm.s.TssTrap08.esp0;
     480        pVM->selm.s.TssTrap08.cs     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
     481        pVM->selm.s.TssTrap08.ds     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     482        pVM->selm.s.TssTrap08.es     = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
     483        pVM->selm.s.TssTrap08.fs     = 0;
     484        pVM->selm.s.TssTrap08.gs     = 0;
     485        pVM->selm.s.TssTrap08.selLdt = 0;
     486        pVM->selm.s.TssTrap08.eflags = 0x2;    /* all cleared */
     487        pVM->selm.s.TssTrap08.ecx    = VM_RC_ADDR(pVM, &pVM->selm.s.Tss);       /* setup ecx to normal Hypervisor TSS address. */
     488        pVM->selm.s.TssTrap08.edi    = pVM->selm.s.TssTrap08.ecx;
     489        pVM->selm.s.TssTrap08.eax    = pVM->selm.s.TssTrap08.ecx;
     490        pVM->selm.s.TssTrap08.edx    = VM_RC_ADDR(pVM, pVM);                    /* setup edx VM address. */
     491        pVM->selm.s.TssTrap08.edi    = pVM->selm.s.TssTrap08.edx;
     492        pVM->selm.s.TssTrap08.ebx    = pVM->selm.s.TssTrap08.edx;
     493        pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
     494        /* TRPM will be updating the eip */
     495    }
     496
     497    if (!HMIsEnabled(pVM))
    482498    {
    483499        /*
     
    543559{
    544560    NOREF(pVM);
    545     return 0;
     561    return VINF_SUCCESS;
    546562}
    547563
     
    602618
    603619#ifdef VBOX_WITH_RAW_MODE
    604     /*
    605      * Default action when entering raw mode for the first time
    606      */
    607     PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
    608     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    609     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    610     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     620    if (!HMIsEnabled(pVM))
     621    {
     622        /*
     623         * Default action when entering raw mode for the first time
     624         */
      625        PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies one VCPU */
     626        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     627        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     628        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     629    }
    611630#endif
    612631}
     
    793812{
    794813#ifdef VBOX_WITH_RAW_MODE
    795     PVMCPU pVCpu = VMMGetCpu(pVM);
    796 
    797     LogFlow(("selmR3LoadDone:\n"));
    798 
    799     /*
    800      * Don't do anything if it's a load failure.
    801      */
    802     int rc = SSMR3HandleGetStatus(pSSM);
    803     if (RT_FAILURE(rc))
    804         return VINF_SUCCESS;
    805 
    806     /*
    807      * Do the syncing if we're in protected mode.
    808      */
    809     if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
    810     {
     814    if (!HMIsEnabled(pVM))
     815    {
     816        PVMCPU pVCpu = VMMGetCpu(pVM);
     817
     818        LogFlow(("selmR3LoadDone:\n"));
     819
     820        /*
     821         * Don't do anything if it's a load failure.
     822         */
     823        int rc = SSMR3HandleGetStatus(pSSM);
     824        if (RT_FAILURE(rc))
     825            return VINF_SUCCESS;
     826
     827        /*
     828         * Do the syncing if we're in protected mode and using raw-mode.
     829         */
     830        if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
     831        {
     832            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
     833            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     834            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
     835            SELMR3UpdateFromCPUM(pVM, pVCpu);
     836        }
     837
     838        /*
     839         * Flag everything for resync on next raw mode entry.
     840         */
    811841        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    812842        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    813843        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    814         SELMR3UpdateFromCPUM(pVM, pVCpu);
    815     }
    816 
    817     /*
    818      * Flag everything for resync on next raw mode entry.
    819      */
    820     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    821     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    822     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    823 
     844    }
    824845#endif /*VBOX_WITH_RAW_MODE*/
    825846    return VINF_SUCCESS;
     
    837858static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
    838859{
     860    Assert(!HMIsEnabled(pVM));
     861
    839862    /*
    840863     * Always assume the best...
     
    11001123{
    11011124    int rc = VINF_SUCCESS;
     1125    Assert(!HMIsEnabled(pVM));
    11021126
    11031127    /*
     
    13341358{
    13351359    Assert(CPUMIsGuestInProtectedMode(pVCpu));
     1360    Assert(!HMIsEnabled(pVM));
    13361361
    13371362    /*
     
    14351460VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
    14361461{
    1437     if (pVM->selm.s.fDisableMonitoring)
    1438     {
    1439         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    1440         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    1441         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    1442         return VINF_SUCCESS;
    1443     }
    1444 
    14451462    STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
     1463    AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);
    14461464
    14471465    /*
     
    15991617VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
    16001618{
    1601     int    rc;
    1602 
    1603     if (pVM->selm.s.fDisableMonitoring)
    1604     {
    1605         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    1606         return VINF_SUCCESS;
    1607     }
     1619    int rc;
     1620    AssertReturnStmt(!HMIsEnabled(pVM), VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS), VINF_SUCCESS);
    16081621
    16091622    STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);
     
    18421855#ifdef VBOX_STRICT
    18431856    PVMCPU pVCpu = VMMGetCpu(pVM);
     1857    AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);
    18441858
    18451859    /*
  • trunk/src/VBox/VMM/VMMR3/VMM.cpp

    r45618 r45701  
    211211    rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
    212212    AssertRCReturn(rc, rc);
    213 
    214     /* GC switchers are enabled by default. Turned off by HM. */
    215     pVM->vmm.s.fSwitcherDisabled = false;
    216213
    217214    /*
     
    549546
    550547    /* In VMX mode, there's no need to init RC. */
    551     if (pVM->vmm.s.fSwitcherDisabled)
     548    if (HMIsEnabled(pVM))
    552549        return VINF_SUCCESS;
    553550
  • trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp

    r44168 r45701  
    55
    66/*
    7  * Copyright (C) 2006-2012 Oracle Corporation
     7 * Copyright (C) 2006-2013 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    1616 */
    1717
     18
    1819/*******************************************************************************
    1920*   Header Files                                                               *
     
    2223#include <VBox/vmm/vmm.h>
    2324#include <VBox/vmm/pgm.h>
     25#include <VBox/vmm/hm.h>
    2426#include <VBox/vmm/selm.h>
    2527#include <VBox/vmm/mm.h>
     
    4648 * The type and index shall match!
    4749 */
    48 static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
     50static PVMMSWITCHERDEF g_apRawModeSwitchers[VMMSWITCHER_MAX] =
    4951{
    5052    NULL, /* invalid entry */
     
    7375    &vmmR3SwitcherAMD64To32Bit_Def,
    7476    &vmmR3SwitcherAMD64ToPAE_Def,
    75     NULL    //&vmmR3SwitcherAMD64ToAMD64_Def,
     77    NULL,   //&vmmR3SwitcherAMD64ToAMD64_Def,
    7678# endif /* RT_ARCH_AMD64 */
    7779#else  /* !VBOX_WITH_RAW_MODE */
     
    8486    NULL,
    8587    NULL,
    86     NULL
     88    NULL,
    8789#endif /* !VBOX_WITH_RAW_MODE */
     90#ifndef RT_ARCH_AMD64
     91    &vmmR3SwitcherX64Stub_Def,
     92    NULL,
     93#else
     94    NULL,
     95    &vmmR3SwitcherAMD64Stub_Def,
     96#endif
     97};
     98
     99/** Array of switcher definitions.
     100 * The type and index shall match!
     101 */
     102static PVMMSWITCHERDEF g_apHmSwitchers[VMMSWITCHER_MAX] =
     103{
     104    NULL, /* invalid entry */
     105#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     106    NULL,   //&vmmR3Switcher32BitTo32Bit_Def,
     107    NULL,   //&vmmR3Switcher32BitToPAE_Def,
     108    &vmmR3Switcher32BitToAMD64_Def,
     109    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
     110    NULL,   //&vmmR3SwitcherPAEToPAE_Def,
     111    &vmmR3SwitcherPAEToAMD64_Def,
     112    NULL,   //&vmmR3SwitcherPAETo32Bit_Def,
     113    NULL,   //&vmmR3SwitcherAMD64ToPAE_Def,
     114    NULL,   //&vmmR3SwitcherAMD64ToAMD64_Def,
      115#else  /* !(HC_ARCH_BITS == 32 && VBOX_ENABLE_64_BITS_GUESTS && !VBOX_WITH_HYBRID_32BIT_KERNEL) */
     116    NULL,
     117    NULL,
     118    NULL,
     119    NULL,
     120    NULL,
     121    NULL,
     122    NULL,
     123    NULL,
     124    NULL,
      125#endif /* !(HC_ARCH_BITS == 32 && VBOX_ENABLE_64_BITS_GUESTS && !VBOX_WITH_HYBRID_32BIT_KERNEL) */
     126#ifndef RT_ARCH_AMD64
     127    &vmmR3SwitcherX64Stub_Def,
     128    NULL,
     129#else
     130    NULL,
     131    &vmmR3SwitcherAMD64Stub_Def,
     132#endif
    88133};
    89134
     
    100145int vmmR3SwitcherInit(PVM pVM)
    101146{
    102 #ifndef VBOX_WITH_RAW_MODE
     147#ifndef VBOX_WITH_RAW_MODE /** @todo 64-bit on 32-bit. */
    103148    return VINF_SUCCESS;
    104149#else
     
    106151     * Calc the size.
    107152     */
     153    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
    108154    unsigned cbCoreCode = 0;
    109     for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
     155    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    110156    {
    111157        pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
    112         PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
     158        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
    113159        if (pSwitcher)
    114160        {
     
    180226         * copy the code.
    181227         */
    182         for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
     228        for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
    183229        {
    184             PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
     230            PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
    185231            if (pSwitcher)
    186232                memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],
     
    205251             * to get the routine addresses, so we'll reselect it.
    206252             * This may legally fail so, we're ignoring the rc.
     253             * Note! See HMIsEnabled hack in selector function.
    207254             */
    208255            VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);
     
    238285     * Relocate all the switchers.
    239286     */
    240     for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
    241     {
    242         PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
     287    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
     288    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
     289    {
     290        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
    243291        if (pSwitcher && pSwitcher->pfnRelocate)
    244292        {
     
    256304     * Recalc the RC address for the current switcher.
    257305     */
    258     PVMMSWITCHERDEF pSwitcher   = s_apSwitchers[pVM->vmm.s.enmSwitcher];
    259     RTRCPTR         RCPtr       = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
    260     pVM->vmm.s.pfnRCToHost              = RCPtr + pSwitcher->offRCToHost;
    261     pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offRCCallTrampoline;
    262     pVM->pfnVMMRCToHostAsm              = RCPtr + pSwitcher->offRCToHostAsm;
    263     pVM->pfnVMMRCToHostAsmNoReturn      = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
     306    PVMMSWITCHERDEF pSwitcher   = papSwitchers[pVM->vmm.s.enmSwitcher];
     307    if (pSwitcher)
     308    {
     309        RTRCPTR     RCPtr       = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
     310        pVM->vmm.s.pfnRCToHost              = RCPtr + pSwitcher->offRCToHost;
     311        pVM->vmm.s.pfnCallTrampolineRC      = RCPtr + pSwitcher->offRCCallTrampoline;
     312        pVM->pfnVMMRCToHostAsm              = RCPtr + pSwitcher->offRCToHostAsm;
     313        pVM->pfnVMMRCToHostAsmNoReturn      = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
     314    }
     315    else
     316        AssertRelease(HMIsEnabled(pVM));
    264317
    265318//    AssertFailed();
     
    9621015    }
    9631016
    964     /* Do nothing if the switcher is disabled. */
    965     if (pVM->vmm.s.fSwitcherDisabled)
    966         return VINF_SUCCESS;
     1017    /*
     1018     * Override it if HM is active.
     1019     */
     1020    if (HMIsEnabled(pVM))
     1021        pVM->vmm.s.enmSwitcher = HC_ARCH_BITS == 64 ? VMMSWITCHER_AMD64_STUB : VMMSWITCHER_X86_STUB;
    9671022
    9681023    /*
    9691024     * Select the new switcher.
    9701025     */
    971     PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
     1026    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
     1027    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
    9721028    if (pSwitcher)
    9731029    {
     
    9931049
    9941050/**
    995  * Disable the switcher logic permanently.
    996  *
    997  * @returns VBox status code.
    998  * @param   pVM             Pointer to the VM.
    999  */
    1000 VMMR3_INT_DECL(int) VMMR3DisableSwitcher(PVM pVM)
    1001 {
    1002 /** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
    1003  * @code
    1004  *       mov eax, VERR_VMM_DUMMY_SWITCHER
    1005  *       ret
    1006  * @endcode
    1007  * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
    1008  */
    1009     pVM->vmm.s.fSwitcherDisabled = true;
    1010     return VINF_SUCCESS;
    1011 }
    1012 
    1013 
    1014 /**
    10151051 * Gets the switcher to be used for switching to GC.
    10161052 *
     
    10341070     * Select the new switcher.
    10351071     */
    1036     PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
     1072    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
     1073    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
    10371074    if (pSwitcher)
    10381075    {
  • trunk/src/VBox/VMM/include/VMMInternal.h

    r45097 r45701  
    217217    /** Pointer to core code guest context mapping. */
    218218    RTRCPTR                     pvCoreCodeRC;
    219     RTRCPTR                     pRCPadding0; /**< Alignment padding */
     219    RTRCPTR                     pRCPadding0; /**< Alignment padding. */
    220220#ifdef VBOX_WITH_NMI
    221221    /** The guest context address of the APIC (host) mapping. */
    222222    RTRCPTR                     GCPtrApicBase;
    223     RTRCPTR                     pRCPadding1; /**< Alignment padding */
     223    RTRCPTR                     pRCPadding1; /**< Alignment padding. */
    224224#endif
    225225    /** The current switcher.
    226226     * This will be set before the VMM is fully initialized. */
    227227    VMMSWITCHER                 enmSwitcher;
    228     /** Flag to disable the switcher permanently (VMX) (boolean) */
    229     bool                        fSwitcherDisabled;
    230228    /** Array of offsets to the different switchers within the core code. */
    231     RTUINT                      aoffSwitchers[VMMSWITCHER_MAX];
     229    uint32_t                    aoffSwitchers[VMMSWITCHER_MAX];
     230    uint32_t                    u32Padding2; /**< Alignment padding. */
    232231
    233232    /** Resume Guest Execution. See CPUMGCResumeGuest(). */
  • trunk/src/VBox/VMM/include/VMMSwitcher.h

    r44528 r45701  
    128128
    129129RT_C_DECLS_BEGIN
     130extern VMMSWITCHERDEF vmmR3SwitcherX86Stub_Def;
    130131extern VMMSWITCHERDEF vmmR3Switcher32BitTo32Bit_Def;
    131132extern VMMSWITCHERDEF vmmR3Switcher32BitToPAE_Def;
     
    134135extern VMMSWITCHERDEF vmmR3SwitcherPAEToPAE_Def;
    135136extern VMMSWITCHERDEF vmmR3SwitcherPAEToAMD64_Def;
     137extern VMMSWITCHERDEF vmmR3SwitcherAMD64Stub_Def;
    136138extern VMMSWITCHERDEF vmmR3SwitcherAMD64To32Bit_Def;
    137139extern VMMSWITCHERDEF vmmR3SwitcherAMD64ToPAE_Def;
  • trunk/src/VBox/VMM/include/VMMSwitcher.mac

    r44528 r45701  
    55
    66;
    7 ; Copyright (C) 2006-2012 Oracle Corporation
     7; Copyright (C) 2006-2013 Oracle Corporation
    88;
    99; This file is part of VirtualBox Open Source Edition (OSE), as
     
    3232%define VMMSWITCHER_AMD64_TO_PAE        8
    3333%define VMMSWITCHER_AMD64_TO_AMD64      9
    34 ;; @todo the rest are all wrong. sync with vmm.h.
    35 %define VMMSWITCHER_HOST_TO_VMX         9
    36 %define VMMSWITCHER_HOST_TO_SVM         10
     34%define VMMSWITCHER_X86_STUB            10
     35%define VMMSWITCHER_AMD64_STUB          11
    3736%define VMMSWITCHER_MAX                 12
    3837; }
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette