Changeset 45786 in vbox for trunk/src/VBox/VMM


Timestamp: Apr 26, 2013 10:35:59 PM
Author: vboxsync
Message: Move HMRCA.asm into the switcher code so we don't need VMMRC.rc.
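
This changeset replaces the RC handler addresses ring-0 used to pass around (pfnVMXGCStartVM64, pfnSVMGCVMRun64, pfnSaveGuestFPU64, pfnSaveGuestDebug64, pfnTest64) with an operation enum, HM64ON32OP, that the 32-to-64 switcher dispatches on itself. A minimal C sketch of that enum follows; only the names and the values 1-5 are confirmed by the switcher code in this diff, so the exact declaration in VBox/vmm/hm.h is an assumption:

    typedef enum HM64ON32OP
    {
        HM64ON32OP_INVALID = 0,               /* Guard value; the new Asserts check enmOp > HM64ON32OP_INVALID. */
        HM64ON32OP_VMXRCStartVM64 = 1,        /* VT-x: run a 64-bit guest. */
        HM64ON32OP_SVMRCVMRun64 = 2,          /* AMD-V: run a 64-bit guest. */
        HM64ON32OP_HMRCSaveGuestFPU64 = 3,    /* Save the 64-bit guest FPU state. */
        HM64ON32OP_HMRCSaveGuestDebug64 = 4,  /* Save the 64-bit guest debug registers. */
        HM64ON32OP_HMRCTestSwitcher64 = 5,    /* Debug-only switcher self-test. */
        HM64ON32OP_END                        /* Guard value; checked as enmOp < HM64ON32OP_END. */
    } HM64ON32OP;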

Location: trunk/src/VBox/VMM
Files: 1 deleted, 15 edited

  • trunk/src/VBox/VMM/Makefile.kmk

    r45739 r45786  
    238238        VMMSwitcher/32BitTo32Bit.asm \
    239239        VMMSwitcher/32BitToPAE.asm \
    240         VMMSwitcher/32BitToAMD64.asm \
    241240        VMMSwitcher/PAETo32Bit.asm \
    242         VMMSwitcher/PAEToAMD64.asm \
    243241        VMMSwitcher/PAEToPAE.asm
    244242 VBoxVMM_SOURCES.amd64 = \
     
    249247endif
    250248VBoxVMM_SOURCES.x86 += \
     249        VMMSwitcher/32BitToAMD64.asm \
     250        VMMSwitcher/PAEToAMD64.asm \
    251251        VMMSwitcher/X86Stub.asm
    252252VBoxVMM_SOURCES.amd64 += \
     
    411411        VMMRC/VMMRC.cpp \
    412412        VMMRC/VMMRCA.asm \
    413         VMMRC/HMRCA.asm \
    414413        $(if-expr defined(VBOX_WITH_RAW_MODE), \
    415414        VMMRC/CSAMRC.cpp \
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r45739 r45786  
    259259}
    260260
    261 
    262 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     261#ifndef PGM_WITHOUT_MAPPINGS
     262
    263263/**
    264264 * Sets all PDEs involved with the mapping in the shadow page table.
     
    357357                if (!pgmPoolIsPageLocked(pPoolPagePd))
    358358                    pgmPoolLockPage(pPool, pPoolPagePd);
    359 #ifdef VBOX_STRICT
     359# ifdef VBOX_STRICT
    360360                else if (pShwPaePd->a[iPaePde].u & PGM_PDFLAGS_MAPPING)
    361361                {
     
    369369                                   ("%RX64 vs %RX64\n", pShwPaePd->a[iPaePde+1].u & X86_PDE_PAE_PG_MASK, pMap->aPTs[i].HCPhysPaePT1));
    370370                }
    371 #endif
     371# endif
    372372
    373373                /*
     
    542542    PGM_DYNMAP_UNUSED_HINT_VM(pVM, pCurrentShwPdpt);
    543543}
    544 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
    545 
     544
     545#endif /* PGM_WITHOUT_MAPPINGS */
    546546#if defined(VBOX_STRICT) && !defined(IN_RING0)
     547
    547548/**
    548549 * Clears all PDEs involved with the mapping in the shadow page table.
     
    653654    pgmUnlock(pVM);
    654655}
     656
    655657#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */
    656 
    657 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     658#ifndef PGM_WITHOUT_MAPPINGS
    658659
    659660/**
     
    741742    if (!pgmMapAreMappingsFloating(pVM))
    742743        return false;
    743 
    744     Assert(pVM->cCpus == 1);
     744    AssertReturn(pgmMapAreMappingsEnabled(pVM), false);
    745745
    746746    /* This only applies to raw mode where we only support 1 VCPU. */
     
    771771                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
    772772
    773 #ifdef IN_RING3
     773# ifdef IN_RING3
    774774                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
    775775                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
    776776                        (iPT + iPDE) << X86_PD_SHIFT, pCur->pszDesc,
    777777                        iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
    778 #else
     778# else
    779779                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
    780780                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
    781781                        (iPT + iPDE) << X86_PD_SHIFT,
    782782                        iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
    783 #endif
     783# endif
    784784                    return true;
    785785                }
     
    802802                {
    803803                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
    804 #ifdef IN_RING3
     804# ifdef IN_RING3
    805805                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping %s (PAE)\n"
    806806                         "                        PDE=%016RX64.\n",
    807807                        GCPtr, pCur->pszDesc, Pde.u));
    808 #else
     808# else
    809809                    Log(("PGMHasMappingConflicts: Conflict was detected at %RGv for mapping (PAE)\n"
    810810                         "                        PDE=%016RX64.\n",
    811811                        GCPtr, Pde.u));
    812 #endif
     812# endif
    813813                    return true;
    814814                }
     
    866866                    STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatR3DetectedConflicts);
    867867
    868 #ifdef IN_RING3
     868# ifdef IN_RING3
    869869                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping %s (32 bits)\n"
    870870                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
     
    874874                    AssertRCReturn(rc, rc);
    875875                    break;
    876 #else
     876# else
    877877                    Log(("PGMHasMappingConflicts: Conflict was detected at %08RX32 for mapping (32 bits)\n"
    878878                         "                        iPDE=%#x iPT=%#x PDE=%RGp.\n",
     
    880880                         iPDE, iPT, pPD->a[iPDE + iPT].au32[0]));
    881881                    return VINF_PGM_SYNC_CR3;
    882 #endif
     882# endif
    883883                }
    884884            }
     
    931931}
    932932
    933 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
    934 
     933#endif /* PGM_WITHOUT_MAPPINGS */
     934
  • trunk/src/VBox/VMM/VMMAll/SELMAll.cpp

    r45725 r45786  
    867867#endif
    868868
     869
    869870#ifdef VBOX_WITH_RAW_MODE_NOT_R0
    870 
    871871/**
    872872 * Gets ss:esp for ring1 in main Hypervisor's TSS.
     
    954954    return VINF_SUCCESS;
    955955}
    956 
     956#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
     957
     958
     959#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    957960
    958961/**
     
    10281031}
    10291032
    1030 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
     1033#endif /* defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */
    10311034
    10321035/**
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r45749 r45786  
    15851585    STAM_COUNTER_INC(&pVCpu->hm.s.StatFpu64SwitchBack);
    15861586    if (pVM->hm.s.vmx.fSupported)
    1587         return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);
    1588     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestFPU64, 0, NULL);
     1587        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL);
     1588    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL);
    15891589}
    15901590
     
    16021602    STAM_COUNTER_INC(&pVCpu->hm.s.StatDebug64SwitchBack);
    16031603    if (pVM->hm.s.vmx.fSupported)
    1604         return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);
    1605     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSaveGuestDebug64, 0, NULL);
     1604        return VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL);
     1605    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL);
    16061606}
    16071607
     
    16221622    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
    16231623    if (pVM->hm.s.vmx.fSupported)
    1624         rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);
     1624        rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]);
    16251625    else
    1626         rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnTest64, 5, &aParam[0]);
     1626        rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]);
    16271627    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
    16281628
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45785 r45786  
    40244024 * @param   pVCpu       Pointer to the VMCPU.
    40254025 * @param   pCtx        Pointer to the guest CPU context.
    4026  * @param   pfnHandler  Pointer to the RC handler function.
     4026 * @param   enmOp       The operation to perform.
    40274027 * @param   cbParam     Number of parameters.
    40284028 * @param   paParam     Array of 32-bit parameters.
    40294029 */
    4030 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     4030VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    40314031                                         uint32_t *paParam)
    40324032{
     
    40374037
    40384038    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    4039     Assert(pfnHandler);
     4039    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
    40404040    Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
    40414041    Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
     
    40694069
    40704070    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    4071     CPUMSetHyperEIP(pVCpu, pfnHandler);
     4071    CPUMSetHyperEIP(pVCpu, enmOp);
    40724072    for (int i = (int)cbParam - 1; i >= 0; i--)
    40734073        CPUMPushHyper(pVCpu, paParam[i]);
     
    41174117    RTHCPHYS        HCPhysCpuPage = 0;
    41184118    int             rc            = VERR_INTERNAL_ERROR_5;
     4119    AssertReturn(pVM->hm.s.pfnVMXGCStartVM64, VERR_HM_IPE_5);
    41194120
    41204121    pCpu = HMR0GetCurrentCpu();
     
    41484149    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
    41494150#endif
    4150     rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
     4151    rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
    41514152
    41524153#ifdef VBOX_WITH_CRASHDUMP_MAGIC
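
Note the mechanism above: CPUMSetHyperEIP() now stores the HM64ON32OP value, so CPUMCPU.Hyper.eip no longer carries an RC code address. The switcher (LegacyandAMD64.mac below) loads that field into r9 and picks the handler with a cmp/jz chain rather than an indirect call. A C rendering of that dispatch, as a sketch only; the real handlers are assembly procedures taking their arguments in registers and on the stack, declared here argument-less purely for illustration:

    /* Sketch of the switcher's new dispatch, expressed in C.  Unknown values
       now fail cleanly instead of jumping to an arbitrary RC address. */
    extern int VMXRCStartVM64(void), SVMRCVMRun64(void), HMRCSaveGuestFPU64(void),
               HMRCSaveGuestDebug64(void), HMRCTestSwitcher64(void);

    static int hm64On32Dispatch(HM64ON32OP enmOp)
    {
        switch (enmOp)
        {
            case HM64ON32OP_VMXRCStartVM64:       return VMXRCStartVM64();
            case HM64ON32OP_SVMRCVMRun64:         return SVMRCVMRun64();
            case HM64ON32OP_HMRCSaveGuestFPU64:   return HMRCSaveGuestFPU64();
            case HM64ON32OP_HMRCSaveGuestDebug64: return HMRCSaveGuestDebug64();
            case HM64ON32OP_HMRCTestSwitcher64:   return HMRCTestSwitcher64();
            default:                              return VERR_HM_INVALID_HM64ON32OP;
        }
    }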
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r45749 r45786  
    31223122    aParam[3] = (uint32_t)(HCPhysVMCB >> 32);                /* Param 2: HCPhysVMCB - Hi. */
    31233123
    3124     return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]);
     3124    return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
    31253125}
    31263126
     
    31333133 * @param   pVCpu       Pointer to the VMCPU.
    31343134 * @param   pCtx        Pointer to the guest CPU context.
    3135  * @param   pfnHandler  Pointer to the RC handler function.
     3135 * @param   enmOp       The operation to perform.
    31363136 * @param   cbParam     Number of parameters.
    31373137 * @param   paParam     Array of 32-bit parameters.
    31383138 */
    3139 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     3139VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    31403140                                         uint32_t *paParam)
    31413141{
     
    31433143    RTHCUINTREG     uOldEFlags;
    31443144
    3145     Assert(pfnHandler);
     3145    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
     3146    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
    31463147
    31473148    /* Disable interrupts. */
     
    31543155
    31553156    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    3156     CPUMSetHyperEIP(pVCpu, pfnHandler);
     3157    CPUMSetHyperEIP(pVCpu, enmOp);
    31573158    for (int i = (int)cbParam - 1; i >= 0; i--)
    31583159        CPUMPushHyper(pVCpu, paParam[i]);
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r43455 r45786  
    127127
    128128#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    129 /**
    130  * Prepares for and executes VMRUN (64-bit guests from a 32-bit host).
    131  *
    132  * @returns VBox status code.
    133  * @param   pVMCBHostPhys   Physical address of host VMCB.
    134  * @param   pVMCBPhys       Physical address of the VMCB.
    135  * @param   pCtx            Pointer to the guest CPU context.
    136  * @param   pVM             Pointer to the VM.
    137  * @param   pVCpu           Pointer to the VMCPU. (not used)
    138  */
    139 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    140 
    141 /**
    142  * Executes the specified handler in 64-bit mode.
    143  *
    144  * @returns VBox status code.
    145  * @param   pVM         Pointer to the VM.
    146  * @param   pVCpu       Pointer to the VMCPU.
    147  * @param   pCtx        Pointer to the guest CPU context.
    148  * @param   pfnHandler  Pointer to the RC handler function.
    149  * @param   cbParam     Number of parameters.
    150  * @param   paParam     Array of 32-bit parameters.
    151  */
    152 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     129DECLASM(int)   SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
     130VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    153131                                         uint32_t *paParam);
    154132#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r45749 r45786  
    54785478    *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
    54795479#endif
    5480     rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
     5480    rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
    54815481
    54825482#ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    56105610 * @param   pVCpu       Pointer to the VMCPU.
    56115611 * @param   pCtx        Pointer to the guest CPU context.
    5612  * @param   pfnHandler  Pointer to the RC handler function.
     5612 * @param   enmOp       The operation to perform.
    56135613 * @param   cbParam     Number of parameters.
    56145614 * @param   paParam     Array of 32-bit parameters.
    56155615 */
    5616 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     5616VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    56175617                                         uint32_t *paParam)
    56185618{
     
    56235623
    56245624    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
    5625     Assert(pfnHandler);
     5625    Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
    56265626    Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
    56275627    Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
     
    56555655
    56565656    CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
    5657     CPUMSetHyperEIP(pVCpu, pfnHandler);
     5657    CPUMSetHyperEIP(pVCpu, enmOp);
    56585658    for (int i=(int)cbParam-1;i>=0;i--)
    56595659        CPUMPushHyper(pVCpu, paParam[i]);
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r45681 r45786  
    4646# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4747DECLASM(int)    VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    48 VMMR0DECL(int)  VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     48VMMR0DECL(int)  VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
    4949                                         uint32_t *paParam);
    5050# endif
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r45781 r45786  
    14961496    if (HMIsEnabled(pVM))
    14971497    {
    1498         int rc;
    14991498        switch (PGMGetHostMode(pVM))
    15001499        {
     
    15121511                break;
    15131512        }
    1514         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "VMXGCStartVM64", &pVM->hm.s.pfnVMXGCStartVM64);
    1515         AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));
    1516 
    1517         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "SVMGCVMRun64",   &pVM->hm.s.pfnSVMGCVMRun64);
    1518         AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc));
    1519 
    1520         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HMSaveGuestFPU64",   &pVM->hm.s.pfnSaveGuestFPU64);
    1521         AssertReleaseMsgRC(rc, ("HMSetupFPU64 -> rc=%Rrc\n", rc));
    1522 
    1523         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HMSaveGuestDebug64",   &pVM->hm.s.pfnSaveGuestDebug64);
    1524         AssertReleaseMsgRC(rc, ("HMSetupDebug64 -> rc=%Rrc\n", rc));
    1525 
    1526 # ifdef DEBUG
    1527         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       "HMTestSwitcher64",   &pVM->hm.s.pfnTest64);
    1528         AssertReleaseMsgRC(rc, ("HMTestSwitcher64 -> rc=%Rrc\n", rc));
    1529 # endif
    15301513    }
    15311514#endif
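
The deletions above are the "so we don't need VMMRC.rc" half of the commit message: ring-3 no longer resolves the 64-bit helpers out of the RC module by name at init time. Recapping the removed idiom as a sketch (trimmed from the deleted lines above):

    /* Old pattern, now gone: look up each helper's address in VMMRC.rc and
       stash it in the HM state for the 32-to-64 switcher to jump to later. */
    rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hm.s.pfnVMXGCStartVM64);
    AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc));

With the helpers linked directly into the switcher image, no RC symbol lookup remains on this path.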
  • trunk/src/VBox/VMM/VMMR3/VM.cpp

    r45618 r45786  
    774774{
    775775    int         rc;
    776     bool        fEnabled;
    777776    PCFGMNODE   pRoot = CFGMR3GetRoot(pVM);
    778777
     
    794793    Assert(pVM->fRecompileUser == false); /* ASSUMES all zeros at this point */
    795794#ifdef VBOX_WITH_RAW_MODE
     795    bool        fEnabled;
    796796    rc = CFGMR3QueryBoolDef(pRoot, "RawR3Enabled", &fEnabled, false); AssertRCReturn(rc, rc);
    797797    pVM->fRecompileUser       = !fEnabled;
  • trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp

    r45750 r45786  
    145145int vmmR3SwitcherInit(PVM pVM)
    146146{
    147 #ifndef VBOX_WITH_RAW_MODE /** @todo 64-bit on 32-bit. */
     147#if !defined(VBOX_WITH_RAW_MODE) && (HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    148148    return VINF_SUCCESS;
    149149#else
     150
    150151    /*
    151152     * Calc the size.
     
    281282void vmmR3SwitcherRelocate(PVM pVM, RTGCINTPTR offDelta)
    282283{
    283 #ifdef VBOX_WITH_RAW_MODE
     284#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    284285    /*
    285286     * Relocate all the switchers.
     
    316317        AssertRelease(HMIsEnabled(pVM));
    317318
    318 //    AssertFailed();
    319319#else
    320320    NOREF(pVM);
     
    324324
    325325
    326 #ifdef VBOX_WITH_RAW_MODE
     326#if defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL))
    327327
    328328/**
     
    675675            }
    676676
    677 #if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     677# if defined(RT_ARCH_AMD64) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    678678            /*
    679679             * 64-bit HC Code Selector (no argument).
     
    682682            {
    683683                Assert(offSrc < pSwitcher->cbCode);
    684 # if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     684#  if defined(RT_OS_DARWIN) && defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    685685                *uSrc.pu16 = 0x80; /* KERNEL64_CS from i386/seg.h */
    686 # else
     686#  else
    687687                AssertFatalMsgFailed(("FIX_HC_64BIT_CS not implemented for this host\n"));
    688 # endif
     688#  endif
    689689                break;
    690690            }
     
    699699                break;
    700700            }
    701 #endif
     701# endif
    702702            /*
    703703             * 64-bit HC pointer fixup to (HC) target within the code (32-bit offset).
     
    712712            }
    713713
    714 #ifdef RT_ARCH_X86
     714# ifdef RT_ARCH_X86
    715715            case FIX_GC_64_BIT_CPUM_OFF:
    716716            {
     
    720720                break;
    721721            }
    722 #endif
     722# endif
    723723
    724724            /*
     
    761761            }
    762762
    763 #ifdef VBOX_WITH_NMI
     763# ifdef VBOX_WITH_NMI
    764764            /*
    765765             * 32-bit address to the APIC base.
     
    770770                break;
    771771            }
    772 #endif
     772# endif
    773773
    774774            default:
     
    778778    }
    779779
    780 #ifdef LOG_ENABLED
     780# ifdef LOG_ENABLED
    781781    /*
    782782     * If Log2 is enabled disassemble the switcher code.
     
    913913        }
    914914    }
    915 #endif
     915# endif
    916916}
    917917
     
    927927    if (HMIsRawModeCtxNeeded(pVM))
    928928        return SELMGetHyperGDT(pVM);
    929 #if HC_ARCH_BITS != 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     929# if HC_ARCH_BITS != 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    930930    AssertFailed(); /* This path is only applicable to some 32-bit hosts. */
    931 #endif
     931# endif
    932932    return NIL_RTRCPTR;
    933933}
     
    10621062}
    10631063
    1064 #endif /* VBOX_WITH_RAW_MODE */
     1064#endif /* #defined(VBOX_WITH_RAW_MODE) || (HC_ARCH_BITS != 64 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)) */
    10651065
    10661066
  • trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac

    r45745 r45786  
    11; $Id$
    22;; @file
    3 ; VMM - World Switchers, 32Bit to AMD64 intermediate context.
    4 ;
    5 ; This is used for running 64-bit guest on 32-bit hosts, not normal raw-mode.
    6 ;
    7 
    8 ;
    9 ; Copyright (C) 2006-2012 Oracle Corporation
     3; VMM - World Switchers, 32-bit to AMD64 intermediate context.
     4;
     5; This is used for running 64-bit guest on 32-bit hosts, not
     6; normal raw-mode.  All the code involved is contained in this
     7; file.
     8;
     9
     10;
     11; Copyright (C) 2006-2013 Oracle Corporation
    1012;
    1113; This file is part of VirtualBox Open Source Edition (OSE), as
     
    1820;
    1921
    20 ;%define DEBUG_STUFF 1
    21 ;%define STRICT_IF 1
    2222
    2323;*******************************************************************************
    2424;*  Defined Constants And Macros                                               *
    2525;*******************************************************************************
     26;; @note These values are from the HM64ON32OP enum in hm.h.
     27%define HM64ON32OP_VMXRCStartVM64       1
     28%define HM64ON32OP_SVMRCVMRun64         2
     29%define HM64ON32OP_HMRCSaveGuestFPU64   3
     30%define HM64ON32OP_HMRCSaveGuestDebug64 4
     31%define HM64ON32OP_HMRCTestSwitcher64   5
     32
     33;; Stubs for making OS/2 compile (though, not work).
     34%ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely.
     35 %macro vmwrite 2,
     36    int3
     37 %endmacro
     38 %define vmlaunch int3
     39 %define vmresume int3
     40 %define vmsave int3
     41 %define vmload int3
     42 %define vmrun int3
     43 %define clgi int3
     44 %define stgi int3
     45 %macro invlpga 2,
     46    int3
     47 %endmacro
     48%endif
     49
     50;; Debug options
     51;%define DEBUG_STUFF 1
     52;%define STRICT_IF 1
    2653
    2754
     
    3057;*******************************************************************************
    3158%include "VBox/asmdefs.mac"
     59%include "iprt/x86.mac"
     60%include "VBox/err.mac"
    3261%include "VBox/apic.mac"
    33 %include "iprt/x86.mac"
     62
    3463%include "VBox/vmm/cpum.mac"
    3564%include "VBox/vmm/stam.mac"
    3665%include "VBox/vmm/vm.mac"
     66%include "VBox/vmm/hm_vmx.mac"
    3767%include "CPUMInternal.mac"
     68%include "HMInternal.mac"
    3869%include "VMMSwitcher.mac"
    3970
     
    175206    mov     [edx + CPUMCPU.Host.ss], ss
    176207    ; special registers.
     208    DEBUG32_S_CHAR('s')
     209    DEBUG32_S_CHAR(';')
    177210    sldt    [edx + CPUMCPU.Host.ldtr]
    178211    sidt    [edx + CPUMCPU.Host.idtr]
     
    185218
    186219%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     220    DEBUG32_S_CHAR('f')
     221    DEBUG32_S_CHAR(';')
    187222    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
    188223    mov     ebx, [edx + CPUM.pvApicBase]
     
    246281    mov     eax, cr4
    247282    mov     [edx + CPUMCPU.Host.cr4], eax
     283    DEBUG32_S_CHAR('c')
     284    DEBUG32_S_CHAR(';')
    248285
    249286    ; save the host EFER msr
     
    254291    mov     [ebx + CPUMCPU.Host.efer + 4], edx
    255292    mov     edx, ebx
     293    DEBUG32_S_CHAR('e')
     294    DEBUG32_S_CHAR(';')
    256295
    257296%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    262301    lgdt    [edx + CPUMCPU.Hyper.gdtr]
    263302
     303    DEBUG32_S_CHAR('g')
     304    DEBUG32_S_CHAR('!')
    264305%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    265306    mov dword [edx + CPUMCPU.Guest.dr + 4*8], 4
     
    272313    mov     eax, 0ffffffffh
    273314    mov     cr3, eax
    274     DEBUG_CHAR('?')
     315    DEBUG32_CHAR('?')
    275316
    276317    ;;
     
    284325ALIGNCODE(16)
    285326GLOBALNAME IDEnterTarget
    286     DEBUG_CHAR('2')
     327    DEBUG32_CHAR('1')
    287328
    288329    ; 1. Disable paging.
     
    290331    and     ebx, ~X86_CR0_PG
    291332    mov     cr0, ebx
    292     DEBUG_CHAR('2')
     333    DEBUG32_CHAR('2')
    293334
    294335%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    306347    mov     ecx, 0ffffffffh
    307348    mov     cr3, ecx
    308     DEBUG_CHAR('3')
     349    DEBUG32_CHAR('3')
    309350
    310351%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    322363    wrmsr
    323364    mov     edx, esi
    324     DEBUG_CHAR('4')
     365    DEBUG32_CHAR('4')
    325366
    326367%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     
    334375    and     ebx, ~X86_CR0_WRITE_PROTECT
    335376    mov     cr0, ebx
    336     DEBUG_CHAR('5')
     377    DEBUG32_CHAR('5')
    337378
    338379    ; Jump from compatibility mode to 64-bit mode.
     
    345386ALIGNCODE(16)
    346387NAME(IDEnter64Mode):
    347     DEBUG_CHAR('6')
     388    DEBUG64_CHAR('6')
    348389    jmp     [NAME(pICEnterTarget) wrt rip]
    349390
     
    386427
    387428    ; Setup stack.
    388     DEBUG_CHAR('7')
     429    DEBUG64_CHAR('7')
    389430    mov     rsp, 0
    390431    mov     eax, [rdx + CPUMCPU.Hyper.ss.Sel]
     
    399440    ; load the hypervisor function address
    400441    mov     r9, [rdx + CPUMCPU.Hyper.eip]
     442    DEBUG64_S_CHAR('8')
    401443
    402444    ; Check if we need to restore the guest FPU state
     
    447489
    448490    ; parameter for all helper functions (pCtx)
     491    DEBUG64_CHAR('9')
    449492    lea     rsi, [rdx + CPUMCPU.Guest.fpu]
    450     call    r9
     493    lea     rax, [gth_return wrt rip]
     494    push    rax                         ; return address
     495
     496    cmp     r9d, HM64ON32OP_VMXRCStartVM64
     497    jz      NAME(VMXRCStartVM64)
     498    cmp     r9d, HM64ON32OP_SVMRCVMRun64
     499    jz      NAME(SVMRCVMRun64)
     500    cmp     r9d, HM64ON32OP_HMRCSaveGuestFPU64
     501    jz      NAME(HMRCSaveGuestFPU64)
     502    cmp     r9d, HM64ON32OP_HMRCSaveGuestDebug64
     503    jz      NAME(HMRCSaveGuestDebug64)
     504    cmp     r9d, HM64ON32OP_HMRCTestSwitcher64
     505    jz      NAME(HMRCTestSwitcher64)
     506    mov     eax, VERR_HM_INVALID_HM64ON32OP
     507gth_return:
     508    DEBUG64_CHAR('r')
    451509
    452510    ; Load CPUM pointer into rdx
     
    465523
    466524ENDPROC vmmR0ToRawModeAsm
     525
     526
     527
     528
     529;
     530;
     531; HM code (used to be HMRCA.asm at one point).
     532; HM code (used to be HMRCA.asm at one point).
     533; HM code (used to be HMRCA.asm at one point).
     534;
     535;
     536
     537
     538
     539; Load the corresponding guest MSR (trashes rdx & rcx)
     540%macro LOADGUESTMSR 2
     541    mov     rcx, %1
     542    mov     edx, dword [rsi + %2 + 4]
     543    mov     eax, dword [rsi + %2]
     544    wrmsr
     545%endmacro
     546
     547; Save a guest MSR (trashes rdx & rcx)
     548; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
     549%macro SAVEGUESTMSR 2
     550    mov     rcx, %1
     551    rdmsr
     552    mov     dword [rsi + %2], eax
     553    mov     dword [rsi + %2 + 4], edx
     554%endmacro
     555
     556;; @def MYPUSHSEGS
     557; Macro saving all segment registers on the stack.
     558; @param 1  full width register name
     559%macro MYPUSHSEGS 1
     560    mov     %1, es
     561    push    %1
     562    mov     %1, ds
     563    push    %1
     564%endmacro
     565
     566;; @def MYPOPSEGS
     567; Macro restoring all segment registers on the stack
     568; @param 1  full width register name
     569%macro MYPOPSEGS 1
     570    pop     %1
     571    mov     ds, %1
     572    pop     %1
     573    mov     es, %1
     574%endmacro
     575
     576
     577;/**
     578; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
     579; *
     580; * @returns VBox status code
     581; * @param   HCPhysCpuPage  VMXON physical address  [rsp+8]
     582; * @param   HCPhysVmcs     VMCS physical address   [rsp+16]
     583; * @param   pCache         VMCS cache              [rsp+24]
     584; * @param   pCtx           Guest context (rsi)
     585; */
     586BEGINPROC VMXRCStartVM64
     587    push    rbp
     588    mov     rbp, rsp
     589
     590    ; Make sure VT-x instructions are allowed
     591    mov     rax, cr4
     592    or      rax, X86_CR4_VMXE
     593    mov     cr4, rax
     594
     595    ;/* Enter VMX Root Mode */
     596    vmxon   [rbp + 8 + 8]
     597    jnc     .vmxon_success
     598    mov     rax, VERR_VMX_INVALID_VMXON_PTR
     599    jmp     .vmstart64_vmxon_failed
     600
     601.vmxon_success:
     602    jnz     .vmxon_success2
     603    mov     rax, VERR_VMX_VMXON_FAILED
     604    jmp     .vmstart64_vmxon_failed
     605
     606.vmxon_success2:
     607    ; Activate the VMCS pointer
     608    vmptrld [rbp + 16 + 8]
     609    jnc     .vmptrld_success
     610    mov     rax, VERR_VMX_INVALID_VMCS_PTR
     611    jmp     .vmstart64_vmxoff_end
     612
     613.vmptrld_success:
     614    jnz     .vmptrld_success2
     615    mov     rax, VERR_VMX_VMPTRLD_FAILED
     616    jmp     .vmstart64_vmxoff_end
     617
     618.vmptrld_success2:
     619
     620    ; Save the VMCS pointer on the stack
     621    push    qword [rbp + 16 + 8];
     622
     623    ;/* Save segment registers */
     624    MYPUSHSEGS rax
     625
     626%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     627    ; Flush the VMCS write cache first (before any other vmreads/vmwrites!)
     628    mov     rbx, [rbp + 24 + 8]                             ; pCache
     629
     630 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     631    mov     qword [rbx + VMCSCACHE.uPos], 2
     632 %endif
     633
     634 %ifdef DEBUG
     635    mov     rax, [rbp + 8 + 8]                              ; HCPhysCpuPage
     636    mov     [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
     637    mov     rax, [rbp + 16 + 8]                             ; HCPhysVmcs
     638    mov     [rbx + VMCSCACHE.TestIn.HCPhysVmcs], rax
     639    mov     [rbx + VMCSCACHE.TestIn.pCache], rbx
     640    mov     [rbx + VMCSCACHE.TestIn.pCtx], rsi
     641 %endif
     642
     643    mov     ecx, [rbx + VMCSCACHE.Write.cValidEntries]
     644    cmp     ecx, 0
     645    je      .no_cached_writes
     646    mov     rdx, rcx
     647    mov     rcx, 0
     648    jmp     .cached_write
     649
     650ALIGN(16)
     651.cached_write:
     652    mov     eax, [rbx + VMCSCACHE.Write.aField + rcx*4]
     653    vmwrite rax, qword [rbx + VMCSCACHE.Write.aFieldVal + rcx*8]
     654    inc     rcx
     655    cmp     rcx, rdx
     656    jl     .cached_write
     657
     658    mov     dword [rbx + VMCSCACHE.Write.cValidEntries], 0
     659.no_cached_writes:
     660
     661 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     662    mov     qword [rbx + VMCSCACHE.uPos], 3
     663 %endif
     664    ; Save the pCache pointer
     665    push    rbx
     666%endif
     667
     668    ; Save the host state that's relevant in the temporary 64 bits mode
     669    mov     rdx, cr0
     670    mov     eax, VMX_VMCS_HOST_CR0
     671    vmwrite rax, rdx
     672
     673    mov     rdx, cr3
     674    mov     eax, VMX_VMCS_HOST_CR3
     675    vmwrite rax, rdx
     676
     677    mov     rdx, cr4
     678    mov     eax, VMX_VMCS_HOST_CR4
     679    vmwrite rax, rdx
     680
     681    mov     rdx, cs
     682    mov     eax, VMX_VMCS_HOST_FIELD_CS
     683    vmwrite rax, rdx
     684
     685    mov     rdx, ss
     686    mov     eax, VMX_VMCS_HOST_FIELD_SS
     687    vmwrite rax, rdx
     688
     689    sub     rsp, 8*2
     690    sgdt    [rsp]
     691    mov     eax, VMX_VMCS_HOST_GDTR_BASE
     692    vmwrite rax, [rsp+2]
     693    add     rsp, 8*2
     694
     695%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     696    mov     qword [rbx + VMCSCACHE.uPos], 4
     697%endif
     698
     699    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)
     700
     701    ;/* First we have to save some final CPU context registers. */
     702    lea     rdx, [.vmlaunch64_done wrt rip]
     703    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
     704    vmwrite rax, rdx
     705    ;/* Note: assumes success... */
     706
     707    ;/* Manual save and restore:
     708    ; * - General purpose registers except RIP, RSP
     709    ; *
     710    ; * Trashed:
     711    ; * - CR2 (we don't care)
     712    ; * - LDTR (reset to 0)
     713    ; * - DRx (presumably not changed at all)
     714    ; * - DR7 (reset to 0x400)
     715    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
     716    ; *
     717    ; */
     718
     719%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     720    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
     721    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
     722    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
     723    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
     724%endif
     725    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
     726    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     727
     728%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     729    mov     qword [rbx + VMCSCACHE.uPos], 5
     730%endif
     731
     732    ; Save the pCtx pointer
     733    push    rsi
     734
     735    ; Restore CR2
     736    mov     rbx, qword [rsi + CPUMCTX.cr2]
     737    mov     rdx, cr2
     738    cmp     rdx, rbx
     739    je      .skipcr2write64
     740    mov     cr2, rbx
     741
     742.skipcr2write64:
     743    mov     eax, VMX_VMCS_HOST_RSP
     744    vmwrite rax, rsp
     745    ;/* Note: assumes success... */
     746    ;/* Don't mess with ESP anymore!! */
     747
     748    ;/* Restore Guest's general purpose registers. */
     749    mov     rax, qword [rsi + CPUMCTX.eax]
     750    mov     rbx, qword [rsi + CPUMCTX.ebx]
     751    mov     rcx, qword [rsi + CPUMCTX.ecx]
     752    mov     rdx, qword [rsi + CPUMCTX.edx]
     753    mov     rbp, qword [rsi + CPUMCTX.ebp]
     754    mov     r8,  qword [rsi + CPUMCTX.r8]
     755    mov     r9,  qword [rsi + CPUMCTX.r9]
     756    mov     r10, qword [rsi + CPUMCTX.r10]
     757    mov     r11, qword [rsi + CPUMCTX.r11]
     758    mov     r12, qword [rsi + CPUMCTX.r12]
     759    mov     r13, qword [rsi + CPUMCTX.r13]
     760    mov     r14, qword [rsi + CPUMCTX.r14]
     761    mov     r15, qword [rsi + CPUMCTX.r15]
     762
     763    ;/* Restore rdi & rsi. */
     764    mov     rdi, qword [rsi + CPUMCTX.edi]
     765    mov     rsi, qword [rsi + CPUMCTX.esi]
     766
     767    vmlaunch
     768    jmp     .vmlaunch64_done;      ;/* here if vmlaunch detected a failure. */
     769
     770ALIGNCODE(16)
     771.vmlaunch64_done:
     772    jc      near .vmstart64_invalid_vmxon_ptr
     773    jz      near .vmstart64_start_failed
     774
     775    push    rdi
     776    mov     rdi, [rsp + 8]         ; pCtx
     777
     778    mov     qword [rdi + CPUMCTX.eax], rax
     779    mov     qword [rdi + CPUMCTX.ebx], rbx
     780    mov     qword [rdi + CPUMCTX.ecx], rcx
     781    mov     qword [rdi + CPUMCTX.edx], rdx
     782    mov     qword [rdi + CPUMCTX.esi], rsi
     783    mov     qword [rdi + CPUMCTX.ebp], rbp
     784    mov     qword [rdi + CPUMCTX.r8],  r8
     785    mov     qword [rdi + CPUMCTX.r9],  r9
     786    mov     qword [rdi + CPUMCTX.r10], r10
     787    mov     qword [rdi + CPUMCTX.r11], r11
     788    mov     qword [rdi + CPUMCTX.r12], r12
     789    mov     qword [rdi + CPUMCTX.r13], r13
     790    mov     qword [rdi + CPUMCTX.r14], r14
     791    mov     qword [rdi + CPUMCTX.r15], r15
     792%ifndef VBOX_WITH_OLD_VTX_CODE
     793    mov     rax, cr2
     794    mov     qword [rdi + CPUMCTX.cr2], rax
     795%endif
     796
     797    pop     rax                                 ; the guest edi we pushed above
     798    mov     qword [rdi + CPUMCTX.edi], rax
     799
     800    pop     rsi         ; pCtx (needed in rsi by the macros below)
     801
     802%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     803    SAVEGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
     804    SAVEGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
     805    SAVEGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
     806%endif
     807    ; Kernel GS Base is special, we need to manually load/store it, see @bugref{6208}.
     808    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     809
     810%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     811    pop     rdi         ; saved pCache
     812
     813 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     814    mov     dword [rdi + VMCSCACHE.uPos], 7
     815 %endif
     816 %ifdef DEBUG
     817    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
     818    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
     819    mov     rax, cr8
     820    mov     [rdi + VMCSCACHE.TestOut.cr8], rax
     821 %endif
     822
     823    mov     ecx, [rdi + VMCSCACHE.Read.cValidEntries]
     824    cmp     ecx, 0  ; can't happen
     825    je      .no_cached_reads
     826    jmp     .cached_read
     827
     828ALIGN(16)
     829.cached_read:
     830    dec     rcx
     831    mov     eax, [rdi + VMCSCACHE.Read.aField + rcx*4]
     832    vmread  qword [rdi + VMCSCACHE.Read.aFieldVal + rcx*8], rax
     833    cmp     rcx, 0
     834    jnz     .cached_read
     835.no_cached_reads:
     836
     837 %ifdef VBOX_WITH_OLD_VTX_CODE
     838    ; Save CR2 for EPT
     839    mov     rax, cr2
     840    mov     [rdi + VMCSCACHE.cr2], rax
     841 %endif
     842 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     843    mov     dword [rdi + VMCSCACHE.uPos], 8
     844 %endif
     845%endif
     846
     847    ; Restore segment registers
     848    MYPOPSEGS rax
     849
     850    mov     eax, VINF_SUCCESS
     851
     852%ifdef VBOX_WITH_CRASHDUMP_MAGIC
     853    mov     dword [rdi + VMCSCACHE.uPos], 9
     854%endif
     855.vmstart64_end:
     856
     857%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     858 %ifdef DEBUG
     859    mov     rdx, [rsp]                             ; HCPhysVmcs
     860    mov     [rdi + VMCSCACHE.TestOut.HCPhysVmcs], rdx
     861 %endif
     862%endif
     863
     864    ; Write back the data and disable the VMCS
     865    vmclear qword [rsp]  ;Pushed pVMCS
     866    add     rsp, 8
     867
     868.vmstart64_vmxoff_end:
     869    ; Disable VMX root mode
     870    vmxoff
     871.vmstart64_vmxon_failed:
     872%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     873 %ifdef DEBUG
     874    cmp     eax, VINF_SUCCESS
     875    jne     .skip_flags_save
     876
     877    pushf
     878    pop     rdx
     879    mov     [rdi + VMCSCACHE.TestOut.eflags], rdx
     880  %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     881    mov     dword [rdi + VMCSCACHE.uPos], 12
     882  %endif
     883.skip_flags_save:
     884 %endif
     885%endif
     886    pop     rbp
     887    ret
     888
     889
     890.vmstart64_invalid_vmxon_ptr:
     891    pop     rsi         ; pCtx (needed in rsi by the macros below)
     892
     893%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     894    pop     rdi         ; pCache
     895 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     896    mov     dword [rdi + VMCSCACHE.uPos], 10
     897 %endif
     898
     899 %ifdef DEBUG
     900    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
     901    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
     902 %endif
     903%endif
     904
     905    ; Restore segment registers
     906    MYPOPSEGS rax
     907
     908    ; Restore all general purpose host registers.
     909    mov     eax, VERR_VMX_INVALID_VMXON_PTR
     910    jmp     .vmstart64_end
     911
     912.vmstart64_start_failed:
     913    pop     rsi         ; pCtx (needed in rsi by the macros below)
     914
     915%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     916    pop     rdi         ; pCache
     917
     918 %ifdef DEBUG
     919    mov     [rdi + VMCSCACHE.TestOut.pCache], rdi
     920    mov     [rdi + VMCSCACHE.TestOut.pCtx], rsi
     921 %endif
     922 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     923    mov     dword [rdi + VMCSCACHE.uPos], 11
     924 %endif
     925%endif
     926
     927    ; Restore segment registers
     928    MYPOPSEGS rax
     929
     930    ; Restore all general purpose host registers.
     931    mov     eax, VERR_VMX_UNABLE_TO_START_VM
     932    jmp     .vmstart64_end
     933ENDPROC VMXRCStartVM64
     934
     935
     936;/**
     937; * Prepares for and executes VMRUN (64 bits guests)
     938; *
     939; * @returns VBox status code
     940; * @param   HCPhysVMCB     Physical address of host VMCB       (rsp+8)
     941; * @param   HCPhysVMCB     Physical address of guest VMCB      (rsp+16)
     942; * @param   pCtx           Guest context                       (rsi)
     943; */
     944BEGINPROC SVMRCVMRun64
     945    push    rbp
     946    mov     rbp, rsp
     947    pushf
     948
     949    ;/* Manual save and restore:
     950    ; * - General purpose registers except RIP, RSP, RAX
     951    ; *
     952    ; * Trashed:
     953    ; * - CR2 (we don't care)
     954    ; * - LDTR (reset to 0)
     955    ; * - DRx (presumably not changed at all)
     956    ; * - DR7 (reset to 0x400)
     957    ; */
     958
     959    ;/* Save the Guest CPU context pointer. */
     960    push    rsi                     ; push for saving the state at the end
     961
     962    ; save host fs, gs, sysenter msr etc
     963    mov     rax, [rbp + 8 + 8]              ; pVMCBHostPhys (64 bits physical address)
     964    push    rax                             ; save for the vmload after vmrun
     965    vmsave
     966
     967    ; setup eax for VMLOAD
     968    mov     rax, [rbp + 8 + 8 + RTHCPHYS_CB]   ; pVMCBPhys (64 bits physical address)
     969
     970    ;/* Restore Guest's general purpose registers. */
     971    ;/* RAX is loaded from the VMCB by VMRUN */
     972    mov     rbx, qword [rsi + CPUMCTX.ebx]
     973    mov     rcx, qword [rsi + CPUMCTX.ecx]
     974    mov     rdx, qword [rsi + CPUMCTX.edx]
     975    mov     rdi, qword [rsi + CPUMCTX.edi]
     976    mov     rbp, qword [rsi + CPUMCTX.ebp]
     977    mov     r8,  qword [rsi + CPUMCTX.r8]
     978    mov     r9,  qword [rsi + CPUMCTX.r9]
     979    mov     r10, qword [rsi + CPUMCTX.r10]
     980    mov     r11, qword [rsi + CPUMCTX.r11]
     981    mov     r12, qword [rsi + CPUMCTX.r12]
     982    mov     r13, qword [rsi + CPUMCTX.r13]
     983    mov     r14, qword [rsi + CPUMCTX.r14]
     984    mov     r15, qword [rsi + CPUMCTX.r15]
     985    mov     rsi, qword [rsi + CPUMCTX.esi]
     986
     987    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
     988    clgi
     989    sti
     990
     991    ; load guest fs, gs, sysenter msr etc
     992    vmload
     993    ; run the VM
     994    vmrun
     995
     996    ;/* RAX is in the VMCB already; we can use it here. */
     997
     998    ; save guest fs, gs, sysenter msr etc
     999    vmsave
     1000
     1001    ; load host fs, gs, sysenter msr etc
     1002    pop     rax                     ; pushed above
     1003    vmload
     1004
     1005    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
     1006    cli
     1007    stgi
     1008
     1009    pop     rax                     ; pCtx
     1010
     1011    mov     qword [rax + CPUMCTX.ebx], rbx
     1012    mov     qword [rax + CPUMCTX.ecx], rcx
     1013    mov     qword [rax + CPUMCTX.edx], rdx
     1014    mov     qword [rax + CPUMCTX.esi], rsi
     1015    mov     qword [rax + CPUMCTX.edi], rdi
     1016    mov     qword [rax + CPUMCTX.ebp], rbp
     1017    mov     qword [rax + CPUMCTX.r8],  r8
     1018    mov     qword [rax + CPUMCTX.r9],  r9
     1019    mov     qword [rax + CPUMCTX.r10], r10
     1020    mov     qword [rax + CPUMCTX.r11], r11
     1021    mov     qword [rax + CPUMCTX.r12], r12
     1022    mov     qword [rax + CPUMCTX.r13], r13
     1023    mov     qword [rax + CPUMCTX.r14], r14
     1024    mov     qword [rax + CPUMCTX.r15], r15
     1025
     1026    mov     eax, VINF_SUCCESS
     1027
     1028    popf
     1029    pop     rbp
     1030    ret
     1031ENDPROC SVMRCVMRun64
     1032
     1033;/**
     1034; * Saves the guest FPU context
     1035; *
     1036; * @returns VBox status code
     1037; * @param   pCtx       Guest context [rsi]
     1038; */
     1039BEGINPROC HMRCSaveGuestFPU64
     1040    mov     rax, cr0
     1041    mov     rcx, rax                    ; save old CR0
     1042    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
     1043    mov     cr0, rax
     1044
     1045    fxsave  [rsi + CPUMCTX.fpu]
     1046
     1047    mov     cr0, rcx                    ; and restore old CR0 again
     1048
     1049    mov     eax, VINF_SUCCESS
     1050    ret
     1051ENDPROC HMRCSaveGuestFPU64
     1052
     1053;/**
     1054; * Saves the guest debug context (DR0-3, DR6)
     1055; *
     1056; * @returns VBox status code
     1057; * @param   pCtx       Guest context [rsi]
     1058; */
     1059BEGINPROC HMRCSaveGuestDebug64
     1060    mov rax, dr0
     1061    mov qword [rsi + CPUMCTX.dr + 0*8], rax
     1062    mov rax, dr1
     1063    mov qword [rsi + CPUMCTX.dr + 1*8], rax
     1064    mov rax, dr2
     1065    mov qword [rsi + CPUMCTX.dr + 2*8], rax
     1066    mov rax, dr3
     1067    mov qword [rsi + CPUMCTX.dr + 3*8], rax
     1068    mov rax, dr6
     1069    mov qword [rsi + CPUMCTX.dr + 6*8], rax
     1070    mov eax, VINF_SUCCESS
     1071    ret
     1072ENDPROC HMRCSaveGuestDebug64
     1073
     1074;/**
     1075; * Dummy callback handler
     1076; *
     1077; * @returns VBox status code
     1078; * @param   param1     Parameter 1 [rsp+8]
     1079; * @param   param2     Parameter 2 [rsp+12]
     1080; * @param   param3     Parameter 3 [rsp+16]
     1081; * @param   param4     Parameter 4 [rsp+20]
     1082; * @param   param5     Parameter 5 [rsp+24]
     1083; * @param   pCtx       Guest context [rsi]
     1084; */
     1085BEGINPROC HMRCTestSwitcher64
     1086    mov eax, [rsp+8]
     1087    ret
     1088ENDPROC HMRCTestSwitcher64
     1089
     1090
     1091
     1092
     1093;
     1094;
     1095; Back to switcher code.
     1096; Back to switcher code.
     1097; Back to switcher code.
     1098;
     1099;
     1100
    4671101
    4681102
     
    4971131    push    rsi
    4981132    COM_NEWLINE
    499     DEBUG_CHAR('b')
    500     DEBUG_CHAR('a')
    501     DEBUG_CHAR('c')
    502     DEBUG_CHAR('k')
    503     DEBUG_CHAR('!')
     1133    COM_CHAR 'b'
     1134    COM_CHAR 'a'
     1135    COM_CHAR 'c'
     1136    COM_CHAR 'k'
     1137    COM_CHAR '!'
    5041138    COM_NEWLINE
    5051139    pop     rsi
     
    5401174GLOBALNAME IDExitTarget
    5411175BITS 32
    542     DEBUG_CHAR('1')
     1176    DEBUG32_CHAR('1')
    5431177
    5441178    ; 1. Deactivate long mode by turning off paging.
     
    5461180    and     ebx, ~X86_CR0_PG
    5471181    mov     cr0, ebx
    548     DEBUG_CHAR('2')
     1182    DEBUG32_CHAR('2')
    5491183
    5501184    ; 2. Load intermediate page table.
     
    5521186    mov     edx, 0ffffffffh
    5531187    mov     cr3, edx
    554     DEBUG_CHAR('3')
     1188    DEBUG32_CHAR('3')
    5551189
    5561190    ; 3. Disable long mode.
    5571191    mov     ecx, MSR_K6_EFER
    5581192    rdmsr
    559     DEBUG_CHAR('5')
     1193    DEBUG32_CHAR('5')
    5601194    and     eax, ~(MSR_K6_EFER_LME)
    5611195    wrmsr
    562     DEBUG_CHAR('6')
     1196    DEBUG32_CHAR('6')
    5631197
    5641198%ifndef NEED_PAE_ON_HOST
     
    5671201    and     eax, ~X86_CR4_PAE
    5681202    mov     cr4, eax
    569     DEBUG_CHAR('7')
     1203    DEBUG32_CHAR('7')
    5701204%endif
    5711205
     
    5751209    jmp short just_a_jump
    5761210just_a_jump:
    577     DEBUG_CHAR('8')
     1211    DEBUG32_CHAR('8')
    5781212
    5791213    ;;
     
    5901224ALIGNCODE(16)
    5911225GLOBALNAME ICExitTarget
    592     DEBUG_CHAR('8')
     1226    DEBUG32_CHAR('8')
    5931227
    5941228    ; load the hypervisor data selector into ds & es
     
    6191253    ; activate host gdt and idt
    6201254    lgdt    [edx + CPUMCPU.Host.gdtr]
    621     DEBUG_CHAR('0')
     1255    DEBUG32_CHAR('0')
    6221256    lidt    [edx + CPUMCPU.Host.idtr]
    623     DEBUG_CHAR('1')
     1257    DEBUG32_CHAR('1')
    6241258
    6251259    ; Restore TSS selector; must mark it as not busy before using ltr (!)
     
    6321266
    6331267    ; activate ldt
    634     DEBUG_CHAR('2')
     1268    DEBUG32_CHAR('2')
    6351269    lldt    [edx + CPUMCPU.Host.ldtr]
    6361270
  • trunk/src/VBox/VMM/include/HMInternal.h

    r45781 r45786  
    316316    /** 32 to 64 bits switcher entrypoint. */
    317317    R0PTRTYPE(PFNHMSWITCHERHC)  pfnHost32ToGuest64R0;
    318 
    319     /* AMD-V 64 bits vmrun handler */
    320     RTRCPTR                     pfnSVMGCVMRun64;
    321 
    322     /* VT-x 64 bits vmlaunch handler */
    323     RTRCPTR                     pfnVMXGCStartVM64;
    324 
    325     /* RC handler to setup the 64 bits FPU state. */
    326     RTRCPTR                     pfnSaveGuestFPU64;
    327 
    328     /* RC handler to setup the 64 bits debug state. */
    329     RTRCPTR                     pfnSaveGuestDebug64;
    330 
    331     /* Test handler */
    332     RTRCPTR                     pfnTest64;
    333 
    334     RTRCPTR                     uAlignment[2];
    335 /*#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    336     uint32_t                    u32Alignment[1]; */
     318    RTR0PTR                     uPadding2;
    337319#endif
    338320
  • trunk/src/VBox/VMM/include/VMMSwitcher.mac

    r45701 r45786  
    128128
    129129%ifdef DEBUG_STUFF
    130     %define DEBUG_CHAR(ch) COM_CHAR ch
    131     %define DEBUG_S_CHAR(ch) COM_S_CHAR ch
     130    %define DEBUG_CHAR(ch)      COM_CHAR ch
     131    %define DEBUG32_CHAR(ch)    COM_CHAR ch
     132    %define DEBUG64_CHAR(ch)    COM_CHAR ch
     133    %define DEBUG_S_CHAR(ch)    COM_S_CHAR ch
     134    %define DEBUG32_S_CHAR(ch)  COM32_S_CHAR ch
     135    %define DEBUG64_S_CHAR(ch)  COM64_S_CHAR ch
    132136%else
    133137    %define DEBUG_CHAR(ch)
     138    %define DEBUG32_CHAR(ch)
     139    %define DEBUG64_CHAR(ch)
    134140    %define DEBUG_S_CHAR(ch)
     141    %define DEBUG32_S_CHAR(ch)
     142    %define DEBUG64_S_CHAR(ch)
    135143%endif
    136144