VirtualBox

Changeset 46441 in vbox for trunk/src/VBox/VMM


Timestamp: Jun 7, 2013 1:38:58 PM
Author:    vboxsync
Message:   VMM/HMSVMR0: AMD-V bits.
Location:  trunk/src/VBox/VMM
Files:     2 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r46420 r46441  
    2929*   Defined Constants And Macros                                               *
    3030*******************************************************************************/
    31 
    32 /**
    33  * MSR-bitmap read permissions.
     31/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
     32 *
     33 * The CPU format of the segment attribute is described in X86DESCATTRBITS,
     34 * which is 16 bits wide (i.e. it includes 4 bits of the segment limit).
     35 *
     36 * In the AMD-V VMCB format, the segment attribute is a compact 12-bit value
     37 * (strictly only the attribute bits and nothing else); the upper 4 bits are unused.
     38 *
     39 * @{ */
     40#define HMSVM_CPU_2_VMCB_SEG_ATTR(a)       ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
     41#define HMSVM_VMCB_2_CPU_SEG_ATTR(a)       ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )
     42/** @} */
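
A minimal standalone sketch of how the conversion round-trips (an editor's
illustration, not part of the changeset; the attribute value 0xc093 is a
made-up example). The macros move attribute bits 12-15 of the CPU format down
into bits 8-11 of the compact VMCB format, and back:

    #include <assert.h>
    #include <stdint.h>

    #define HMSVM_CPU_2_VMCB_SEG_ATTR(a)   ( ((a) & 0xff) | (((a) & 0xf000) >> 4) )
    #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)   ( ((a) & 0xff) | (((a) & 0x0f00) << 4) )

    int main(void)
    {
        /* CPU format: type/S/DPL/P in bits 0-7, segment-limit bits 19:16 in
           bits 8-11, AVL/L/D/G in bits 12-15 (see X86DESCATTRBITS). */
        uint16_t uCpuAttr  = 0xc093;
        uint16_t uVmcbAttr = HMSVM_CPU_2_VMCB_SEG_ATTR(uCpuAttr);
        assert(uVmcbAttr == 0x0c93);   /* bits 14-15 moved into bits 10-11 */
        /* The round trip restores everything except limit bits 8-11, which
           the compact VMCB format does not carry. */
        assert(HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr) == (uCpuAttr & 0xf0ff));
        return 0;
    }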
     43
     44/** @name Macros for loading and storing segment registers to/from the VMCB.
     45 *  @{ */
     46#define HMSVM_LOAD_SEG_REG(REG, reg) \
     47    do \
     48    { \
     49        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
     50        Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
     51        pVmcb->guest.REG.u16Sel     = pCtx->reg.Sel; \
     52        pVmcb->guest.REG.u32Limit   = pCtx->reg.u32Limit; \
     53        pVmcb->guest.REG.u64Base    = pCtx->reg.u64Base; \
     54        pVmcb->guest.REG.u16Attr    = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
     55    } while (0)
     56
     57#define HMSVM_SAVE_SEG_REG(REG, reg) \
     58    do \
     59    { \
     60        pCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
     61        pCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
     62        pCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
     63        pCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
     64        pCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
     65        pCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
     66    } while (0)
     67/** @}  */
     68
     69/**
     70 * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
    3471 */
    3572typedef enum SVMMSREXITREAD
     
    4279
    4380/**
    44  * MSR-bitmap write permissions.
     81 * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
    4582 */
    4683typedef enum SVMMSREXITWRITE
     
    5693*   Internal Functions                                                         *
    5794*******************************************************************************/
    58 static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
     95static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
    5996
    6097
     
    203240    {
    204241        PVMCPU pVCpu = &pVM->aCpus[i];
     242        AssertPtr(pVCpu);
    205243
    206244        if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
    207245        {
    208246            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
    209             pVCpu->hm.s.svm.pvVmcbHost      = 0;
    210             pVCpu->hm.s.svm.HCPhysVmcbHost  = 0;
    211             pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
     247            pVCpu->hm.s.svm.pvVmcbHost       = 0;
     248            pVCpu->hm.s.svm.HCPhysVmcbHost   = 0;
     249            pVCpu->hm.s.svm.hMemObjVmcbHost  = NIL_RTR0MEMOBJ;
    212250        }
    213251
     
    215253        {
    216254            RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
    217             pVCpu->hm.s.svm.pvVmcb      = 0;
    218             pVCpu->hm.s.svm.HCPhysVmcb  = 0;
    219             pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
     255            pVCpu->hm.s.svm.pvVmcb           = 0;
     256            pVCpu->hm.s.svm.HCPhysVmcb       = 0;
     257            pVCpu->hm.s.svm.hMemObjVmcb      = NIL_RTR0MEMOBJ;
    220258        }
    221259
     
    241279    int rc = VERR_INTERNAL_ERROR_5;
    242280
    243     /* Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch. */
     281    /*
     282     * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
     283     */
    244284    uint32_t u32Family;
    245285    uint32_t u32Model;
     
    251291    }
    252292
    253     /* Initialize the memory objects up-front so we can cleanup on allocation failures properly. */
    254     for (uint32_t i = 0; i < pVM->cCpus; i++)
     293    /*
     294     * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
     295     */
     296    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    255297    {
    256298        PVMCPU pVCpu = &pVM->aCpus[i];
     
    260302    }
    261303
    262     /* Allocate a VMCB for each VCPU. */
    263     for (uint32_t i = 0; i < pVM->cCpus; i++)
    264     {
    265         /* Allocate one page for the host context */
     304    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     305    {
     306        /*
     307         * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
     308         * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
     309         */
    266310        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
    267311        if (RT_FAILURE(rc))
     
    273317        ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
    274318
    275         /* Allocate one page for the VM control block (VMCB). */
     319        /*
     320         * Allocate one page for the guest-state VMCB.
     321         */
    276322        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
    277323        if (RT_FAILURE(rc))
    278324            goto failure_cleanup;
    279325
    280         pVCpu->hm.s.svm.pvVmcb     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
    281         pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
     326        pVCpu->hm.s.svm.pvVmcb          = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
     327        pVCpu->hm.s.svm.HCPhysVmcb      = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
    282328        Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
    283329        ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
    284330
    285         /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
     331        /*
     332     * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
     333     * SVM not to require one.
     334         */
    286335        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
    287336        if (RT_FAILURE(rc))
     
    290339        pVCpu->hm.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
    291340        pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
    292         /* Set all bits to intercept all MSR accesses. */
     341        /* Set all bits to intercept all MSR accesses (changed later on). */
    293342        ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
    294343    }
     
    316365
    317366/**
     367 * Sets the permission bits for the specified MSR in the MSRPM.
     368 *
     369 * @param   pVCpu       Pointer to the VMCPU.
      370 * @param   uMsr        The MSR whose permission bits to set.
      371 * @param   enmRead     MSR read permission (intercept or pass-through).
      372 * @param   enmWrite    MSR write permission (intercept or pass-through).
     373 */
     374static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
     375{
     376    unsigned ulBit;
     377    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
     378
     379    /*
     380     * Layout:
     381     * Byte offset       MSR range
     382     * 0x000  - 0x7ff    0x00000000 - 0x00001fff
     383     * 0x800  - 0xfff    0xc0000000 - 0xc0001fff
     384     * 0x1000 - 0x17ff   0xc0010000 - 0xc0011fff
     385     * 0x1800 - 0x1fff           Reserved
     386     */
     387    if (uMsr <= 0x00001FFF)
     388    {
     389        /* Pentium-compatible MSRs */
     390        ulBit    = uMsr * 2;
     391    }
     392    else if (   uMsr >= 0xC0000000
     393             && uMsr <= 0xC0001FFF)
     394    {
     395        /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
     396        ulBit = (uMsr - 0xC0000000) * 2;
     397        pbMsrBitmap += 0x800;
     398    }
     399    else if (   uMsr >= 0xC0010000
     400             && uMsr <= 0xC0011FFF)
     401    {
     402        /* AMD Seventh and Eighth Generation Processor MSRs */
      403        ulBit = (uMsr - 0xC0010000) * 2;
     404        pbMsrBitmap += 0x1000;
     405    }
     406    else
     407    {
     408        AssertFailed();
     409        return;
     410    }
     411
     412    Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
     413    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
     414        ASMBitSet(pbMsrBitmap, ulBit);
     415    else
     416        ASMBitClear(pbMsrBitmap, ulBit);
     417
     418    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
     419        ASMBitSet(pbMsrBitmap, ulBit + 1);
     420    else
     421        ASMBitClear(pbMsrBitmap, ulBit + 1);
     422}
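
A minimal standalone sketch of the MSRPM offset math described in the layout
comment above (an editor's illustration, not part of the changeset), using
MSR_K8_LSTAR (0xC0000082), one of the MSRs the changeset marks pass-through
further down:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t uMsr = 0xC0000082;                  /* MSR_K8_LSTAR */
        /* Second MSRPM range: byte offset 0x800, two bits (read, write) per MSR. */
        uint32_t ulBit   = (uMsr - 0xC0000000) * 2;  /* 0x104 */
        uint32_t offByte = 0x800 + (ulBit / 8);      /* 0x820 */
        assert(offByte == 0x820);
        assert((ulBit % 8) == 4);                    /* read-intercept bit */
        assert(((ulBit + 1) % 8) == 5);              /* write bit follows the read bit */
        return 0;
    }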
     423
     424
     425/**
    318426 * Sets up AMD-V for the specified VM.
    319427 * This function is called only once per VM, during initialization.
     
    332440    {
    333441        PVMCPU   pVCpu = &pVM->aCpus[i];
    334         PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
      442        PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
    335443
    336444        AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
     
    432540         * Don't intercept guest read/write accesses to these MSRs.
    433541         */
    434         hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    435         hmR0SvmSetMSRPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    436         hmR0SvmSetMSRPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    437         hmR0SvmSetMSRPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    438         hmR0SvmSetMSRPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    439         hmR0SvmSetMSRPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    440         hmR0SvmSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    441         hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    442         hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    443         hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     542        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     543        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     544        hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     545        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     546        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     547        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     548        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     549        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     550        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     551        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    444552    }
    445553
    446554    return rc;
    447 }
    448 
    449 
    450 /**
    451  * Sets the permission bits for the specified MSR.
    452  *
    453  * @param   pVCpu       Pointer to the VMCPU.
    454  * @param   uMsr       The MSR.
    455  * @param   fRead       Whether reading is allowed.
    456  * @param   fWrite      Whether writing is allowed.
    457  */
    458 static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
    459 {
    460     unsigned ulBit;
    461     uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
    462 
    463     /*
    464      * Layout:
    465      * Byte offset       MSR range
    466      * 0x000  - 0x7ff    0x00000000 - 0x00001fff
    467      * 0x800  - 0xfff    0xc0000000 - 0xc0001fff
    468      * 0x1000 - 0x17ff   0xc0010000 - 0xc0011fff
    469      * 0x1800 - 0x1fff           Reserved
    470      */
    471     if (uMsr <= 0x00001FFF)
    472     {
    473         /* Pentium-compatible MSRs */
    474         ulBit    = uMsr * 2;
    475     }
    476     else if (   uMsr >= 0xC0000000
    477              && uMsr <= 0xC0001FFF)
    478     {
    479         /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
    480         ulBit = (uMsr - 0xC0000000) * 2;
    481         pbMsrBitmap += 0x800;
    482     }
    483     else if (   uMsr >= 0xC0010000
    484              && uMsr <= 0xC0011FFF)
    485     {
    486         /* AMD Seventh and Eighth Generation Processor MSRs */
    487         ulBit = (uMsr - 0xC0001000) * 2;
    488         pbMsrBitmap += 0x1000;
    489     }
    490     else
    491     {
    492         AssertFailed();
    493         return;
    494     }
    495 
    496     Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
    497     if (enmRead == SVMMSREXIT_INTERCEPT_READ)
    498         ASMBitSet(pbMsrBitmap, ulBit);
    499     else
    500         ASMBitClear(pbMsrBitmap, ulBit);
    501 
    502     if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
    503         ASMBitSet(pbMsrBitmap, ulBit + 1);
    504     else
    505         ASMBitClear(pbMsrBitmap, ulBit + 1);
    506555}
    507556
     
    516565{
    517566    PVM pVM              = pVCpu->CTX_SUFF(pVM);
    518     PSVMVMCB pVmcb       = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
      567    PSVMVMCB pVmcb       = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    519568    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
    520569
     
    647696
    648697
    649 
     698/** @name 64-bit guest on 32-bit host OS helper functions.
     699 *
     700 * The host CPU is still 64-bit capable but the host OS is running in 32-bit
      701 * mode (code segment, paging). These wrappers/helpers perform the necessary
      702 * work for the 32->64 switcher.
     703 *
     704 * @{ */
    650705#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    651706/**
     
    712767
    713768#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
    714 
     769/** @} */
     770
     771
     772/**
     773 * Saves the host state.
     774 *
     775 * @returns VBox status code.
     776 * @param   pVM         Pointer to the VM.
     777 * @param   pVCpu       Pointer to the VMCPU.
     778 */
     779VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
     780{
     781    NOREF(pVM);
     782    NOREF(pVCpu);
     783    /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
     784    return VINF_SUCCESS;
     785}
     786
     787
     788/**
     789 * Loads the guest segment registers into the VMCB.
     790 *
     791 * @returns VBox status code.
     792 * @param   pVCpu       Pointer to the VMCPU.
     793 * @param   pCtx        Pointer to the guest-CPU context.
     794 */
     795static int hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
      796{
      797    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
      798
      799    /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
     798    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
     799    {
     800        HMSVM_LOAD_SEG_REG(CS, cs);
      801        HMSVM_LOAD_SEG_REG(SS, ss);
      802        HMSVM_LOAD_SEG_REG(DS, ds);
      803        HMSVM_LOAD_SEG_REG(ES, es);
      804        HMSVM_LOAD_SEG_REG(FS, fs);
      805        HMSVM_LOAD_SEG_REG(GS, gs);
     806
     807        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
     808    }
     809
     810    /* Guest TR. */
     811    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
     812    {
     813        HMSVM_LOAD_SEG_REG(TR, tr);
     814        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
     815    }
     816
     817    /* Guest LDTR. */
     818    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
     819    {
     820        HMSVM_LOAD_SEG_REG(LDTR, ldtr);
     821        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
     822    }
     823
     824    /* Guest GDTR. */
     825    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
     826    {
     827        pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
     828        pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
     829        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
     830    }
     831
     832    /* Guest IDTR. */
     833    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
     834    {
     835        pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
     836        pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
     837        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
     838    }
     839
     840    return VINF_SUCCESS;
     841}
     842
     843
     844/**
     845 * Loads the guest state.
     846 *
     847 * @returns VBox status code.
     848 * @param   pVM         Pointer to the VM.
     849 * @param   pVCpu       Pointer to the VMCPU.
     850 * @param   pCtx        Pointer to the guest-CPU context.
     851 */
     852VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     853{
     854    AssertPtr(pVM);
     855    AssertPtr(pVCpu);
      856    AssertPtr(pCtx);
     857    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     858
     859    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     860    AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
     861
     862    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
     863
     864    int rc = hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
     865    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestSegmentRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     866
      867    /* -XXX- todo */
      868
      869    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
      870
      871    return rc;
      872}
     873
  • trunk/src/VBox/VMM/include/HMInternal.h

    r46381 r46441  
    7676#define MASK_INJECT_IRQ_STAT       0xff
    7777
    78 /** @name Changed flags
     78/** @name HM changed flags.
    7979 * These flags are used to keep track of which important registers have
    8080 * changed since they were last reset.
     
    703703    struct
    704704    {
    705         /** R0 memory object for the host VM control block (VMCB). */
     705        /** R0 memory object for the host VMCB which holds additional host-state. */
    706706        RTR0MEMOBJ                  hMemObjVmcbHost;
    707         /** Physical address of the host VM control block (VMCB). */
     707        /** Physical address of the host VMCB which holds additional host-state. */
    708708        RTHCPHYS                    HCPhysVmcbHost;
    709         /** Virtual address of the host VM control block (VMCB). */
     709        /** Virtual address of the host VMCB which holds additional host-state. */
    710710        R0PTRTYPE(void *)           pvVmcbHost;
    711711
    712         /** R0 memory object for the VM control block (VMCB). */
     712        /** R0 memory object for the guest VMCB. */
    713713        RTR0MEMOBJ                  hMemObjVmcb;
    714         /** Physical address of the VM control block (VMCB). */
     714        /** Physical address of the guest VMCB. */
    715715        RTHCPHYS                    HCPhysVmcb;
    716         /** Virtual address of the VM control block (VMCB). */
     716        /** Virtual address of the guest VMCB. */
    717717        R0PTRTYPE(void *)           pvVmcb;
    718718
     
    720720        PFNHMSVMVMRUN               pfnVMRun;
    721721
    722         /** R0 memory object for the MSR bitmap (8kb). */
     722        /** R0 memory object for the MSR bitmap (8 KB). */
    723723        RTR0MEMOBJ                  hMemObjMsrBitmap;
    724         /** Physical address of the MSR bitmap (8kb). */
     724        /** Physical address of the MSR bitmap (8 KB). */
    725725        RTHCPHYS                    HCPhysMsrBitmap;
    726726        /** Virtual address of the MSR bitmap. */