VirtualBox

Changeset 46444 in vbox for trunk/src/VBox/VMM


Timestamp:
Jun 7, 2013 5:02:46 PM
Author:
vboxsync
Message:

VMM/HMSVMR0: AMD-V bits.

Location:
trunk/src/VBox/VMM
Files:
3 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r46442 r46444  
    776776 * @param   pVM         Pointer to the VM.
    777777 * @param   pVCpu       Pointer to the VMCPU.
     778 *
     779 * @remarks No-long-jump zone!!!
    778780 */
    779781VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
     
    786788
    787789
     790DECLINLINE(void) hmR0VmxSvmAddXcptIntercept(uint32_t u32Xcpt)
     791{
     792    if (!(pVmcb->ctrl.u32InterceptException & u32Xcpt))
     793    {
     794        pVmcb->ctrl.u32InterceptException |= u32Xcpt;
     795        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_INTERCEPT_VECTORS;
     796    }
     797}
     798
     799DECLINLINE(void) hmR0VmxSvmRemoveXcptIntercept(uint32_t u32Xcpt)
     800{
     801    if (pVmcb->ctrl.u32InterceptException & u32Xcpt)
     802    {
     803        pVmcb->ctrl.u32InterceptException &= ~u32Xcpt;
     804        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_INTERCEPT_VECTORS;
     805    }
     806}
     807
     808
     809/**
     810 * Loads the guest control registers (CR0, CR2, CR3, CR4) into the VMCB.
     811 *
     812 * @returns VBox status code.
     813 * @param   pVCpu       Pointer to the VMCPU.
     814 * @param   pCtx        Pointer the guest-CPU context.
     815 *
     816 * @remarks No-long-jump zone!!!
     817 */
     818static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
     819{
     820    /*
     821     * Guest CR0.
     822     */
     823    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     824    {
     825        uint64_t u64GuestCR0 = pCtx->cr0;
     826
     827        /* Always enable caching. */
     828        u64GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW);
     829
     830        /*
     831         * With Nested Paging, the guest is allowed to run with paging disabled; guest-physical to host-physical translations
     832         * will remain active through the Nested CR3. AMD supports paged real mode; see AMD spec. 15.19 "Paged Real Mode".
     833         */
     834        if (!pVM->hm.s.fNestedPaging)
     835        {
     836            u64GuestCR0 |= X86_CR0_PG;  /* When Nested Paging is not available use shadow page tables. */
     837            u64GuestCR0 |= X86_CR0_WP;  /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
     838        }
     839
     840        /*
     841         * Guest FPU bits.
     842         */
     843        bool fInterceptNM = false;
     844        bool fInterceptMF = false;
     845        u64GuestCR0 |= X86_CR0_NE;         /* Use internal x87 FPU exceptions handling rather than external interrupts. */
     846        if (CPUMIsGuestFPUStateActive(pVCpu))
     847        {
     848            /* Catch floating point exceptions if we need to report them to the guest in a different way. */
     849            if (!(u64GuestCR0 & X86_CR0_NE))
     850            {
     851                Log4(("hmR0SvmLoadGuestControlRegs: Intercepting Guest CR0.MP Old-style FPU handling!!!\n"));
     852                pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
     853                fInterceptMF = true;
     854            }
     855        }
     856        else
     857        {
     858            fInterceptNM = true;           /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
     859            u64GuestCR0 |=  X86_CR0_TS     /* Guest can task switch quickly and do lazy FPU syncing. */
     860                          | X86_CR0_MP;    /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
     861        }
     862
     863        /*
     864         * Update the exception intercept bitmap.
     865         */
     866        if (fInterceptNM)
     867            hmR0VmxSvmAddXcptIntercept(RT_BIT(X86_XCPT_NM));
     868        else
     869            hmR0VmxSvmRemoveXcptIntercept(RT_BIT(X86_XCPT_NM));
     870
     871        if (fInterceptMF)
     872            hmR0VmxSvmAddXcptIntercept(RT_BIT(X86_XCPT_MF));
     873        else
     874            hmR0VmxSvmRemoveXcptIntercept(RT_BIT(X86_XCPT_MF));
     875
     876        pVmcb->guest.u64CR0 = u64GuestCR0;
     877        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     878    }
     879
     880    /*
     881     * Guest CR2.
     882     */
     883    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR2)
     884    {
     885        pVmcb->guest.u64CR2 = pCtx->cr2;
     886        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
     887    }
     888
     889    /*
     890     * Guest CR3.
     891     */
     892    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
     893    {
     894        if (pVM->hm.s.fNestedPaging)
     895        {
     896            PGMMODE enmShwPagingMode;
     897#if HC_ARCH_BITS == 32
     898            if (CPUMIsGuestInLongModeEx(pCtx))
     899                enmShwPagingMode = PGMMODE_AMD64_NX;
     900            else
     901#endif
     902                enmShwPagingMode = PGMGetHostMode(pVM);
     903
     904            pVmcb->ctrl.u64NestedPagingCR3  = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
     905            Assert(pVmcb->ctrl.u64NestedPagingCR3);
     906            pVmcb->guest.u64CR3 = pCtx->cr3;
     907        }
     908        else
     909            pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
     910
     911        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
     912    }
     913
     914    /*
     915     * Guest CR4.
     916     */
     917    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
     918    {
     919        uint64_t u64GuestCR4 = pCtx->cr4;
     920        if (!pVM->hm.s.fNestedPaging)
     921        {
     922            switch (pVCpu->hm.s.enmShadowMode)
     923            {
     924                case PGMMODE_REAL:
     925                case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
     926                    AssertFailed();
     927                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     928
     929                case PGMMODE_32_BIT:        /* 32-bit paging. */
     930                    u64GuestCR4 &= ~X86_CR4_PAE;
     931                    break;
     932
     933                case PGMMODE_PAE:           /* PAE paging. */
     934                case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
     935                    /** Must use PAE paging as we could use physical memory > 4 GB */
     936                    u64GuestCR4 |= X86_CR4_PAE;
     937                    break;
     938
     939                case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
     940                case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
     941#ifdef VBOX_ENABLE_64_BITS_GUESTS
     942                    break;
     943#else
     944                    AssertFailed();
     945                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     946#endif
     947
     948                default:                    /* shut up gcc */
     949                    AssertFailed();
     950                    return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     951            }
     952        }
     953
     954        pVmcb->guest.u64CR4 = u64GuestCR4;
     955        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     956    }
     957
     958    return VINF_SUCCESS;
     959}
     960
    788961/**
    789962 * Loads the guest segment registers into the VMCB.
     
    795968 * @remarks No-long-jump zone!!!
    796969 */
    797 static int hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
     970static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
    798971{
    799972    /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
     
    8391012        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
    8401013    }
    841 
    842     return VINF_SUCCESS;
    8431014}
    8441015
     
    8641035    pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
    8651036
    866     /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */
    867     if (!CPUMIsGuestInLongModeEx(pCtx))
     1037    /* 64-bit MSRs. */
     1038    if (CPUMIsGuestInLongModeEx(pCtx))
     1039    {
     1040        pVmcb->guest.FS.u64Base      = pCtx->fs.u64Base;
     1041        pVmcb->guest.GS.u64Base      = pCtx->gs.u64Base;
     1042    }
     1043    else
     1044    {
     1045        /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */
    8681046        pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME;
     1047    }
     1048
     1049    /** @todo The following are used in 64-bit only (SYSCALL/SYSRET) but they might
     1050     *        be writable in 32-bit mode. Clarify with AMD spec. */
     1051    pVmcb->guest.u64STAR         = pCtx->msrSTAR;
     1052    pVmcb->guest.u64LSTAR        = pCtx->msrLSTAR;
     1053    pVmcb->guest.u64CSTAR        = pCtx->msrCSTAR;
     1054    pVmcb->guest.u64SFMASK       = pCtx->msrSFMASK;
     1055    pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
    8691056}
    8701057
     
    9311118    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9321119
    933     rc = hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
    934     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestSegmentRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    935 
     1120    hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
    9361121    hmR0SvmLoadGuestMsrs(pVCpu, pCtx);
    9371122
     
    9481133    AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    9491134
    950 
    9511135    /* Clear any unused and reserved bits. */
    9521136    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_SYSENTER_CS_MSR
     
    9541138                                      | HM_CHANGED_GUEST_SYSENTER_ESP_MSR);
    9551139
     1140    AssertMsg(!pVCpu->hm.s.fContextUseFlags,
     1141             ("Missed updating flags while loading guest state. pVM=%p pVCpu=%p fContextUseFlags=%#RX32\n",
     1142              pVM, pVCpu, pVCpu->hm.s.fContextUseFlags));
    9561143
    9571144    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
    9581145
    959 }
    960 
     1146    return rc;
     1147}
     1148
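    The new hmR0VmxSvmAddXcptIntercept / hmR0VmxSvmRemoveXcptIntercept helpers above read pVmcb and pVCpu without receiving them as parameters, and hmR0SvmLoadGuestControlRegs likewise uses pVM and pVmcb that are not in its argument list, so the excerpt does not compile as shown (the committed code presumably obtains them from pVCpu). A minimal self-contained sketch of the same add/remove-intercept pattern with the VCPU and VMCB passed explicitly is below; the parameter lists and the PSVMVMCB type are illustrative assumptions, not the committed signatures:

        /* Hypothetical, self-contained variant of the exception-intercept helpers.
           The explicit pVCpu/pVmcb parameters are assumptions for illustration only. */
        DECLINLINE(void) svmAddXcptIntercept(PVMCPU pVCpu, PSVMVMCB pVmcb, uint32_t u32Xcpt)
        {
            if (!(pVmcb->ctrl.u32InterceptException & u32Xcpt))
            {
                pVmcb->ctrl.u32InterceptException |= u32Xcpt;                          /* Start intercepting the exception. */
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_INTERCEPT_VECTORS;      /* Mark the VMCB intercept fields dirty. */
            }
        }

        DECLINLINE(void) svmRemoveXcptIntercept(PVMCPU pVCpu, PSVMVMCB pVmcb, uint32_t u32Xcpt)
        {
            if (pVmcb->ctrl.u32InterceptException & u32Xcpt)
            {
                pVmcb->ctrl.u32InterceptException &= ~u32Xcpt;                         /* Stop intercepting the exception. */
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_INTERCEPT_VECTORS;
            }
        }

    Usage would mirror the CR0 code above: intercept #NM while the guest FPU state is not loaded (lazy FPU switching), intercept #MF when old-style FPU error reporting has to be reflected to the guest, and remove either intercept once it is no longer needed.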
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r46420 r46444  
    838838
    839839                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
    840                 if (!pVCpu->hm.s.fFPUOldStyleOverride)
    841                 {
    842                     pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
    843                     pVCpu->hm.s.fFPUOldStyleOverride = true;
    844                 }
     840                pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
    845841            }
    846842            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
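    The HWSVMR0.cpp hunk drops the fFPUOldStyleOverride once-only gate: setting a bit in u32InterceptException is idempotent, so the flag bought nothing and the #MF intercept is now simply ORed in whenever guest CR0 is synced with CR0.NE clear. A hedged sketch of the resulting logic, using names visible in the surrounding excerpt (not a verbatim quote of the file):

        /* Sketch: reflect old-style guest FPU error reporting while the guest FPU state is loaded. */
        if (CPUMIsGuestFPUStateActive(pVCpu))
        {
            if (!(val & X86_CR0_NE))
                pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);  /* Old-style (IRQ13) reporting: reflect FP errors via #MF. */
        }
        val |= X86_CR0_NE;  /* CR0 value handed to the VMCB always uses the native FPU error reporting mechanism. */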
  • trunk/src/VBox/VMM/include/HMInternal.h

    r46442 r46444  
    552552typedef struct HMCPU
    553553{
    554     /** Old style FPU reporting trap mask override performed (optimization) */
    555     bool                        fFPUOldStyleOverride;
    556554    /** Set if we don't have to flush the TLB on VM entry. */
    557555    bool                        fResumeVM;
     
    562560    /** Set when the TLB has been checked until we return from the world switch. */
    563561    volatile bool               fCheckedTLBFlush;
    564     uint8_t                     u8Alignment[3];
     562    uint8_t                     u8Alignment[4];
    565563
    566564    /** World switch exit counter. */
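    The HMInternal.h hunk removes the now-unused fFPUOldStyleOverride bool and grows the explicit padding from u8Alignment[3] to u8Alignment[4] so the members that follow (starting with the world switch exit counter) keep their offsets and no hidden compiler padding appears. A compile-time check along these lines is the usual way such padding edits are kept honest; the struct and assertion below are a cut-down illustration, not the real HMCPU (which is checked with VirtualBox's AssertCompile macros):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        /* Hypothetical, cut-down layout mirroring the edited region of HMCPU. */
        typedef struct DEMOHMCPU
        {
            bool            fResumeVM;
            bool            fPadA, fPadB;        /* stand-ins for the members elided in the hunk */
            volatile bool   fCheckedTLBFlush;
            uint8_t         u8Alignment[4];      /* was [3] while fFPUOldStyleOverride still existed */
            uint32_t        cWorldSwitchExits;   /* "World switch exit counter." */
        } DEMOHMCPU;

        /* If u8Alignment were still [3] after dropping a bool, the compiler would insert a
           hidden byte before the counter and this size check would fail. */
        _Static_assert(sizeof(DEMOHMCPU) == 4 * sizeof(bool) + 4 + sizeof(uint32_t),
                       "no hidden compiler padding in the demo layout");

    In practice the compiler would insert the missing byte itself, but keeping the padding explicit makes the layout identical across compilers and keeps structure-size assertions valid.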