VirtualBox

Changeset 9897 in vbox


Timestamp: Jun 25, 2008 8:02:49 AM
Author: vboxsync
Message:

Updates for executing 64-bit guest code with AMD-V.

Location: trunk
Files: 4 edited

Legend: unchanged lines carry both the old and new line number; removed lines ('-') carry only the old number; added lines ('+') carry only the new number.
  • trunk/include/VBox/hwacc_svm.h

    r9115 → r9897

      669   669
      670   670   /**
      671         - * Prepares for and executes VMRUN.
            671   + * Prepares for and executes VMRUN (32 bits guests).
      672   672    *
      673   673    * @returns VBox status code.
      …
      679   679
      680   680   /**
            681   + * Prepares for and executes VMRUN (64 bits guests).
            682   + *
            683   + * @returns VBox status code.
            684   + * @param   pVMCBHostPhys   Physical address of host VMCB.
            685   + * @param   pVMCBPhys       Physical address of the VMCB.
            686   + * @param   pCtx            Guest context.
            687   + */
            688   +DECLASM(int) SVMVMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx);
            689   +
            690   +/**
      681   691    * Executes INVLPGA.
      682   692    *
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r9457 → r9897

      276   276           /** Virtual address of the MSR bitmap. */
      277   277           R0PTRTYPE(void *)           pMSRBitmap;
            278   +
            279   +        /** Ring 0 handler for AMD-V. */
            280   +        DECLR0CALLBACKMEMBER(int, pfnVMRun,(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx));
      278   281
      279   282           /** SVM revision. */
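
    The new member exploits the fact that SVMVMRun and SVMVMRun64 share one signature, so the guest-mode decision is made once when guest state is synced and every subsequent world switch is a plain indirect call. A minimal sketch of the pattern, with the VBox types (RTHCPHYS, CPUMCTX, DECLR0CALLBACKMEMBER) replaced by hypothetical plain-C stand-ins:

    #include <stdint.h>

    typedef uint64_t HostPhysAddr;        /* stand-in for RTHCPHYS */
    typedef struct GuestCtx GuestCtx;     /* stand-in for CPUMCTX  */

    /* Both entry points share this signature, so either can be stored. */
    typedef int (*PFNVMRUN)(HostPhysAddr VMCBHostPhys, HostPhysAddr VMCBPhys,
                            GuestCtx *pCtx);

    int SVMVMRun(HostPhysAddr, HostPhysAddr, GuestCtx *);    /* 32 bits guests */
    int SVMVMRun64(HostPhysAddr, HostPhysAddr, GuestCtx *);  /* 64 bits guests */

    struct SvmState {
        PFNVMRUN pfnVMRun;   /* selected once, called on every world switch */
    };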
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm

    r9896 → r9897

      413   413   %ifdef RT_ARCH_AMD64
      414   414   ;/**
      415         -; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
            415   +; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
      416   416   ; *
      417   417   ; * @returns VBox status code
      …
      821   821
      822   822   ;/**
      823         -; * Prepares for and executes VMRUN
            823   +; * Prepares for and executes VMRUN (32 bits guests)
      824   824   ; *
      825   825   ; * @returns VBox status code
      …
      829   829   ; */
      830   830   BEGINPROC SVMVMRun
      831         -%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame - I'm lazy, sosume.
            831   +%ifdef RT_ARCH_AMD64 ; fake a cdecl stack frame
      832   832    %ifdef ASM_CALL64_GCC
      833   833       push    rdx
      …
      927   927   ENDPROC SVMVMRun
      928   928
            929   +%ifdef RT_ARCH_AMD64
            930   +;/**
            931   +; * Prepares for and executes VMRUN (64 bits guests)
            932   +; *
            933   +; * @returns VBox status code
            934   +; * @param   pVMCBHostPhys  Physical address of host VMCB
            935   +; * @param   pVMCBPhys      Physical address of guest VMCB
            936   +; * @param   pCtx           Guest context
            937   +; */
            938   +BEGINPROC SVMVMRun64
            939   +    ; fake a cdecl stack frame
            940   + %ifdef ASM_CALL64_GCC
            941   +    push    rdx
            942   +    push    rsi
            943   +    push    rdi
            944   + %else
            945   +    push    r8
            946   +    push    rdx
            947   +    push    rcx
            948   + %endif
            949   +    push    0                   ; fake return address
            950   +    push    rbp
            951   +    mov     rbp, rsp
            952   +    pushf
            953   +
            954   +    ;/* Manual save and restore:
            955   +    ; * - General purpose registers except RIP, RSP, RAX
            956   +    ; *
            957   +    ; * Trashed:
            958   +    ; * - CR2 (we don't care)
            959   +    ; * - LDTR (reset to 0)
            960   +    ; * - DRx (presumably not changed at all)
            961   +    ; * - DR7 (reset to 0x400)
            962   +    ; */
            963   +
            964   +    ;/* Save all general purpose host registers. */
            965   +    MYPUSHAD
            966   +
            967   +    ;/* Save the Guest CPU context pointer. */
            968   +    mov     rsi, [rbp + xS*2 + RTHCPHYS_CB*2]   ; pCtx
            969   +    push    rsi                     ; push for saving the state at the end
            970   +
            971   +    ; Restore CR2
            972   +    mov     rbx, [rsi + CPUMCTX.cr2]
            973   +    mov     cr2, rbx
            974   +
            975   +    ; save host fs, gs, sysenter msr etc
            976   +    mov     rax, [rbp + xS*2]       ; pVMCBHostPhys (64 bits physical address)
            977   +    push    rax                     ; save for the vmload after vmrun
            978   +    vmsave
            979   +
            980   +    ; setup eax for VMLOAD
            981   +    mov     rax, [rbp + xS*2 + RTHCPHYS_CB]     ; pVMCBPhys (64 bits physical address)
            982   +
            983   +    ;/* Restore Guest's general purpose registers. */
            984   +    ;/* RAX is loaded from the VMCB by VMRUN */
            985   +    mov     rbx, qword [xSI + CPUMCTX.ebx]
            986   +    mov     rcx, qword [xSI + CPUMCTX.ecx]
            987   +    mov     rdx, qword [xSI + CPUMCTX.edx]
            988   +    mov     rdi, qword [xSI + CPUMCTX.edi]
            989   +    mov     rbp, qword [xSI + CPUMCTX.ebp]
            990   +    mov     r8,  qword [xSI + CPUMCTX.r8]
            991   +    mov     r9,  qword [xSI + CPUMCTX.r9]
            992   +    mov     r10, qword [xSI + CPUMCTX.r10]
            993   +    mov     r11, qword [xSI + CPUMCTX.r11]
            994   +    mov     r12, qword [xSI + CPUMCTX.r12]
            995   +    mov     r13, qword [xSI + CPUMCTX.r13]
            996   +    mov     r14, qword [xSI + CPUMCTX.r14]
            997   +    mov     r15, qword [xSI + CPUMCTX.r15]
            998   +    mov     rsi, qword [xSI + CPUMCTX.esi]      ; RSI last, it held pCtx
            999   +
           1000   +    ; Clear the global interrupt flag & execute sti to make sure external interrupts cause a world switch
           1001   +    clgi
           1002   +    sti
           1003   +
           1004   +    ; load guest fs, gs, sysenter msr etc
           1005   +    vmload
           1006   +    ; run the VM
           1007   +    vmrun
           1008   +
           1009   +    ;/* RAX is in the VMCB already; we can use it here. */
           1010   +
           1011   +    ; save guest fs, gs, sysenter msr etc
           1012   +    vmsave
           1013   +
           1014   +    ; load host fs, gs, sysenter msr etc
           1015   +    pop     rax                     ; pushed above
           1016   +    vmload
           1017   +
           1018   +    ; Set the global interrupt flag again, but execute cli to make sure IF=0.
           1019   +    cli
           1020   +    stgi
           1021   +
           1022   +    pop     rax                     ; pCtx
           1023   +
           1024   +    mov     qword [rax + CPUMCTX.ebx], rbx
           1025   +    mov     qword [rax + CPUMCTX.ecx], rcx
           1026   +    mov     qword [rax + CPUMCTX.edx], rdx
           1027   +    mov     qword [rax + CPUMCTX.esi], rsi
           1028   +    mov     qword [rax + CPUMCTX.edi], rdi
           1029   +    mov     qword [rax + CPUMCTX.ebp], rbp
           1030   +    mov     qword [rax + CPUMCTX.r8],  r8
           1031   +    mov     qword [rax + CPUMCTX.r9],  r9
           1032   +    mov     qword [rax + CPUMCTX.r10], r10
           1033   +    mov     qword [rax + CPUMCTX.r11], r11
           1034   +    mov     qword [rax + CPUMCTX.r12], r12
           1035   +    mov     qword [rax + CPUMCTX.r13], r13
           1036   +    mov     qword [rax + CPUMCTX.r14], r14
           1037   +    mov     qword [rax + CPUMCTX.r15], r15
           1038   +
           1039   +    ; Restore general purpose registers
           1040   +    MYPOPAD
           1041   +
           1042   +    mov     eax, VINF_SUCCESS
           1043   +
           1044   +    popf
           1045   +    pop     rbp
           1046   +    add     rsp, 4*xS               ; drop fake return address and the three arguments
           1047   +    ret
           1048   +ENDPROC SVMVMRun64
           1049   +%endif ; RT_ARCH_AMD64
           1050   +
      929  1051
      930  1052   %if GC_ARCH_BITS == 64
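
    For orientation, a hypothetical C call site for the new entry point, using the signature declared in hwacc_svm.h above (the real code reaches it through the pfnVMRun member rather than calling it directly); the helper name is illustrative:

    #include <VBox/hwacc_svm.h>   /* SVMVMRun64(), RTHCPHYS, PCPUMCTX */

    /* Hypothetical helper: one guest-execution round trip. The asm body
     * VMSAVEs host state, VMLOADs guest state, executes VMRUN, restores,
     * and in this revision always returns VINF_SUCCESS. */
    static int runGuest64Once(RTHCPHYS VMCBHostPhys, RTHCPHYS VMCBPhys,
                              PCPUMCTX pCtx)
    {
        return SVMVMRun64(VMCBHostPhys, VMCBPhys, pCtx);
    }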
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r9720 → r9897

      608   608           val &= ~(X86_CR0_CD|X86_CR0_NW);
      609   609
      610         -        /* Note: WP is not relevant in nested paging mode as we catch accesses on the (host) physical level. */
            610   +        /* Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level. */
      611   611           /* Note: In nested paging mode the guest is allowed to run with paging disabled; the guest physical to host physical translation will remain active. */
      612   612           if (!pVM->hwaccm.s.fNestedPaging)
      …
      698   698       /* vmrun will fail without MSR_K6_EFER_SVME. */
      699   699       pVMCB->guest.u64EFER   = pCtx->msrEFER | MSR_K6_EFER_SVME;
            700   +
            701   +    /* 64 bits guest mode? */
            702   +    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
            703   +    {
            704   +#if !defined(VBOX_WITH_64_BITS_GUESTS) || HC_ARCH_BITS != 64
            705   +        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
            706   +#else
            707   +        pVM->hwaccm.s.svm.pfnVMRun = SVMVMRun64;
            708   +#endif
            709   +        /* Unconditionally update these as wrmsr might have changed them. (HWACCM_CHANGED_GUEST_SEGMENT_REGS will not be set) */
            710   +        pVMCB->guest.FS.u64Base    = pCtx->fsHid.u64Base;
            711   +        pVMCB->guest.GS.u64Base    = pCtx->gsHid.u64Base;
            712   +    }
            713   +    else
            714   +    {
            715   +        pVM->hwaccm.s.svm.pfnVMRun = SVMVMRun;
            716   +    }
      700   717
      701   718       /** TSC offset. */
      …
      907   924       Assert(pVMCB->ctrl.u64LBRVirt == 0);
      908   925
      909         -    SVMVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVM->hwaccm.s.svm.pVMCBPhys, pCtx);
            926   +    pVM->hwaccm.s.svm.pfnVMRun(pVM->hwaccm.s.svm.pVMCBHostPhys, pVM->hwaccm.s.svm.pVMCBPhys, pCtx);
      910   927       STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);
      911   928
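
    The dispatch above keys off EFER.LMA, which the CPU sets only once long mode is actually active (not merely enabled through EFER.LME), so it is the right bit for choosing between SVMVMRun and SVMVMRun64. A minimal sketch of the test, using the architectural bit position from the AMD64 manuals; the constant name is illustrative, the real code uses MSR_K6_EFER_LMA:

    #include <stdint.h>

    #define EFER_LMA (UINT64_C(1) << 10)   /* Long Mode Active, EFER bit 10 */

    /* Nonzero when the guest runs 64-bit code and SVMVMRun64 must be used. */
    static int guestIs64Bit(uint64_t msrEFER)
    {
        return (msrEFER & EFER_LMA) != 0;
    }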