VirtualBox

Changeset 9453 in vbox for trunk/src


Ignore:
Timestamp:
Jun 6, 2008 9:28:02 AM (17 years ago)
Author:
vboxsync
Message:

Backed out 31707

Location:
trunk/src/VBox/VMM
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/HWACCMInternal.h

    r9452 r9453  
    194194        /** Virtual address of the TSS page used for real mode emulation. */
    195195        R0PTRTYPE(PVBOXTSS)         pRealModeTSS;
    196 
    197         /** Ring 0 handlers for VT-x. */
    198         DECLR0CALLBACKMEMBER(int, pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx));
    199196
    200197        /** Host CR4 value (set by ring-0 VMX init) */
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm

    r9452 r9453  
    178178
    179179;/**
    180 ; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
     180; * Prepares for and executes VMLAUNCH
     181; *
     182; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
    181183; *
    182184; * @returns VBox status code
    183 ; * @param   fResume    vmlaunch/vmresume
    184 ; * @param   pCtx       Guest context
     185; * @param   pCtx        Guest context
    185186; */
    186 BEGINPROC VMXR0StartVM32
     187BEGINPROC VMXStartVM
    187188    push    xBP
    188189    mov     xBP, xSP
     
    200201    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    201202    vmwrite xAX, [xSP]
    202     ;/* Note: assumes success... */
     203    ;/* @todo assumes success... */
    203204    add     xSP, xS
    204205
     
    224225%ifdef RT_ARCH_AMD64
    225226 %ifdef ASM_CALL64_GCC
    226     ; fResume already in rdi
    227     ; pCtx    already in rsi
     227    mov     rsi, rdi ; pCtx
    228228 %else
    229     mov     rdi, rcx        ; fResume
    230     mov     rsi, rdx        ; pCtx
     229    mov     rsi, rcx ; pCtx
    231230 %endif
    232231%else
    233     mov     edi, [ebp + 8]  ; fResume
    234     mov     esi, [ebp + 12] ; pCtx
     232    mov     esi, [ebp + 8] ; pCtx
    235233%endif
    236234    push    xSI
     
    260258    mov     eax, VMX_VMCS_HOST_RSP
    261259    vmwrite xAX, xSP
    262     ;/* Note: assumes success... */
     260    ;/* @todo assumes success... */
    263261    ;/* Don't mess with ESP anymore!! */
    264262
     
    268266    mov     ecx, [xSI + CPUMCTX.ecx]
    269267    mov     edx, [xSI + CPUMCTX.edx]
     268    mov     edi, [xSI + CPUMCTX.edi]
    270269    mov     ebp, [xSI + CPUMCTX.ebp]
    271 
    272     ; resume or start?
    273     cmp     xDI, 0                  ; fResume
    274     je      .vmlauch_lauch
    275 
    276     ;/* Restore edi & esi. */
    277     mov     edi, [xSI + CPUMCTX.edi]
    278     mov     esi, [xSI + CPUMCTX.esi]
    279 
    280     vmresume
    281     jmp     .vmlaunch_done;      ;/* here if vmresume detected a failure. */
    282    
    283 .vmlauch_lauch:   
    284     ;/* Restore edi & esi. */
    285     mov     edi, [xSI + CPUMCTX.edi]
    286270    mov     esi, [xSI + CPUMCTX.esi]
    287271
     
    381365    jmp     .vmstart_end
    382366
    383 ENDPROC VMXR0StartVM32
    384 
    385 %ifdef RT_ARCH_AMD64
     367ENDPROC VMXStartVM
     368
     369
    386370;/**
    387 ; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode)
     371; * Prepares for and executes VMRESUME
     372; *
     373; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
    388374; *
    389375; * @returns VBox status code
    390 ; * @param   fResume    vmlaunch/vmresume
    391 ; * @param   pCtx       Guest context
     376; * @param   pCtx        Guest context
    392377; */
    393 BEGINPROC VMXR0StartVM64
    394     ret
    395 ENDPROC VMXR0StartVM64
    396 
     378BEGINPROC VMXResumeVM
     379    push    xBP
     380    mov     xBP, xSP
     381
     382    pushf
     383    cli
     384
     385    ;/* First we have to save some final CPU context registers. */
     386%ifdef RT_ARCH_AMD64
     387    mov     rax, qword .vmresume_done
     388    push    rax
     389%else
     390    push    .vmresume_done
     391%endif
     392    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
     393    vmwrite xAX, [xSP]
     394    ;/* @todo assumes success... */
     395    add     xSP, xS
     396
     397    ;/* Manual save and restore:
     398    ; * - General purpose registers except RIP, RSP
     399    ; *
     400    ; * Trashed:
     401    ; * - CR2 (we don't care)
     402    ; * - LDTR (reset to 0)
     403    ; * - DRx (presumably not changed at all)
     404    ; * - DR7 (reset to 0x400)
     405    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
     406    ; *
     407    ; */
     408
     409    ;/* Save all general purpose host registers. */
     410    MYPUSHAD
     411
     412    ;/* Save segment registers */
     413    MYPUSHSEGS xAX, ax
     414
     415    ;/* Save the Guest CPU context pointer. */
     416%ifdef RT_ARCH_AMD64
     417 %ifdef ASM_CALL64_GCC
     418    mov     rsi, rdi        ; pCtx
     419 %else
     420    mov     rsi, rcx        ; pCtx
     421 %endif
     422%else
     423    mov     esi, [ebp + 8]  ; pCtx
     424%endif
     425    push    xSI
     426
     427    ; Save LDTR
     428    xor     eax, eax
     429    sldt    ax
     430    push    xAX
     431
     432    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
     433    sub     xSP, xS*2
     434    sgdt    [xSP]
     435
     436    sub     xSP, xS*2
     437    sidt    [xSP]
     438
     439%ifdef VBOX_WITH_DR6_EXPERIMENT
     440    ; Restore DR6 - experiment, not safe!
     441    mov     xBX, [xSI + CPUMCTX.dr6]
     442    mov     dr6, xBX
     443%endif
     444
     445    ; Restore CR2
     446    mov     xBX, [xSI + CPUMCTX.cr2]
     447    mov     cr2, xBX
     448
     449    mov     eax, VMX_VMCS_HOST_RSP
     450    vmwrite xAX, xSP
     451    ;/* @todo assumes success... */
     452    ;/* Don't mess with ESP anymore!! */
     453
     454    ;/* Restore Guest's general purpose registers. */
     455    mov     eax, [xSI + CPUMCTX.eax]
     456    mov     ebx, [xSI + CPUMCTX.ebx]
     457    mov     ecx, [xSI + CPUMCTX.ecx]
     458    mov     edx, [xSI + CPUMCTX.edx]
     459    mov     edi, [xSI + CPUMCTX.edi]
     460    mov     ebp, [xSI + CPUMCTX.ebp]
     461    mov     esi, [xSI + CPUMCTX.esi]
     462
     463    vmresume
     464    jmp     .vmresume_done;      ;/* here if vmresume detected a failure. */
     465
     466ALIGNCODE(16)
     467.vmresume_done:
     468    jc      near .vmxresume_invalid_vmxon_ptr
     469    jz      near .vmxresume_start_failed
     470
     471    ; Restore base and limit of the IDTR & GDTR
     472    lidt    [xSP]
     473    add     xSP, xS*2
     474    lgdt    [xSP]
     475    add     xSP, xS*2
     476
     477    push    xDI
     478    mov     xDI, [xSP + xS * 2]         ; pCtx
     479
     480    mov     [ss:xDI + CPUMCTX.eax], eax
     481    mov     [ss:xDI + CPUMCTX.ebx], ebx
     482    mov     [ss:xDI + CPUMCTX.ecx], ecx
     483    mov     [ss:xDI + CPUMCTX.edx], edx
     484    mov     [ss:xDI + CPUMCTX.esi], esi
     485    mov     [ss:xDI + CPUMCTX.ebp], ebp
     486%ifdef RT_ARCH_AMD64
     487    pop     xAX                                 ; the guest edi we pushed above
     488    mov     dword [ss:xDI + CPUMCTX.edi], eax
     489%else
     490    pop     dword [ss:xDI + CPUMCTX.edi]        ; the guest edi we pushed above
     491%endif
     492
     493%ifdef VBOX_WITH_DR6_EXPERIMENT
     494    ; Save DR6 - experiment, not safe!
     495    mov     xAX, dr6
     496    mov     [ss:xDI + CPUMCTX.dr6], xAX
     497%endif
     498
     499    pop     xAX          ; saved LDTR
     500    lldt    ax
     501
     502    add     xSP, xS      ; pCtx
     503
     504    ; Restore segment registers
     505    MYPOPSEGS xAX, ax
     506
     507    ; Restore general purpose registers
     508    MYPOPAD
     509
     510    mov     eax, VINF_SUCCESS
     511
     512.vmresume_end:
     513    popf
     514    pop     xBP
     515    ret
     516
     517.vmxresume_invalid_vmxon_ptr:
     518    ; Restore base and limit of the IDTR & GDTR
     519    lidt    [xSP]
     520    add     xSP, xS*2
     521    lgdt    [xSP]
     522    add     xSP, xS*2
     523
     524    pop     xAX         ; saved LDTR
     525    lldt    ax
     526
     527    add     xSP, xS     ; pCtx
     528
     529    ; Restore segment registers
     530    MYPOPSEGS xAX, ax
     531
     532    ; Restore all general purpose host registers.
     533    MYPOPAD
     534    mov     eax, VERR_VMX_INVALID_VMXON_PTR
     535    jmp     .vmresume_end
     536
     537.vmxresume_start_failed:
     538    ; Restore base and limit of the IDTR & GDTR
     539    lidt    [xSP]
     540    add     xSP, xS*2
     541    lgdt    [xSP]
     542    add     xSP, xS*2
     543
     544    pop     xAX         ; saved LDTR
     545    lldt    ax
     546
     547    add     xSP, xS     ; pCtx
     548
     549    ; Restore segment registers
     550    MYPOPSEGS xAX, ax
     551
     552    ; Restore all general purpose host registers.
     553    MYPOPAD
     554    mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
     555    jmp     .vmresume_end
     556
     557ENDPROC VMXResumeVM
     558
     559
     560%ifdef RT_ARCH_AMD64
    397561;/**
    398562; * Executes VMWRITE
    399563; *
    400564; * @returns VBox status code
    401 ; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
     565; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: edi   VMCS index
    402566; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
    403567; */
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r9452 r9453  
    642642            case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
    643643                AssertFailed();
    644                 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     644                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    645645
    646646            case PGMMODE_32_BIT:        /* 32-bit paging. */
     
    659659#else
    660660                AssertFailed();
    661                 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     661                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    662662#endif
    663663
    664664            default:                    /* shut up gcc */
    665665                AssertFailed();
    666                 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     666                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    667667            }
    668668        }
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    r9452 r9453  
    841841#else
    842842            AssertFailed();
    843             return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     843            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    844844#endif
    845845        default:                   /* shut up gcc */
    846846            AssertFailed();
    847             return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     847            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    848848        }
    849849        /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
     
    960960    /* 64 bits guest mode? */
    961961    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    962     {
    963962        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
    964 #ifndef VBOX_WITH_64_BITS_GUESTS
    965         return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    966 #else
    967         pVM->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM64;
    968 #endif
    969     }
    970     else
    971     {
    972         pVM->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM32;
    973     }
    974963
    975964    /* Done. */
     
    11691158    /* All done! Let's start VM execution. */
    11701159    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
    1171     rc = pVM->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx);
     1160    if (pVM->hwaccm.s.vmx.fResumeVM == false)
     1161        rc = VMXStartVM(pCtx);
     1162    else
     1163        rc = VMXResumeVM(pCtx);
    11721164
    11731165    /* In case we execute a goto ResumeExecution later on. */
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r9452 r9453  
    162162
    163163
    164 
    165 /**
    166  * Prepares for and executes VMLAUNCH (32 bits guest mode)
    167  *
    168  * @returns VBox status code
    169  * @param   fResume     vmlaunch/vmresume
    170  * @param   pCtx        Guest context
    171  */
    172 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
    173 
    174 /**
    175  * Prepares for and executes VMLAUNCH (64 bits guest mode)
    176  *
    177  * @returns VBox status code
    178  * @param   fResume     vmlaunch/vmresume
    179  * @param   pCtx        Guest context
    180  */
    181 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);
    182 
    183164#endif /* IN_RING0 */
    184165
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette