Changeset 9457 in vbox for trunk/src/VBox


Timestamp: Jun 6, 2008 9:46:39 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 31712
Message: Reapplied fixed 31707.

Location: trunk/src/VBox/VMM
Files: 5 edited

Legend: in the diffs below, a leading + marks an added line, a leading - marks a removed line, and all other lines are unmodified context.
  • trunk/src/VBox/VMM/HWACCMInternal.h

    --- r9453
    +++ r9457
    @@ -194,4 +194,7 @@
             /** Virtual address of the TSS page used for real mode emulation. */
             R0PTRTYPE(PVBOXTSS)         pRealModeTSS;
    +
    +        /** Ring 0 handlers for VT-x. */
    +        DECLR0CALLBACKMEMBER(int, pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx));
     
             /** Host CR4 value (set by ring-0 VMX init) */
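The new member makes the VMX world-switch entry point a per-VM callback that is selected once, when the guest mode is known, instead of being branched on at every VM entry. Below is a minimal, self-contained C sketch of just that pattern; HCUINT, GUESTCTX, VMXSTATE and StartVM32Stub are simplified, hypothetical stand-ins for RTHCUINT, CPUMCTX and the HWACCM state, not VirtualBox APIs.

    #include <stdio.h>

    typedef unsigned long HCUINT;                    /* stand-in for RTHCUINT */
    typedef struct GUESTCTX { int regs; } GUESTCTX;  /* stand-in for CPUMCTX  */

    /* What DECLR0CALLBACKMEMBER boils down to in this sketch: a plain
     * function-pointer member in the per-VM VMX state. */
    typedef int (*PFNSTARTVM)(HCUINT fResume, GUESTCTX *pCtx);

    struct VMXSTATE
    {
        PFNSTARTVM pfnStartVM;   /* chosen once, per guest mode */
    };

    /* Hypothetical stand-in for the 32-bit world-switch routine. */
    static int StartVM32Stub(HCUINT fResume, GUESTCTX *pCtx)
    {
        (void)pCtx;
        printf("world switch via %s\n", fResume ? "VMRESUME" : "VMLAUNCH");
        return 0;   /* VINF_SUCCESS */
    }

    int main(void)
    {
        GUESTCTX        Ctx = { 0 };
        struct VMXSTATE Vmx = { StartVM32Stub };

        Vmx.pfnStartVM(0 /* first entry */, &Ctx);
        Vmx.pfnStartVM(1 /* subsequent entries */, &Ctx);
        return 0;
    }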
  • trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm

    --- r9453
    +++ r9457
    @@ -178,12 +178,11 @@
     
     ;/**
    -; * Prepares for and executes VMLAUNCH
    -; *
    -; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode
    -; *
    -; * @returns VBox status code
    -; * @param   pCtx        Guest context
    -; */
    -BEGINPROC VMXStartVM
    +; * Prepares for and executes VMLAUNCH/VMRESUME (32-bit guest mode)
    +; *
    +; * @returns VBox status code
    +; * @param   fResume    vmlaunch/vmresume
    +; * @param   pCtx       Guest context
    +; */
    +BEGINPROC VMXR0StartVM32
         push    xBP
         mov     xBP, xSP
     
    @@ -201,5 +200,5 @@
         mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
         vmwrite xAX, [xSP]
    -    ;/* @todo assumes success... */
    +    ;/* Note: assumes success... */
         add     xSP, xS
     
     
    @@ -219,17 +218,23 @@
         MYPUSHAD
     
    +    ;/* Save the Guest CPU context pointer. */
    +%ifdef RT_ARCH_AMD64
    + %ifdef ASM_CALL64_GCC
    +    ; fResume already in rdi
    +    ; pCtx    already in rsi
    + %else
    +    mov     rdi, rcx        ; fResume
    +    mov     rsi, rdx        ; pCtx
    + %endif
    +%else
    +    mov     edi, [ebp + 8]  ; fResume
    +    mov     esi, [ebp + 12] ; pCtx
    +%endif
    +
         ;/* Save segment registers */
    +    ; Note: MYPUSHSEGS trashes rdx (among others), so we moved it here (msvc amd64 case)
         MYPUSHSEGS xAX, ax
     
    -    ;/* Save the Guest CPU context pointer. */
    -%ifdef RT_ARCH_AMD64
    - %ifdef ASM_CALL64_GCC
    -    mov     rsi, rdi ; pCtx
    - %else
    -    mov     rsi, rcx ; pCtx
    - %endif
    -%else
    -    mov     esi, [ebp + 8] ; pCtx
    -%endif
    +    ; Save the pCtx pointer
         push    xSI
     
     
    @@ -258,5 +263,5 @@
         mov     eax, VMX_VMCS_HOST_RSP
         vmwrite xAX, xSP
    -    ;/* @todo assumes success... */
    +    ;/* Note: assumes success... */
         ;/* Don't mess with ESP anymore!! */
     
     
    @@ -266,6 +271,20 @@
         mov     ecx, [xSI + CPUMCTX.ecx]
         mov     edx, [xSI + CPUMCTX.edx]
    +    mov     ebp, [xSI + CPUMCTX.ebp]
    +
    +    ; resume or start?
    +    cmp     xDI, 0                  ; fResume
    +    je      .vmlaunch_launch
    +
    +    ;/* Restore edi & esi. */
         mov     edi, [xSI + CPUMCTX.edi]
    -    mov     ebp, [xSI + CPUMCTX.ebp]
    +    mov     esi, [xSI + CPUMCTX.esi]
    +
    +    vmresume
    +    jmp     .vmlaunch_done          ;/* here if vmresume detected a failure. */
    +
    +.vmlaunch_launch:
    +    ;/* Restore edi & esi. */
    +    mov     edi, [xSI + CPUMCTX.edi]
         mov     esi, [xSI + CPUMCTX.esi]
     
     
    @@ -365,203 +384,23 @@
         jmp     .vmstart_end
     
    -ENDPROC VMXStartVM
    -
    -
    -;/**
    -; * Prepares for and executes VMRESUME
    -; *
    -; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode
    -; *
    -; * @returns VBox status code
    -; * @param   pCtx        Guest context
    -; */
    -BEGINPROC VMXResumeVM
    -    push    xBP
    -    mov     xBP, xSP
    -
    -    pushf
    -    cli
    -
    -    ;/* First we have to save some final CPU context registers. */
    -%ifdef RT_ARCH_AMD64
    -    mov     rax, qword .vmresume_done
    -    push    rax
    -%else
    -    push    .vmresume_done
    -%endif
    -    mov     eax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    -    vmwrite xAX, [xSP]
    -    ;/* @todo assumes success... */
    -    add     xSP, xS
    -
    -    ;/* Manual save and restore:
    -    ; * - General purpose registers except RIP, RSP
    -    ; *
    -    ; * Trashed:
    -    ; * - CR2 (we don't care)
    -    ; * - LDTR (reset to 0)
    -    ; * - DRx (presumably not changed at all)
    -    ; * - DR7 (reset to 0x400)
    -    ; * - EFLAGS (reset to RT_BIT(1); not relevant)
    -    ; *
    -    ; */
    -
    -    ;/* Save all general purpose host registers. */
    -    MYPUSHAD
    -
    -    ;/* Save segment registers */
    -    MYPUSHSEGS xAX, ax
    -
    -    ;/* Save the Guest CPU context pointer. */
    -%ifdef RT_ARCH_AMD64
    - %ifdef ASM_CALL64_GCC
    -    mov     rsi, rdi        ; pCtx
    - %else
    -    mov     rsi, rcx        ; pCtx
    - %endif
    -%else
    -    mov     esi, [ebp + 8]  ; pCtx
    -%endif
    -    push    xSI
    -
    -    ; Save LDTR
    -    xor     eax, eax
    -    sldt    ax
    -    push    xAX
    -
    -    ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    -    sub     xSP, xS*2
    -    sgdt    [xSP]
    -
    -    sub     xSP, xS*2
    -    sidt    [xSP]
    -
    -%ifdef VBOX_WITH_DR6_EXPERIMENT
    -    ; Restore DR6 - experiment, not safe!
    -    mov     xBX, [xSI + CPUMCTX.dr6]
    -    mov     dr6, xBX
    -%endif
    -
    -    ; Restore CR2
    -    mov     xBX, [xSI + CPUMCTX.cr2]
    -    mov     cr2, xBX
    -
    -    mov     eax, VMX_VMCS_HOST_RSP
    -    vmwrite xAX, xSP
    -    ;/* @todo assumes success... */
    -    ;/* Don't mess with ESP anymore!! */
    -
    -    ;/* Restore Guest's general purpose registers. */
    -    mov     eax, [xSI + CPUMCTX.eax]
    -    mov     ebx, [xSI + CPUMCTX.ebx]
    -    mov     ecx, [xSI + CPUMCTX.ecx]
    -    mov     edx, [xSI + CPUMCTX.edx]
    -    mov     edi, [xSI + CPUMCTX.edi]
    -    mov     ebp, [xSI + CPUMCTX.ebp]
    -    mov     esi, [xSI + CPUMCTX.esi]
    -
    -    vmresume
    -    jmp     .vmresume_done;      ;/* here if vmresume detected a failure. */
    -
    -ALIGNCODE(16)
    -.vmresume_done:
    -    jc      near .vmxresume_invalid_vmxon_ptr
    -    jz      near .vmxresume_start_failed
    -
    -    ; Restore base and limit of the IDTR & GDTR
    -    lidt    [xSP]
    -    add     xSP, xS*2
    -    lgdt    [xSP]
    -    add     xSP, xS*2
    -
    -    push    xDI
    -    mov     xDI, [xSP + xS * 2]         ; pCtx
    -
    -    mov     [ss:xDI + CPUMCTX.eax], eax
    -    mov     [ss:xDI + CPUMCTX.ebx], ebx
    -    mov     [ss:xDI + CPUMCTX.ecx], ecx
    -    mov     [ss:xDI + CPUMCTX.edx], edx
    -    mov     [ss:xDI + CPUMCTX.esi], esi
    -    mov     [ss:xDI + CPUMCTX.ebp], ebp
    -%ifdef RT_ARCH_AMD64
    -    pop     xAX                                 ; the guest edi we pushed above
    -    mov     dword [ss:xDI + CPUMCTX.edi], eax
    -%else
    -    pop     dword [ss:xDI + CPUMCTX.edi]        ; the guest edi we pushed above
    -%endif
    -
    -%ifdef VBOX_WITH_DR6_EXPERIMENT
    -    ; Save DR6 - experiment, not safe!
    -    mov     xAX, dr6
    -    mov     [ss:xDI + CPUMCTX.dr6], xAX
    -%endif
    -
    -    pop     xAX          ; saved LDTR
    -    lldt    ax
    -
    -    add     xSP, xS      ; pCtx
    -
    -    ; Restore segment registers
    -    MYPOPSEGS xAX, ax
    -
    -    ; Restore general purpose registers
    -    MYPOPAD
    -
    -    mov     eax, VINF_SUCCESS
    -
    -.vmresume_end:
    -    popf
    -    pop     xBP
    -    ret
    -
    -.vmxresume_invalid_vmxon_ptr:
    -    ; Restore base and limit of the IDTR & GDTR
    -    lidt    [xSP]
    -    add     xSP, xS*2
    -    lgdt    [xSP]
    -    add     xSP, xS*2
    -
    -    pop     xAX         ; saved LDTR
    -    lldt    ax
    -
    -    add     xSP, xS     ; pCtx
    -
    -    ; Restore segment registers
    -    MYPOPSEGS xAX, ax
    -
    -    ; Restore all general purpose host registers.
    -    MYPOPAD
    -    mov     eax, VERR_VMX_INVALID_VMXON_PTR
    -    jmp     .vmresume_end
    -
    -.vmxresume_start_failed:
    -    ; Restore base and limit of the IDTR & GDTR
    -    lidt    [xSP]
    -    add     xSP, xS*2
    -    lgdt    [xSP]
    -    add     xSP, xS*2
    -
    -    pop     xAX         ; saved LDTR
    -    lldt    ax
    -
    -    add     xSP, xS     ; pCtx
    -
    -    ; Restore segment registers
    -    MYPOPSEGS xAX, ax
    -
    -    ; Restore all general purpose host registers.
    -    MYPOPAD
    -    mov     eax, VERR_VMX_UNABLE_TO_RESUME_VM
    -    jmp     .vmresume_end
    -
    -ENDPROC VMXResumeVM
    -
    -
    -%ifdef RT_ARCH_AMD64
    +ENDPROC VMXR0StartVM32
    +
    +%ifdef RT_ARCH_AMD64
    +;/**
    +; * Prepares for and executes VMLAUNCH/VMRESUME (64-bit guest mode)
    +; *
    +; * @returns VBox status code
    +; * @param   fResume    vmlaunch/vmresume
    +; * @param   pCtx       Guest context
    +; */
    +BEGINPROC VMXR0StartVM64
    +    ret
    +ENDPROC VMXR0StartVM64
    +
     ;/**
     ; * Executes VMWRITE
     ; *
     ; * @returns VBox status code
    -; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: edi   VMCS index
    +; * @param   idxField   x86: [ebp + 08h]  msc: rcx  gcc: rdi   VMCS index
     ; * @param   pData      x86: [ebp + 0ch]  msc: rdx  gcc: rsi   VM field value
     ; */
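The merged routine replaces the old VMXStartVM/VMXResumeVM pair: one entry point takes fResume and picks the VMLAUNCH or VMRESUME opcode at the last moment. The register choreography matters, because rdi/rsi carry fResume and pCtx until the end, which is why the guest edi/esi are the last GPRs loaded on either branch. The C sketch below renders only that control flow; every name in it is an illustrative stand-in, and the real routine must stay in assembly, since RSP is written into the VMCS via VMX_VMCS_HOST_RSP and may not move afterwards.

    #include <stdio.h>

    typedef struct { int eax, ebx, ecx, edx, esi, edi, ebp; } GUESTCTX;

    static int WorldSwitch32Sketch(unsigned fResume, const GUESTCTX *pCtx)
    {
        /* 1. Save host state (MYPUSHAD/MYPUSHSEGS, LDTR, GDTR/IDTR limits). */
        /* 2. vmwrite(VMX_VMCS_HOST_RSP, rsp): the stack is frozen from here. */
        /* 3. Load guest eax..ebp; edi/esi wait, they still hold the args.    */
        printf("guest GPRs loaded, except edi/esi\n");

        if (fResume)
            printf("edi=%d esi=%d, VMRESUME\n", pCtx->edi, pCtx->esi);
        else
            printf("edi=%d esi=%d, VMLAUNCH\n", pCtx->edi, pCtx->esi);

        /* Falling through the entry instruction means it failed; on success,
         * control returns at the VMX_VMCS_HOST_RIP address after a VM exit. */
        return 0;   /* VINF_SUCCESS stand-in */
    }

    int main(void)
    {
        GUESTCTX Ctx = { 0 };
        WorldSwitch32Sketch(0, &Ctx);   /* first entry:        VMLAUNCH */
        WorldSwitch32Sketch(1, &Ctx);   /* subsequent entries: VMRESUME */
        return 0;
    }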
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    --- r9453
    +++ r9457
    @@ -642,5 +642,5 @@
                 case PGMMODE_PROTECTED:     /* Protected mode, no paging. */
                     AssertFailed();
    -                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    +                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     
                 case PGMMODE_32_BIT:        /* 32-bit paging. */
    @@ -659,10 +659,10 @@
     #else
                     AssertFailed();
    -                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    +                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     #endif
     
                 default:                    /* shut up gcc */
                     AssertFailed();
    -                return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    +                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
                 }
             }
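The error-code rename names the right failure: the rename suggests this switch rejects paging modes that cannot back the guest's shadow paging context, so VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE describes the condition where VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE did not. A small hedged sketch of that shape of check, under that reading, with simplified stand-ins for the PGMMODE_* enumerators and status codes (the real switch has more cases and side effects):

    /* Simplified stand-ins; not the VBox definitions. */
    typedef enum { MODE_REAL, MODE_PROTECTED, MODE_32_BIT, MODE_PAE, MODE_AMD64 } PGMMODE;

    #define VINF_SUCCESS                            0
    #define VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE (-1)

    /* Accept only paging modes that can host a shadow context; everything
     * else is a shadow-paging limitation, not a host-paging one. */
    static int CheckShadowMode(PGMMODE enmMode)
    {
        switch (enmMode)
        {
            case MODE_32_BIT:   /* 32-bit paging    */
            case MODE_PAE:      /* PAE paging       */
            case MODE_AMD64:    /* long-mode paging */
                return VINF_SUCCESS;

            default:            /* real/protected mode without paging, etc. */
                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
        }
    }

    int main(void)
    {
        return CheckShadowMode(MODE_32_BIT);   /* 0: supported */
    }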
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp

    --- r9453
    +++ r9457
    @@ -841,9 +841,9 @@
     #else
                 AssertFailed();
    -            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    +            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
     #endif
             default:                   /* shut up gcc */
                 AssertFailed();
    -            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    +            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
             }
             /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
    @@ -960,5 +960,16 @@
         /* 64 bits guest mode? */
         if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    +    {
             val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
    +#ifndef VBOX_WITH_64_BITS_GUESTS
    +        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
    +#else
    +        pVM->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM64;
    +#endif
    +    }
    +    else
    +    {
    +        pVM->hwaccm.s.vmx.pfnStartVM  = VMXR0StartVM32;
    +    }
     
         /* Done. */
    @@ -1158,8 +1169,5 @@
         /* All done! Let's start VM execution. */
         STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
    -    if (pVM->hwaccm.s.vmx.fResumeVM == false)
    -        rc = VMXStartVM(pCtx);
    -    else
    -        rc = VMXResumeVM(pCtx);
    +    rc = pVM->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx);
     
         /* In case we execute a goto ResumeExecution later on. */
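Two things happen at this file's call sites: setup code now picks the world-switch routine from the guest's EFER.LMA bit (64-bit guests get VMXR0StartVM64, still just a ret stub above), and the hot path shrinks to a single indirect call that forwards fResumeVM. A compilable sketch of that selection and call, with stand-in types, names and constants; only the bit position is architectural (IA32_EFER.LMA is bit 10):

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LMA  (UINT64_C(1) << 10)   /* IA32_EFER.LMA: long mode active */

    typedef int (*PFNSTARTVM)(unsigned fResume, void *pCtx);

    /* Hypothetical stand-ins for the DECLASM world-switch routines. */
    static int StartVM32(unsigned fResume, void *pCtx) { (void)fResume; (void)pCtx; return 0; }
    static int StartVM64(unsigned fResume, void *pCtx) { (void)fResume; (void)pCtx; return 0; }

    int main(void)
    {
        uint64_t   msrEFER    = 0;      /* guest EFER, e.g. read out of CPUMCTX */
        PFNSTARTVM pfnStartVM = (msrEFER & EFER_LMA) ? StartVM64 : StartVM32;
        unsigned   fResumeVM  = 0;      /* false until after the first VMLAUNCH */

        /* Per-entry hot path: one indirect call, no vmlaunch/vmresume branch. */
        int rc = pfnStartVM(fResumeVM, NULL);
        printf("rc=%d\n", rc);
        return rc;
    }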
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    --- r9453
    +++ r9457
    @@ -162,4 +162,23 @@
     
     
    +
    +/**
    + * Prepares for and executes VMLAUNCH (32-bit guest mode)
    + *
    + * @returns VBox status code
    + * @param   fResume     vmlaunch/vmresume
    + * @param   pCtx        Guest context
    + */
    +DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);
    +
    +/**
    + * Prepares for and executes VMLAUNCH (64-bit guest mode)
    + *
    + * @returns VBox status code
    + * @param   fResume     vmlaunch/vmresume
    + * @param   pCtx        Guest context
    + */
    +DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);
    +
     #endif /* IN_RING0 */
     