VirtualBox

Changeset 14900 in vbox for trunk/src


Ignore:
Timestamp:
Dec 2, 2008 1:30:52 PM (16 years ago)
Author:
vboxsync
Message:

Switcher updates

Location:
trunk/src/VBox/VMM
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm

    r14899 r14900  
    6363; @param 2  16-bit register name for \a 1.
    6464
    65   ; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
     65  ; Load the corresponding guest MSR (trashes rdx & rcx)
    6666  %macro LOADGUESTMSR 2
    67     mov     rcx, %1
    68     rdmsr
    69     push    rdx
    70     push    rax
    7167    mov     edx, dword [rsi + %2 + 4]
    7268    mov     eax, dword [rsi + %2]
     
    8177    mov     dword [rsi + %2], eax
    8278    mov     dword [rsi + %2 + 4], edx
    83     pop     rax
    84     pop     rdx
    85     wrmsr
    86   %endmacro
    87 
    88   ; Load the corresponding host MSR (trashes rdx & rcx)
    89   %macro LOADHOSTMSR 1
    90     mov     rcx, %1
    91     pop     rax
    92     pop     rdx
    93     wrmsr
    9479  %endmacro
    9580
     
    185170; *
    186171; * @returns VBox status code
    187 ; * @param   fResume    vmlauch/vmresume
    188172; * @param   pCtx       Guest context
    189173; */
     
    219203
    220204    ;/* Save the Guest CPU context pointer. */
    221 %ifdef ASM_CALL64_GCC
    222     ; fResume already in rdi
    223205    ; pCtx    already in rsi
    224 %else
    225     mov     rdi, rcx        ; fResume
    226     mov     rsi, rdx        ; pCtx
    227 %endif
    228206
    229207    ;/* Save segment registers */
     
    233211    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
    234212    ;; @todo use the automatic load feature for MSRs
    235     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    236 %if 0  ; not supported on Intel CPUs
    237     LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
    238 %endif
    239     LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    240     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     213    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
     214    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
     215    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
    241216    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    242217
     
    280255    mov     r15, qword [rsi + CPUMCTX.r15]
    281256
    282     ; resume or start?
    283     cmp     rdi, 0                  ; fResume
    284     je      .vmlauch64_lauch
    285 
    286     ;/* Restore edi & esi. */
    287     mov     rdi, qword [rsi + CPUMCTX.edi]
    288     mov     rsi, qword [rsi + CPUMCTX.esi]
    289 
    290     vmresume
    291     jmp     .vmlaunch64_done;      ;/* here if vmresume detected a failure. */
    292 
    293 .vmlauch64_lauch:
    294257    ;/* Restore rdi & rsi. */
    295258    mov     rdi, qword [rsi + CPUMCTX.edi]
     
    301264ALIGNCODE(16)
    302265.vmlaunch64_done:
    303     jc      near .vm8tart64_invalid_vmxon_ptr
    304     jz      near .vm8tart64_start_failed
     266    jc      near .vmstart64_invalid_vmxon_ptr
     267    jz      near .vmstart64_start_failed
    305268
    306269    ; Restore base and limit of the IDTR & GDTR
     
    339302    ;; @todo use the automatic load feature for MSRs
    340303    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    341     LOADHOSTMSR MSR_K8_SF_MASK
    342     LOADHOSTMSR MSR_K6_STAR
    343 %if 0  ; not supported on Intel CPUs
    344     LOADHOSTMSR MSR_K8_CSTAR
    345 %endif
    346     LOADHOSTMSR MSR_K8_LSTAR
    347304
    348305    ; Restore segment registers
     
    360317
    361318
    362 .vm8tart64_invalid_vmxon_ptr:
     319.vmstart64_invalid_vmxon_ptr:
    363320    ; Restore base and limit of the IDTR & GDTR
    364321    lidt    [rsp]
     
    375332    ;; @todo use the automatic load feature for MSRs
    376333    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    377     LOADHOSTMSR MSR_K8_SF_MASK
    378     LOADHOSTMSR MSR_K6_STAR
    379 %if 0  ; not supported on Intel CPUs
    380     LOADHOSTMSR MSR_K8_CSTAR
    381 %endif
    382     LOADHOSTMSR MSR_K8_LSTAR
    383334
    384335    ; Restore segment registers
     
    390341    jmp     .vmstart64_end
    391342
    392 .vm8tart64_start_failed:
     343.vmstart64_start_failed:
    393344    ; Restore base and limit of the IDTR & GDTR
    394345    lidt    [rsp]
     
    405356    ;; @todo use the automatic load feature for MSRs
    406357    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    407     LOADHOSTMSR MSR_K8_SF_MASK
    408     LOADHOSTMSR MSR_K6_STAR
    409 %if 0  ; not supported on Intel CPUs
    410     LOADHOSTMSR MSR_K8_CSTAR
    411 %endif
    412     LOADHOSTMSR MSR_K8_LSTAR
    413358
    414359    ; Restore segment registers
     
    542487ENDPROC SVMGCVMRun64
    543488
     489;/**
     490; * Saves the guest FPU context
     491; *
     492; * @returns VBox status code
     493; * @param   pCtx       Guest context [rsi]
     494; */
    544495BEGINPROC HWACCMSaveGuestFPU64
     496    mov     rax, cr0
     497    mov     rcx, rax                    ; save old CR0
     498    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
     499    mov     cr0, rax
     500
     501    fxsave  [rsi + CPUMCTX.fpu]
     502
     503    mov     cr0, rcx                    ; and restore old CR0 again
     504   
     505    mov     eax, VINF_SUCCESS
    545506    ret
    546507ENDPROC HWACCMSaveGuestFPU64
    547508
     509;/**
     510; * Saves the guest debug context (DR0-3, DR6)
     511; *
     512; * @returns VBox status code
     513; * @param   pCtx       Guest context [rsi]
     514; */
    548515BEGINPROC HWACCMSaveGuestDebug64
     516    mov rax, dr0
     517    mov qword [rsi + CPUMCTX.dr + 0*8], rax
     518    mov rax, dr1
     519    mov qword [rsi + CPUMCTX.dr + 1*8], rax
     520    mov rax, dr2
     521    mov qword [rsi + CPUMCTX.dr + 2*8], rax
     522    mov rax, dr3
     523    mov qword [rsi + CPUMCTX.dr + 3*8], rax
     524    mov rax, dr6
     525    mov qword [rsi + CPUMCTX.dr + 6*8], rax
     526    mov eax, VINF_SUCCESS
    549527    ret
    550528ENDPROC HWACCMSaveGuestDebug64
    551529
     530;/**
     531; * Dummy callback handler
     532; *
     533; * @returns VBox status code
     534; * @param   pCtx       Guest context [rsi]
     535; */
    552536BEGINPROC HWACCMTestSwitcher64
     537    mov eax, VINF_SUCCESS
    553538    ret
    554539ENDPROC HWACCMTestSwitcher64
  • trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac

    r14785 r14900  
    430430    lss     esp, [edx + CPUMCPU.Host.esp]
    431431
    432     ; Restore FPU if guest has used it.
    433     ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
    434     mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    435     test    esi, CPUM_USED_FPU
    436     jz near gth_fpu_no
    437     mov     ecx, cr0
    438     and     ecx, ~(X86_CR0_TS | X86_CR0_EM)
    439     mov     cr0, ecx
    440 
    441     fxsave  [edx + CPUMCPU.Guest.fpu]
    442     fxrstor [edx + CPUMCPU.Host.fpu]
    443     jmp near gth_fpu_no
    444 
    445 ALIGNCODE(16)
    446 gth_fpu_no:
    447 
    448432    ; Control registers.
    449     ; Would've liked to have these higher up in case of crashes, but
    450     ; the fpu stuff must be done before we restore cr0.
    451433    mov     ecx, [edx + CPUMCPU.Host.cr4]
    452434    mov     cr4, ecx
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette