VirtualBox

Changeset 15030 in vbox for trunk/src/VBox/VMM/VMMGC


Ignore:
Timestamp:
Dec 5, 2008 11:12:26 AM (16 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
40406
Message:

Switcher updates

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm

    r14997 r15030  
    4747%endif
    4848
    49 ;; @def MYPUSHAD
    50 ; Macro generating an equivalent to pushad
    51 
    52 ;; @def MYPOPAD
    53 ; Macro generating an equivalent to popad
    54 
    5549;; @def MYPUSHSEGS
    5650; Macro saving all segment registers on the stack.
    5751; @param 1  full width register name
    58 ; @param 2  16-bit register name for \a 1.
    5952
    6053;; @def MYPOPSEGS
    6154; Macro restoring all segment registers on the stack
    6255; @param 1  full width register name
    63 ; @param 2  16-bit register name for \a 1.
    6456
    6557  ; Load the corresponding guest MSR (trashes rdx & rcx)
     
    7062  %endmacro
    7163
    72   ; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
     64  ; Save a guest MSR (trashes rdx & rcx)
    7365  ; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
    74   %macro LOADHOSTMSREX 2
     66  %macro SAVEGUESTMSR 2
    7567    mov     rcx, %1
    7668    rdmsr
     
    7971  %endmacro
    8072
    81  %ifdef ASM_CALL64_GCC
    82   %macro MYPUSHAD 0
    83     push    r15
    84     push    r14
    85     push    r13
    86     push    r12
    87     push    rbx
    88   %endmacro
    89   %macro MYPOPAD 0
    90     pop     rbx
    91     pop     r12
    92     pop     r13
    93     pop     r14
    94     pop     r15
    95   %endmacro
    96 
    97  %else ; ASM_CALL64_MSC
    98   %macro MYPUSHAD 0
    99     push    r15
    100     push    r14
    101     push    r13
    102     push    r12
    103     push    rbx
    104     push    rsi
    105     push    rdi
    106   %endmacro
    107   %macro MYPOPAD 0
    108     pop     rdi
    109     pop     rsi
    110     pop     rbx
    111     pop     r12
    112     pop     r13
    113     pop     r14
    114     pop     r15
    115   %endmacro
    116  %endif
    117 
    118 ; trashes rax, rdx & rcx
    119  %macro MYPUSHSEGS 2
    120     mov     %2, es
     73 %macro MYPUSHSEGS 1
     74    mov     %1, es
    12175    push    %1
    122     mov     %2, ds
     76    mov     %1, ds
    12377    push    %1
    124 
    125     ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
    126     mov     ecx, MSR_K8_FS_BASE
    127     rdmsr
    128     push    rdx
    129     push    rax
    130     push    fs
    131 
    132     ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
    133     mov     ecx, MSR_K8_GS_BASE
    134     rdmsr
    135     push    rdx
    136     push    rax
    137     push    gs
    138  %endmacro
    139 
    140 ; trashes rax, rdx & rcx
    141  %macro MYPOPSEGS 2
    142     ; Note: do not step through this code with a debugger!
    143     pop     gs
    144     pop     rax
    145     pop     rdx
    146     mov     ecx, MSR_K8_GS_BASE
    147     wrmsr
    148 
    149     pop     fs
    150     pop     rax
    151     pop     rdx
    152     mov     ecx, MSR_K8_FS_BASE
    153     wrmsr
    154     ; Now it's safe to step again
    155 
     78 %endmacro
     79
     80 %macro MYPOPSEGS 1
    15681    pop     %1
    157     mov     ds, %2
     82    mov     ds, %1
    15883    pop     %1
    159     mov     es, %2
    160  %endmacro
    161 
    162 
     84    mov     es, %1
     85 %endmacro
     86
     87; trashes rax & rdx
     88 %macro VMCSWRITE 2
     89    mov     rdx, %2
     90    mov     eax, %1
     91    vmwrite rax, rdx
     92 %endmacro
     93
     94; trashes rax & rdx
     95 %macro VMCSREAD 2
     96    mov     eax, %1
     97    vmread  rdx, rax    ; [review note: changeset shows 'vmwrite rax, rdx' here, but VMCSREAD must use vmread (destination register first) — as committed, the macro writes to the VMCS field and returns an uninitialized rdx]
     98    mov     %2, rdx
     99 %endmacro
    163100
    164101BEGINCODE
     
    176113    mov     rbp, rsp
    177114
    178     pushf
    179     cli
    180 
    181     ; Have to sync half the guest state as we can't access most of the 64 bits state. Sigh
    182 ;    VMCSWRITE VMX_VMCS64_GUEST_CS_BASE,         [rsi + CPUMCTX.csHid.u64Base]
    183 ;    VMCSWRITE VMX_VMCS64_GUEST_DS_BASE,         [rsi + CPUMCTX.dsHid.u64Base]
    184 ;    VMCSWRITE VMX_VMCS64_GUEST_ES_BASE,         [rsi + CPUMCTX.esHid.u64Base]
    185 ;    VMCSWRITE VMX_VMCS64_GUEST_FS_BASE,         [rsi + CPUMCTX.fsHid.u64Base]
    186 ;    VMCSWRITE VMX_VMCS64_GUEST_GS_BASE,         [rsi + CPUMCTX.gsHid.u64Base]
    187 ;    VMCSWRITE VMX_VMCS64_GUEST_SS_BASE,         [rsi + CPUMCTX.ssHid.u64Base]
    188 ;    VMCSWRITE VMX_VMCS64_GUEST_LDTR_BASE,       [rsi + CPUMCTX.ldtrHid.u64Base]
    189 ;    VMCSWRITE VMX_VMCS64_GUEST_GDTR_BASE,       [rsi + CPUMCTX.gdtrHid.u64Base]
    190 ;    VMCSWRITE VMX_VMCS64_GUEST_IDTR_BASE,       [rsi + CPUMCTX.idtrHid.u64Base]
    191 ;    VMCSWRITE VMX_VMCS64_GUEST_TR_BASE,         [rsi + CPUMCTX.trHid.u64Base]
    192 ;   
    193 ;    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_EIP,    [rsi + CPUMCTX.SysEnter.eip]
    194 ;    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_ESP,    [rsi + CPUMCTX.SysEnter.esp]
    195 ;   
    196 ;    VMCSWRITE VMX_VMCS64_GUEST_RIP,             [rsi + CPUMCTX.eip]
    197 ;    VMCSWRITE VMX_VMCS64_GUEST_RSP,             [rsi + CPUMCTX.esp]
    198    
    199 
     115    ; Have to sync half the guest state as we can't access most of the 64 bits state in 32 bits mode. Sigh.
     116    VMCSWRITE VMX_VMCS64_GUEST_CS_BASE,         [rsi + CPUMCTX.csHid.u64Base]
     117    VMCSWRITE VMX_VMCS64_GUEST_DS_BASE,         [rsi + CPUMCTX.dsHid.u64Base]
     118    VMCSWRITE VMX_VMCS64_GUEST_ES_BASE,         [rsi + CPUMCTX.esHid.u64Base]
     119    VMCSWRITE VMX_VMCS64_GUEST_FS_BASE,         [rsi + CPUMCTX.fsHid.u64Base]
     120    VMCSWRITE VMX_VMCS64_GUEST_GS_BASE,         [rsi + CPUMCTX.gsHid.u64Base]
     121    VMCSWRITE VMX_VMCS64_GUEST_SS_BASE,         [rsi + CPUMCTX.ssHid.u64Base]
     122    VMCSWRITE VMX_VMCS64_GUEST_GDTR_BASE,       [rsi + CPUMCTX.gdtr.pGdt]
     123    VMCSWRITE VMX_VMCS64_GUEST_IDTR_BASE,       [rsi + CPUMCTX.idtr.pIdt]
     124    VMCSWRITE VMX_VMCS64_GUEST_LDTR_BASE,       [rsi + CPUMCTX.ldtrHid.u64Base]
     125    VMCSWRITE VMX_VMCS64_GUEST_TR_BASE,         [rsi + CPUMCTX.trHid.u64Base]
     126   
     127    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_EIP,    [rsi + CPUMCTX.SysEnter.eip]
     128    VMCSWRITE VMX_VMCS64_GUEST_SYSENTER_ESP,    [rsi + CPUMCTX.SysEnter.esp]
     129   
     130    VMCSWRITE VMX_VMCS64_GUEST_RIP,             [rsi + CPUMCTX.eip]
     131    VMCSWRITE VMX_VMCS64_GUEST_RSP,             [rsi + CPUMCTX.esp]
     132   
     133    ; Save the host state that's relevant in the temporary 64 bits mode
     134    mov     rax, cr0
     135    VMCSWRITE VMX_VMCS_HOST_CR0,                rax
     136    mov     rax, cr3
     137    VMCSWRITE VMX_VMCS_HOST_CR3,                rax
     138    mov     rax, cr4
     139    VMCSWRITE VMX_VMCS_HOST_CR4,                rax
     140    mov     rax, cs
     141    VMCSWRITE VMX_VMCS_HOST_FIELD_CS,           rax
     142    mov     rax, ss
     143    VMCSWRITE VMX_VMCS_HOST_FIELD_SS,           rax
     144
     145    sub     rsp, 8*2
     146    sgdt    [rsp]         
     147    mov     rax, [rsp+2]
     148    VMCSWRITE VMX_VMCS_HOST_GDTR_BASE,          rax
     149    add     rsp, 8*2
     150   
     151    ; hopefully we can ignore TR (we restore it anyway on the way back to 32 bits mode)
     152   
    200153    ;/* First we have to save some final CPU context registers. */
    201     lea     rax, [.vmlaunch64_done wrt rip]   
    202     push    rax
     154    lea     rdx, [.vmlaunch64_done wrt rip]   
    203155    mov     rax, VMX_VMCS_HOST_RIP  ;/* return address (too difficult to continue after VMLAUNCH?) */
    204     vmwrite rax, [rsp]
     156    vmwrite rax, rdx
    205157    ;/* Note: assumes success... */
    206     add     rsp, 8
    207158
    208159    ;/* Manual save and restore:
     
    218169    ; */
    219170
    220     ;/* Save all general purpose host registers. */
    221     MYPUSHAD
    222 
    223     ;/* Save the Guest CPU context pointer. */
    224     ; pCtx    already in rsi
    225 
    226171    ;/* Save segment registers */
    227     ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case)
    228     MYPUSHSEGS rax, ax
    229 
    230     ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
     172    MYPUSHSEGS rax
     173
     174    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    231175    ;; @todo use the automatic load feature for MSRs
    232176    LOADGUESTMSR MSR_K8_LSTAR,          CPUMCTX.msrLSTAR
     177%if 0  ; not supported on Intel CPUs
     178    LOADGUESTMSR MSR_K8_CSTAR,          CPUMCTX.msrCSTAR
     179%endif
    233180    LOADGUESTMSR MSR_K6_STAR,           CPUMCTX.msrSTAR
    234181    LOADGUESTMSR MSR_K8_SF_MASK,        CPUMCTX.msrSFMASK
     
    237184    ; Save the pCtx pointer
    238185    push    rsi
    239 
    240     ; Save LDTR
    241     xor     eax, eax
    242     sldt    ax
    243     push    rax
    244 
    245     ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    246     sub     rsp, 8*2
    247     sgdt    [rsp]
    248 
    249     sub     rsp, 8*2
    250     sidt    [rsp]
    251186
    252187    ; Restore CR2
     
    286221    jz      near .vmstart64_start_failed
    287222
    288     ; Restore base and limit of the IDTR & GDTR
    289     lidt    [rsp]
    290     add     rsp, 8*2
    291     lgdt    [rsp]
    292     add     rsp, 8*2
    293 
    294223    push    rdi
    295224    mov     rdi, [rsp + 8 * 2]         ; pCtx
     
    313242    mov     qword [rdi + CPUMCTX.edi], rax
    314243
    315     pop     rax         ; saved LDTR
    316     lldt    ax
    317 
     244    pop     rsi         ; pCtx (needed in rsi by the macros below)
     245
     246    ;; @todo use the automatic load feature for MSRs
     247    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     248
     249    ; Restore segment registers
     250    MYPOPSEGS rax
     251
     252    mov     eax, VINF_SUCCESS
     253
     254.vmstart64_end:
     255    pop     rbp
     256    ret
     257
     258
     259.vmstart64_invalid_vmxon_ptr:
    318260    pop     rsi         ; pCtx (needed in rsi by the macros below)
    319261
    320262    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    321263    ;; @todo use the automatic load feature for MSRs
    322     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     264    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    323265
    324266    ; Restore segment registers
    325     MYPOPSEGS rax, ax
    326 
    327     ; Restore general purpose registers
    328     MYPOPAD
    329 
    330     mov     eax, VINF_SUCCESS
    331 
    332 .vmstart64_end:
    333     popf
    334     pop     rbp
    335     ret
    336 
    337 
    338 .vmstart64_invalid_vmxon_ptr:
    339     ; Restore base and limit of the IDTR & GDTR
    340     lidt    [rsp]
    341     add     rsp, 8*2
    342     lgdt    [rsp]
    343     add     rsp, 8*2
    344 
    345     pop     rax         ; saved LDTR
    346     lldt    ax
    347 
     267    MYPOPSEGS rax
     268
     269    ; Restore all general purpose host registers.
     270    mov     eax, VERR_VMX_INVALID_VMXON_PTR
     271    jmp     .vmstart64_end
     272
     273.vmstart64_start_failed:
    348274    pop     rsi         ; pCtx (needed in rsi by the macros below)
    349275
    350276    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    351277    ;; @todo use the automatic load feature for MSRs
    352     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     278    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    353279
    354280    ; Restore segment registers
    355     MYPOPSEGS rax, ax
     281    MYPOPSEGS rax
    356282
    357283    ; Restore all general purpose host registers.
    358     MYPOPAD
    359     mov     eax, VERR_VMX_INVALID_VMXON_PTR
    360     jmp     .vmstart64_end
    361 
    362 .vmstart64_start_failed:
    363     ; Restore base and limit of the IDTR & GDTR
    364     lidt    [rsp]
    365     add     rsp, 8*2
    366     lgdt    [rsp]
    367     add     rsp, 8*2
    368 
    369     pop     rax         ; saved LDTR
    370     lldt    ax
    371 
    372     pop     rsi         ; pCtx (needed in rsi by the macros below)
    373 
    374     ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
    375     ;; @todo use the automatic load feature for MSRs
    376     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    377 
    378     ; Restore segment registers
    379     MYPOPSEGS rax, ax
    380 
    381     ; Restore all general purpose host registers.
    382     MYPOPAD
    383284    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    384285    jmp     .vmstart64_end
     
    408309    ; * - DR7 (reset to 0x400)
    409310    ; */
    410 
    411     ;/* Save all general purpose host registers. */
    412     MYPUSHAD
    413311
    414312    ;/* Save the Guest CPU context pointer. */
     
    482380    mov     qword [rax + CPUMCTX.r14], r14
    483381    mov     qword [rax + CPUMCTX.r15], r15
    484 
    485     ; Restore general purpose registers
    486     MYPOPAD
    487382
    488383    mov     eax, VINF_SUCCESS
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette