VirtualBox

Changeset 55027 in vbox for trunk/src/VBox/VMM/VMMRC


Timestamp: Mar 31, 2015 12:14:36 PM (10 years ago)
Author: vboxsync
Message: CPUMRCA.asm: indent, remove unused+duplicated CLEANFPU macro.

File: 1 edited

Legend: unchanged lines are shown with a leading space, lines removed in r55027 with '-', lines added with '+'.
  • trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm

--- trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm (r54899)
+++ trunk/src/VBox/VMM/VMMRC/CPUMRCA.asm (r55027)
@@ -45,29 +45,4 @@
 BEGINCODE
 
-;; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
-; Cleans the FPU state, if necessary, before restoring the FPU.
-;
-; This macro ASSUMES CR0.TS is not set!
-; @remarks Trashes xAX!!
-; Changes here should also be reflected in CPUMR0A.asm's copy!
-%macro CLEANFPU 0
-    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
-    jz      .nothing_to_clean
-
-    xor     eax, eax
-    fnstsw  ax               ; Get FSW
-    test    eax, RT_BIT(7)   ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
-                             ; while clearing & loading the FPU bits in 'clean_fpu'
-    jz      clean_fpu
-    fnclex
-
-.clean_fpu:
-    ffree   st7              ; Clear FPU stack register(7)'s tag entry to prevent overflow if a wraparound occurs
-                             ; for the upcoming push (load)
-    fild    dword [xDX + CPUMCPU.Guest.XState] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
-
-.nothing_to_clean:
-%endmacro
-
 
 ;;
@@ -87,46 +62,46 @@
 align 16
 BEGINPROC   cpumHandleLazyFPUAsm
-    ;
-    ; Figure out what to do.
-    ;
-    ; There are two basic actions:
-    ;   1. Save host fpu and restore guest fpu.
-    ;   2. Generate guest trap.
-    ;
-    ; When entering the hypervisor we'll always enable MP (for proper wait
-    ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
-    ; is taken from the guest OS in order to get proper SSE handling.
-    ;
-    ;
-    ; Actions taken depending on the guest CR0 flags:
-    ;
-    ;   3    2    1
-    ;  TS | EM | MP | FPUInstr | WAIT :: VMM Action
-    ; ------------------------------------------------------------------------
-    ;   0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
-    ;   0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
-    ;   0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC;
-    ;   0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
-    ;   1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
-    ;   1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
-    ;   1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
-    ;   1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.
-
-    ;
-    ; Before taking any of these actions we're checking if we have already
-    ; loaded the GC FPU. Because if we have, this is an trap for the guest - raw ring-3.
-    ;
+        ;
+        ; Figure out what to do.
+        ;
+        ; There are two basic actions:
+        ;   1. Save host fpu and restore guest fpu.
+        ;   2. Generate guest trap.
+        ;
+        ; When entering the hypervisor we'll always enable MP (for proper wait
+        ; trapping) and TS (for intercepting all fpu/mmx/sse stuff). The EM flag
+        ; is taken from the guest OS in order to get proper SSE handling.
+        ;
+        ;
+        ; Actions taken depending on the guest CR0 flags:
+        ;
+        ;   3    2    1
+        ;  TS | EM | MP | FPUInstr | WAIT :: VMM Action
+        ; ------------------------------------------------------------------------
+        ;   0 |  0 |  0 | Exec     | Exec :: Clear TS & MP, Save HC, Load GC.
+        ;   0 |  0 |  1 | Exec     | Exec :: Clear TS, Save HC, Load GC.
+        ;   0 |  1 |  0 | #NM      | Exec :: Clear TS & MP, Save HC, Load GC;
+        ;   0 |  1 |  1 | #NM      | Exec :: Clear TS, Save HC, Load GC.
+        ;   1 |  0 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already cleared.)
+        ;   1 |  0 |  1 | #NM      | #NM  :: Go to host taking trap there.
+        ;   1 |  1 |  0 | #NM      | Exec :: Clear MP, Save HC, Load GC. (EM is already set.)
+        ;   1 |  1 |  1 | #NM      | #NM  :: Go to host taking trap there.
+
+        ;
+        ; Before taking any of these actions we're checking if we have already
+        ; loaded the GC FPU. Because if we have, this is an trap for the guest - raw ring-3.
+        ;
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS
-    mov     xDX, rcx
+        mov     xDX, rcx
  %else
-    mov     xDX, rdi
+        mov     xDX, rdi
  %endif
 %else
-    mov     xDX, dword [esp + 4]
-%endif
-    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz      hlfpua_not_loaded
-    jmp     hlfpua_to_host
+        mov     xDX, dword [esp + 4]
+%endif
+        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
+        jz      hlfpua_not_loaded
+        jmp     hlfpua_to_host
 
     ;
@@ -135,40 +110,40 @@
 align 16
 hlfpua_not_loaded:
-    mov     eax, [xDX + CPUMCPU.Guest.cr0]
-    and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
+        mov     eax, [xDX + CPUMCPU.Guest.cr0]
+        and     eax, X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
 %ifdef RT_ARCH_AMD64
-    lea     r8, [hlfpuajmp1 wrt rip]
-    jmp     qword [rax*4 + r8]
+        lea     r8, [hlfpuajmp1 wrt rip]
+        jmp     qword [rax*4 + r8]
 %else
-    jmp     dword [eax*2 + hlfpuajmp1]
+        jmp     dword [eax*2 + hlfpuajmp1]
 %endif
 align 16
 ;; jump table using fpu related cr0 flags as index.
 hlfpuajmp1:
-    RTCCPTR_DEF hlfpua_switch_fpu_ctx
-    RTCCPTR_DEF hlfpua_switch_fpu_ctx
-    RTCCPTR_DEF hlfpua_switch_fpu_ctx
-    RTCCPTR_DEF hlfpua_switch_fpu_ctx
-    RTCCPTR_DEF hlfpua_switch_fpu_ctx
-    RTCCPTR_DEF hlfpua_to_host
-    RTCCPTR_DEF hlfpua_switch_fpu_ctx
-    RTCCPTR_DEF hlfpua_to_host
+        RTCCPTR_DEF hlfpua_switch_fpu_ctx
+        RTCCPTR_DEF hlfpua_switch_fpu_ctx
+        RTCCPTR_DEF hlfpua_switch_fpu_ctx
+        RTCCPTR_DEF hlfpua_switch_fpu_ctx
+        RTCCPTR_DEF hlfpua_switch_fpu_ctx
+        RTCCPTR_DEF hlfpua_to_host
+        RTCCPTR_DEF hlfpua_switch_fpu_ctx
+        RTCCPTR_DEF hlfpua_to_host
 ;; and mask for cr0.
 hlfpu_afFlags:
-    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
-    RTCCPTR_DEF ~(X86_CR0_TS)
-    RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
-    RTCCPTR_DEF ~(X86_CR0_TS)
-    RTCCPTR_DEF ~(X86_CR0_MP)
-    RTCCPTR_DEF 0
-    RTCCPTR_DEF ~(X86_CR0_MP)
-    RTCCPTR_DEF 0
-
-    ;
-    ; Action - switch FPU context and change cr0 flags.
-    ;
+        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
+        RTCCPTR_DEF ~(X86_CR0_TS)
+        RTCCPTR_DEF ~(X86_CR0_TS | X86_CR0_MP)
+        RTCCPTR_DEF ~(X86_CR0_TS)
+        RTCCPTR_DEF ~(X86_CR0_MP)
+        RTCCPTR_DEF 0
+        RTCCPTR_DEF ~(X86_CR0_MP)
+        RTCCPTR_DEF 0
+
+        ;
+        ; Action - switch FPU context and change cr0 flags.
+        ;
 align 16
 hlfpua_switch_fpu_ctx:
-    ; Paranoia. This function was previously used in ring-0, not any longer.
+        ; Paranoia. This function was previously used in ring-0, not any longer.
 %ifdef IN_RING3
 %error "This function is not written for ring-3"
@@ -178,65 +153,65 @@
 %endif
 
-    mov     xCX, cr0
+        mov     xCX, cr0
 %ifdef RT_ARCH_AMD64
-    lea     r8, [hlfpu_afFlags wrt rip]
-    and     rcx, [rax*4 + r8]                   ; calc the new cr0 flags.
+        lea     r8, [hlfpu_afFlags wrt rip]
+        and     rcx, [rax*4 + r8]       ; calc the new cr0 flags.
 %else
-    and     ecx, [eax*2 + hlfpu_afFlags]        ; calc the new cr0 flags.
-%endif
-    mov     xAX, cr0
-    and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, xAX                            ; clear flags so we don't trap here.
+        and     ecx, [eax*2 + hlfpu_afFlags] ; calc the new cr0 flags.
+%endif
+        mov     xAX, cr0
+        and     xAX, ~(X86_CR0_TS | X86_CR0_EM)
+        mov     cr0, xAX                ; clear flags so we don't trap here.
 %ifndef RT_ARCH_AMD64
-    mov     eax, edx                            ; Calculate the PCPUM pointer
-    sub     eax, [edx + CPUMCPU.offCPUM]
-    test    dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
-    jz short hlfpua_no_fxsave
+        mov     eax, edx                ; Calculate the PCPUM pointer
+        sub     eax, [edx + CPUMCPU.offCPUM]
+        test    dword [eax + CPUM.CPUFeatures.edx], X86_CPUID_FEATURE_EDX_FXSR
+        jz short hlfpua_no_fxsave
 %endif
 
 %ifdef RT_ARCH_AMD64
-    ; Use explicit REX prefix. See @bugref{6398}.
-    o64 fxsave  [xDX + CPUMCPU.Host.XState]
+        ; Use explicit REX prefix. See @bugref{6398}.
+        o64 fxsave  [xDX + CPUMCPU.Host.XState]
 %else
-    fxsave  [xDX + CPUMCPU.Host.XState]
-%endif
-    or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
-    fxrstor [xDX + CPUMCPU.Guest.XState]        ; raw-mode guest is always 32-bit. See @bugref{7138}.
+        fxsave  [xDX + CPUMCPU.Host.XState]
+%endif
+        or      dword [xDX + CPUMCPU.fUseFlags], (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM)
+        fxrstor [xDX + CPUMCPU.Guest.XState] ; raw-mode guest is always 32-bit. See @bugref{7138}.
 
 hlfpua_finished_switch:
 
-    ; Load new CR0 value.
-    ;; @todo Optimize the many unconditional CR0 writes.
-    mov     cr0, xCX                            ; load the new cr0 flags.
-
-    ; return continue execution.
-    xor     eax, eax
-    ret
+        ; Load new CR0 value.
+        ;; @todo Optimize the many unconditional CR0 writes.
+        mov     cr0, xCX                ; load the new cr0 flags.
+
+        ; return continue execution.
+        xor     eax, eax
+        ret
 
 %ifndef RT_ARCH_AMD64
 ; legacy support.
 hlfpua_no_fxsave:
-    fnsave  [xDX + CPUMCPU.Host.XState]
-    or      dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
-    mov     eax, [xDX + CPUMCPU.Guest.XState]   ; control word
-    not     eax                                 ; 1 means exception ignored (6 LS bits)
-    and     eax, byte 03Fh                      ; 6 LS bits only
-    test    eax, [xDX + CPUMCPU.Guest.XState + 4] ; status word
-    jz short hlfpua_no_exceptions_pending
-    ; technically incorrect, but we certainly don't want any exceptions now!!
-    and     dword [xDX + CPUMCPU.Guest.XState + 4], ~03Fh
+        fnsave  [xDX + CPUMCPU.Host.XState]
+        or      dword [xDX + CPUMCPU.fUseFlags], dword (CPUM_USED_FPU | CPUM_USED_FPU_SINCE_REM) ; yasm / nasm
+        mov     eax, [xDX + CPUMCPU.Guest.XState]   ; control word
+        not     eax                                 ; 1 means exception ignored (6 LS bits)
+        and     eax, byte 03Fh                      ; 6 LS bits only
+        test    eax, [xDX + CPUMCPU.Guest.XState + 4] ; status word
+        jz short hlfpua_no_exceptions_pending
+        ; technically incorrect, but we certainly don't want any exceptions now!!
+        and     dword [xDX + CPUMCPU.Guest.XState + 4], ~03Fh
 hlfpua_no_exceptions_pending:
-    frstor  [xDX + CPUMCPU.Guest.XState]
-    jmp near hlfpua_finished_switch
+        frstor  [xDX + CPUMCPU.Guest.XState]
+        jmp near hlfpua_finished_switch
 %endif ; !RT_ARCH_AMD64
 
 
-    ;
-    ; Action - Generate Guest trap.
-    ;
+        ;
+        ; Action - Generate Guest trap.
+        ;
 hlfpua_action_4:
 hlfpua_to_host:
-    mov     eax, VINF_EM_RAW_GUEST_TRAP
-    ret
+        mov     eax, VINF_EM_RAW_GUEST_TRAP
+        ret
 ENDPROC     cpumHandleLazyFPUAsm
 
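A note on the jump-table indexing in the hunks above: X86_CR0_MP, X86_CR0_EM and X86_CR0_TS are CR0 bits 1, 2 and 3, so the masked guest CR0 value is already twice the table index. That is why the code scales it by 4 for the 8-byte entries on AMD64 (`[rax*4 + r8]`) and by 2 for the 4-byte entries on x86 (`[eax*2 + hlfpuajmp1]`), and the same value indexes hlfpu_afFlags to pick which of TS/MP to clear. A minimal stand-alone C sketch (not VirtualBox code; the string table simply mirrors hlfpuajmp1 above) that reproduces the lookup:

    #include <stdint.h>
    #include <stdio.h>

    #define X86_CR0_MP  0x00000002u  /* bit 1 */
    #define X86_CR0_EM  0x00000004u  /* bit 2 */
    #define X86_CR0_TS  0x00000008u  /* bit 3 */

    int main(void)
    {
        /* Same row order as hlfpuajmp1; entries 5 and 7 are the "go to host" cases. */
        static const char * const s_apszAction[8] =
        {
            "hlfpua_switch_fpu_ctx", "hlfpua_switch_fpu_ctx", "hlfpua_switch_fpu_ctx",
            "hlfpua_switch_fpu_ctx", "hlfpua_switch_fpu_ctx", "hlfpua_to_host",
            "hlfpua_switch_fpu_ctx", "hlfpua_to_host"
        };

        for (uint32_t uCr0 = 0; uCr0 < 16; uCr0 += 2)
        {
            uint32_t uMasked = uCr0 & (X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
            /* uMasked == 2 * index, so the assembly multiplies it by 4 (8-byte
               pointers on AMD64) or by 2 (4-byte pointers on x86). */
            printf("cr0 & (MP|EM|TS) = %#04x -> entry %u -> %s\n",
                   uMasked, uMasked / 2, s_apszAction[uMasked / 2]);
        }
        return 0;
    }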
     
@@ -258,45 +233,45 @@
 align 16
 BEGINPROC_EXPORTED CPUMGCCallGuestTrapHandler
-    mov     ebp, esp
-
-    ; construct iret stack frame
-    push    dword [ebp + 20]                ; SS
-    push    dword [ebp + 24]                ; ESP
-    push    dword [ebp + 16]                ; EFLAGS
-    push    dword [ebp + 8]                 ; CS
-    push    dword [ebp + 12]                ; EIP
-
-    ;
-    ; enable WP
-    ;
+        mov     ebp, esp
+
+        ; construct iret stack frame
+        push    dword [ebp + 20]        ; SS
+        push    dword [ebp + 24]        ; ESP
+        push    dword [ebp + 16]        ; EFLAGS
+        push    dword [ebp + 8]         ; CS
+        push    dword [ebp + 12]        ; EIP
+
+        ;
+        ; enable WP
+        ;
 %ifdef ENABLE_WRITE_PROTECTION
-    mov     eax, cr0
-    or      eax, X86_CR0_WRITE_PROTECT
-    mov     cr0, eax
-%endif
-
-    ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
-    mov     ebp, [ebp + 4]                  ; pRegFrame
-    mov     ebx, [ebp + CPUMCTXCORE.ebx]
-    mov     ecx, [ebp + CPUMCTXCORE.ecx]
-    mov     edx, [ebp + CPUMCTXCORE.edx]
-    mov     esi, [ebp + CPUMCTXCORE.esi]
-    mov     edi, [ebp + CPUMCTXCORE.edi]
-
-    ;; @todo  load segment registers *before* enabling WP.
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
-    mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
-    mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
-    mov     es, [ebp + CPUMCTXCORE.es.Sel]
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
-    mov     ds, [ebp + CPUMCTXCORE.ds.Sel]
-
-    mov     eax, [ebp + CPUMCTXCORE.eax]
-    mov     ebp, [ebp + CPUMCTXCORE.ebp]
-
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
-    iret
+        mov     eax, cr0
+        or      eax, X86_CR0_WRITE_PROTECT
+        mov     cr0, eax
+%endif
+
+        ; restore CPU context (all except cs, eip, ss, esp & eflags; which are restored or overwritten by iret)
+        mov     ebp, [ebp + 4]          ; pRegFrame
+        mov     ebx, [ebp + CPUMCTXCORE.ebx]
+        mov     ecx, [ebp + CPUMCTXCORE.ecx]
+        mov     edx, [ebp + CPUMCTXCORE.edx]
+        mov     esi, [ebp + CPUMCTXCORE.esi]
+        mov     edi, [ebp + CPUMCTXCORE.edi]
+
+        ;; @todo  load segment registers *before* enabling WP.
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS | CPUM_HANDLER_CTXCORE_IN_EBP
+        mov     gs, [ebp + CPUMCTXCORE.gs.Sel]
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS | CPUM_HANDLER_CTXCORE_IN_EBP
+        mov     fs, [ebp + CPUMCTXCORE.fs.Sel]
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES | CPUM_HANDLER_CTXCORE_IN_EBP
+        mov     es, [ebp + CPUMCTXCORE.es.Sel]
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS | CPUM_HANDLER_CTXCORE_IN_EBP
+        mov     ds, [ebp + CPUMCTXCORE.ds.Sel]
+
+        mov     eax, [ebp + CPUMCTXCORE.eax]
+        mov     ebp, [ebp + CPUMCTXCORE.ebp]
+
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+        iret
 ENDPROC CPUMGCCallGuestTrapHandler
 
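For reference, the five dwords pushed by CPUMGCCallGuestTrapHandler above form the standard inter-privilege IRET frame; IRET pops them in the reverse of the push order. A hedged sketch of that layout in C (illustrative only, not a VirtualBox type; the fields are naturally aligned, so there is no padding):

    #include <stdint.h>

    /* Layout of the frame built by pushing SS, ESP, EFLAGS, CS, EIP in that
       order.  IRET always pops eip/cs/eflags, and also esp/ss when returning
       to a less privileged ring. */
    typedef struct IRETSTACKFRAME32
    {
        uint32_t eip;       /* pushed last, sits at the lowest address */
        uint32_t cs;
        uint32_t eflags;
        uint32_t esp;       /* only consumed on an inter-privilege return */
        uint32_t ss;
    } IRETSTACKFRAME32;

    _Static_assert(sizeof(IRETSTACKFRAME32) == 20, "five dwords");

CPUMGCCallV86Code in the next hunk builds the longer virtual-8086 variant of this frame; see the sketch after that hunk.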
     
@@ -313,37 +288,37 @@
 align 16
 BEGINPROC CPUMGCCallV86Code
-    mov     ebp, [esp + 4]                  ; pRegFrame
-
-    ; construct iret stack frame
-    push    dword [ebp + CPUMCTXCORE.gs.Sel]
-    push    dword [ebp + CPUMCTXCORE.fs.Sel]
-    push    dword [ebp + CPUMCTXCORE.ds.Sel]
-    push    dword [ebp + CPUMCTXCORE.es.Sel]
-    push    dword [ebp + CPUMCTXCORE.ss.Sel]
-    push    dword [ebp + CPUMCTXCORE.esp]
-    push    dword [ebp + CPUMCTXCORE.eflags]
-    push    dword [ebp + CPUMCTXCORE.cs.Sel]
-    push    dword [ebp + CPUMCTXCORE.eip]
-
-    ;
-    ; enable WP
-    ;
+        mov     ebp, [esp + 4]          ; pRegFrame
+
+        ; construct iret stack frame
+        push    dword [ebp + CPUMCTXCORE.gs.Sel]
+        push    dword [ebp + CPUMCTXCORE.fs.Sel]
+        push    dword [ebp + CPUMCTXCORE.ds.Sel]
+        push    dword [ebp + CPUMCTXCORE.es.Sel]
+        push    dword [ebp + CPUMCTXCORE.ss.Sel]
+        push    dword [ebp + CPUMCTXCORE.esp]
+        push    dword [ebp + CPUMCTXCORE.eflags]
+        push    dword [ebp + CPUMCTXCORE.cs.Sel]
+        push    dword [ebp + CPUMCTXCORE.eip]
+
+        ;
+        ; enable WP
+        ;
 %ifdef ENABLE_WRITE_PROTECTION
-    mov     eax, cr0
-    or      eax, X86_CR0_WRITE_PROTECT
-    mov     cr0, eax
-%endif
-
-    ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
-    mov     eax, [ebp + CPUMCTXCORE.eax]
-    mov     ebx, [ebp + CPUMCTXCORE.ebx]
-    mov     ecx, [ebp + CPUMCTXCORE.ecx]
-    mov     edx, [ebp + CPUMCTXCORE.edx]
-    mov     esi, [ebp + CPUMCTXCORE.esi]
-    mov     edi, [ebp + CPUMCTXCORE.edi]
-    mov     ebp, [ebp + CPUMCTXCORE.ebp]
-
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
-    iret
+        mov     eax, cr0
+        or      eax, X86_CR0_WRITE_PROTECT
+        mov     cr0, eax
+%endif
+
+        ; restore CPU context (all except cs, eip, ss, esp, eflags, ds, es, fs & gs; which are restored or overwritten by iret)
+        mov     eax, [ebp + CPUMCTXCORE.eax]
+        mov     ebx, [ebp + CPUMCTXCORE.ebx]
+        mov     ecx, [ebp + CPUMCTXCORE.ecx]
+        mov     edx, [ebp + CPUMCTXCORE.edx]
+        mov     esi, [ebp + CPUMCTXCORE.esi]
+        mov     edi, [ebp + CPUMCTXCORE.edi]
+        mov     ebp, [ebp + CPUMCTXCORE.ebp]
+
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+        iret
 ENDPROC CPUMGCCallV86Code
 
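The V86 routine pushes four extra selectors because IRET, when the restored EFLAGS image has VM (bit 17) set, also pops ES, DS, FS and GS after SS. Extending the illustrative struct from the previous note (again an assumption-labelled sketch, not a VirtualBox type):

    #include <stdint.h>

    /* Frame consumed by IRET when returning to virtual-8086 mode. */
    typedef struct IRETSTACKFRAMEV86
    {
        uint32_t eip;
        uint32_t cs;
        uint32_t eflags;    /* must have the VM bit set for a V86 return */
        uint32_t esp;
        uint32_t ss;
        uint32_t es;        /* the extra data selectors, popped after SS */
        uint32_t ds;
        uint32_t fs;
        uint32_t gs;
    } IRETSTACKFRAMEV86;

    _Static_assert(sizeof(IRETSTACKFRAMEV86) == 36, "nine dwords");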
     
@@ -363,73 +338,73 @@
 BEGINPROC_EXPORTED CPUMGCResumeGuest
 %ifdef VBOX_STRICT
-    ; Call CPUM to check sanity.
-    push    edx
-    mov     edx, IMP(g_VM)
-    push    edx
-    call    NAME(CPUMRCAssertPreExecutionSanity)
-    add     esp, 4
-    pop     edx
-%endif
-
-    ;
-    ; Setup iretd
-    ;
-    push    dword [edx + CPUMCPU.Guest.ss.Sel]
-    push    dword [edx + CPUMCPU.Guest.esp]
-    push    dword [edx + CPUMCPU.Guest.eflags]
-    push    dword [edx + CPUMCPU.Guest.cs.Sel]
-    push    dword [edx + CPUMCPU.Guest.eip]
-
-    ;
-    ; Restore registers.
-    ;
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
-    mov     es,  [edx + CPUMCPU.Guest.es.Sel]
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
-    mov     fs,  [edx + CPUMCPU.Guest.fs.Sel]
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
-    mov     gs,  [edx + CPUMCPU.Guest.gs.Sel]
+        ; Call CPUM to check sanity.
+        push    edx
+        mov     edx, IMP(g_VM)
+        push    edx
+        call    NAME(CPUMRCAssertPreExecutionSanity)
+        add     esp, 4
+        pop     edx
+%endif
+
+        ;
+        ; Setup iretd
+        ;
+        push    dword [edx + CPUMCPU.Guest.ss.Sel]
+        push    dword [edx + CPUMCPU.Guest.esp]
+        push    dword [edx + CPUMCPU.Guest.eflags]
+        push    dword [edx + CPUMCPU.Guest.cs.Sel]
+        push    dword [edx + CPUMCPU.Guest.eip]
+
+        ;
+        ; Restore registers.
+        ;
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_ES
+        mov     es,  [edx + CPUMCPU.Guest.es.Sel]
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_FS
+        mov     fs,  [edx + CPUMCPU.Guest.fs.Sel]
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_GS
+        mov     gs,  [edx + CPUMCPU.Guest.gs.Sel]
 
 %ifdef VBOX_WITH_STATISTICS
-    ;
-    ; Statistics.
-    ;
-    push    edx
-    mov     edx, IMP(g_VM)
-    lea     edx, [edx + VM.StatTotalQemuToGC]
-    STAM_PROFILE_ADV_STOP edx
-
-    mov     edx, IMP(g_VM)
-    lea     edx, [edx + VM.StatTotalInGC]
-    STAM_PROFILE_ADV_START edx
-    pop     edx
-%endif
-
-    ;
-    ; enable WP
-    ;
+        ;
+        ; Statistics.
+        ;
+        push    edx
+        mov     edx, IMP(g_VM)
+        lea     edx, [edx + VM.StatTotalQemuToGC]
+        STAM_PROFILE_ADV_STOP edx
+
+        mov     edx, IMP(g_VM)
+        lea     edx, [edx + VM.StatTotalInGC]
+        STAM_PROFILE_ADV_START edx
+        pop     edx
+%endif
+
+        ;
+        ; enable WP
+        ;
 %ifdef ENABLE_WRITE_PROTECTION
-    mov     eax, cr0
-    or      eax, X86_CR0_WRITE_PROTECT
-    mov     cr0, eax
-%endif
-
-    ;
-    ; Continue restore.
-    ;
-    mov     esi, [edx + CPUMCPU.Guest.esi]
-    mov     edi, [edx + CPUMCPU.Guest.edi]
-    mov     ebp, [edx + CPUMCPU.Guest.ebp]
-    mov     ebx, [edx + CPUMCPU.Guest.ebx]
-    mov     ecx, [edx + CPUMCPU.Guest.ecx]
-    mov     eax, [edx + CPUMCPU.Guest.eax]
-    push    dword [edx + CPUMCPU.Guest.ds.Sel]
-    mov     edx, [edx + CPUMCPU.Guest.edx]
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
-    pop     ds
-
-    ; restart execution.
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
-    iretd
+        mov     eax, cr0
+        or      eax, X86_CR0_WRITE_PROTECT
+        mov     cr0, eax
+%endif
+
+        ;
+        ; Continue restore.
+        ;
+        mov     esi, [edx + CPUMCPU.Guest.esi]
+        mov     edi, [edx + CPUMCPU.Guest.edi]
+        mov     ebp, [edx + CPUMCPU.Guest.ebp]
+        mov     ebx, [edx + CPUMCPU.Guest.ebx]
+        mov     ecx, [edx + CPUMCPU.Guest.ecx]
+        mov     eax, [edx + CPUMCPU.Guest.eax]
+        push    dword [edx + CPUMCPU.Guest.ds.Sel]
+        mov     edx, [edx + CPUMCPU.Guest.edx]
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_DS
+        pop     ds
+
+        ; restart execution.
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+        iretd
 ENDPROC     CPUMGCResumeGuest
 
     
@@ -449,70 +424,70 @@
 BEGINPROC_EXPORTED CPUMGCResumeGuestV86
 %ifdef VBOX_STRICT
-    ; Call CPUM to check sanity.
-    push    edx
-    mov     edx, IMP(g_VM)
-    push    edx
-    call    NAME(CPUMRCAssertPreExecutionSanity)
-    add     esp, 4
-    pop     edx
-%endif
-
-    ;
-    ; Setup iretd
-    ;
-    push    dword [edx + CPUMCPU.Guest.gs.Sel]
-    push    dword [edx + CPUMCPU.Guest.fs.Sel]
-    push    dword [edx + CPUMCPU.Guest.ds.Sel]
-    push    dword [edx + CPUMCPU.Guest.es.Sel]
-
-    push    dword [edx + CPUMCPU.Guest.ss.Sel]
-    push    dword [edx + CPUMCPU.Guest.esp]
-
-    push    dword [edx + CPUMCPU.Guest.eflags]
-    push    dword [edx + CPUMCPU.Guest.cs.Sel]
-    push    dword [edx + CPUMCPU.Guest.eip]
-
-    ;
-    ; Restore registers.
-    ;
+        ; Call CPUM to check sanity.
+        push    edx
+        mov     edx, IMP(g_VM)
+        push    edx
+        call    NAME(CPUMRCAssertPreExecutionSanity)
+        add     esp, 4
+        pop     edx
+%endif
+
+        ;
+        ; Setup iretd
+        ;
+        push    dword [edx + CPUMCPU.Guest.gs.Sel]
+        push    dword [edx + CPUMCPU.Guest.fs.Sel]
+        push    dword [edx + CPUMCPU.Guest.ds.Sel]
+        push    dword [edx + CPUMCPU.Guest.es.Sel]
+
+        push    dword [edx + CPUMCPU.Guest.ss.Sel]
+        push    dword [edx + CPUMCPU.Guest.esp]
+
+        push    dword [edx + CPUMCPU.Guest.eflags]
+        push    dword [edx + CPUMCPU.Guest.cs.Sel]
+        push    dword [edx + CPUMCPU.Guest.eip]
+
+        ;
+        ; Restore registers.
+        ;
 
 %ifdef VBOX_WITH_STATISTICS
-    ;
-    ; Statistics.
-    ;
-    push    edx
-    mov     edx, IMP(g_VM)
-    lea     edx, [edx + VM.StatTotalQemuToGC]
-    STAM_PROFILE_ADV_STOP edx
-
-    mov     edx, IMP(g_VM)
-    lea     edx, [edx + VM.StatTotalInGC]
-    STAM_PROFILE_ADV_START edx
-    pop     edx
-%endif
-
-    ;
-    ; enable WP
-    ;
+        ;
+        ; Statistics.
+        ;
+        push    edx
+        mov     edx, IMP(g_VM)
+        lea     edx, [edx + VM.StatTotalQemuToGC]
+        STAM_PROFILE_ADV_STOP edx
+
+        mov     edx, IMP(g_VM)
+        lea     edx, [edx + VM.StatTotalInGC]
+        STAM_PROFILE_ADV_START edx
+        pop     edx
+%endif
+
+        ;
+        ; enable WP
+        ;
 %ifdef ENABLE_WRITE_PROTECTION
-    mov     eax, cr0
-    or      eax, X86_CR0_WRITE_PROTECT
-    mov     cr0, eax
-%endif
-
-    ;
-    ; Continue restore.
-    ;
-    mov     esi, [edx + CPUMCPU.Guest.esi]
-    mov     edi, [edx + CPUMCPU.Guest.edi]
-    mov     ebp, [edx + CPUMCPU.Guest.ebp]
-    mov     ecx, [edx + CPUMCPU.Guest.ecx]
-    mov     ebx, [edx + CPUMCPU.Guest.ebx]
-    mov     eax, [edx + CPUMCPU.Guest.eax]
-    mov     edx, [edx + CPUMCPU.Guest.edx]
-
-    ; restart execution.
-    TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
-    iretd
+        mov     eax, cr0
+        or      eax, X86_CR0_WRITE_PROTECT
+        mov     cr0, eax
+%endif
+
+        ;
+        ; Continue restore.
+        ;
+        mov     esi, [edx + CPUMCPU.Guest.esi]
+        mov     edi, [edx + CPUMCPU.Guest.edi]
+        mov     ebp, [edx + CPUMCPU.Guest.ebp]
+        mov     ecx, [edx + CPUMCPU.Guest.ecx]
+        mov     ebx, [edx + CPUMCPU.Guest.ebx]
+        mov     eax, [edx + CPUMCPU.Guest.eax]
+        mov     edx, [edx + CPUMCPU.Guest.edx]
+
+        ; restart execution.
+        TRPM_NP_GP_HANDLER NAME(cpumRCHandleNPAndGP), CPUM_HANDLER_IRET
+        iretd
 ENDPROC     CPUMGCResumeGuestV86
 