VirtualBox

Changeset 14267 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Nov 17, 2008 7:06:30 PM (16 years ago)
Author:
vboxsync
Message:

current 32 bit mode switcher (not working)

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm

    r14234 r14267  
    5454GLOBALNAME Start
    5555
    56 %ifndef VBOX_WITH_HYBIRD_32BIT_KERNEL
    57 BITS 64
     56BITS 32
    5857
    5958;;
    6059; The C interface.
    6160;
    62 ; @param    pVM  GCC: rdi  MSC:rcx  The VM handle.
    63 ;
    6461BEGINPROC vmmR0HostToGuest
    65 %ifdef DEBUG_STUFF
    66     COM64_S_NEWLINE
    67     COM64_S_CHAR '^'
    68 %endif
    69     ;
    70     ; The ordinary version of the code.
    71     ;
    72 
    73  %ifdef STRICT_IF
    74     pushf
    75     pop     rax
    76     test    eax, X86_EFL_IF
    77     jz      .if_clear_in
    78     mov     eax, 0c0ffee00h
    79     ret
    80 .if_clear_in:
    81  %endif
    82 
    83     ;
    84     ; make r9 = pVM and rdx = pCpum.
    85     ; rax, rcx and r8 are scratch here after.
    86  %ifdef RT_OS_WINDOWS
    87     mov     r9, rcx
    88  %else
    89     mov     r9, rdi
    90  %endif
    91     lea     rdx, [r9 + VM.cpum]
    92 
    93  %ifdef VBOX_WITH_STATISTICS
    94     ;
    95     ; Switcher stats.
    96     ;
    97     lea     r8, [r9 + VM.StatSwitcherToGC]
    98     STAM64_PROFILE_ADV_START r8
    99  %endif
    100 
    101     ;
    102     ; Call worker (far return).
    103     ;
    104     mov     eax, cs
    105     push    rax
    106     call    NAME(vmmR0HostToGuestAsm)
    107 
    108  %ifdef VBOX_WITH_STATISTICS
    109     ;
    110     ; Switcher stats.
    111     ;
    112     lea     r8, [r9 + VM.StatSwitcherToGC]
    113     STAM64_PROFILE_ADV_STOP r8
    114  %endif
    115 
    116     ret
    117 ENDPROC vmmR0HostToGuest
    118 
    119 
    120 %else ; VBOX_WITH_HYBIRD_32BIT_KERNEL
    121 
    122 
    123 BITS 32
    124 
    125 ;;
    126 ; The C interface.
    127 ;
    128 BEGINPROC vmmR0HostToGuest
    129 %ifdef DEBUG_STUFF
     62 %ifdef DEBUG_STUFF
    13063    COM32_S_NEWLINE
    13164    COM32_S_CHAR '^'
    132 %endif
     65 %endif
    13366
    13467 %ifdef VBOX_WITH_STATISTICS
     
    14174 %endif
    14275
    143     ; Thunk to/from 64 bit when invoking the worker routine.
    144     ;
    145     FIXUP FIX_HC_VM_OFF, 1, VM.cpum
     76    ;
     77    ; Call worker.
     78    ;
     79    FIXUP FIX_HC_CPUM_OFF, 1, 0
    14680    mov     edx, 0ffffffffh
    147 
    148     push    0
    149     push    cs
    150     push    0
    151     FIXUP FIX_HC_32BIT, 1, .vmmR0HostToGuestReturn - NAME(Start)
    152     push    0ffffffffh
    153 
    154     FIXUP FIX_HC_64BIT_CS, 1
    155     push    0ffffh
    156     FIXUP FIX_HC_32BIT, 1, NAME(vmmR0HostToGuestAsm) - NAME(Start)
    157     push    0ffffffffh
    158     retf
    159 .vmmR0HostToGuestReturn:
    160 
    161     ;
    162     ; This selector reloading is probably not necessary, but we do it anyway to be quite sure
    163     ; the CPU has the right idea about the selectors.
    164     ;
    165     mov     edx, ds
    166     mov     ds, edx
    167     mov     ecx, es
    168     mov     es, ecx
    169     mov     edx, ss
    170     mov     ss, edx
    171 
    172  %ifdef VBOX_WITH_STATISTICS
     81    push    cs                          ; allow for far return and restore cs correctly.
     82    call    NAME(vmmR0HostToGuestAsm)
     83
     84%ifdef VBOX_WITH_STATISTICS
    17385    ;
    17486    ; Switcher stats.
     
    17789    mov     edx, 0ffffffffh
    17890    STAM_PROFILE_ADV_STOP edx
    179  %endif
     91%endif
    18092
    18193    ret
     94   
    18295ENDPROC vmmR0HostToGuest
    183 
    184 BITS 64
    185 %endif ;!VBOX_WITH_HYBIRD_32BIT_KERNEL
    186 
    187 
    18896
    18997; *****************************************************************************
     
    196104;
    197105; USES/DESTROYS:
    198 ;       - eax, ecx, edx, r8
     106;       - eax, ecx, edx, esi
    199107;
    200108; ASSUMPTION:
     
    204112ALIGNCODE(16)
    205113BEGINPROC vmmR0HostToGuestAsm
    206     mov     cr3, rax
    207     DEBUG_CHAR('2')                     ; trashes esi
    208 
    209     ;;
    210     ;; 1. Switch to compatibility mode, placing ourselves in identity mapped code.
    211     ;;
    212     jmp far [NAME(fpIDEnterTarget) wrt rip]
    213 
    214 ; 16:32 Pointer to IDEnterTarget.
    215 NAME(fpIDEnterTarget):
    216     FIXUP FIX_ID_32BIT, 0, NAME(IDEnterTarget) - NAME(Start)
    217 dd  0
    218     FIXUP FIX_HYPER_CS, 0
    219 dd  0
    220 
    221 
    222     ; We're now on an identity mapped pages! in 32-bit compatability mode.
    223 BITS 32
     114    ;;
     115    ;; Save CPU host context
     116    ;;      Skip eax, edx and ecx as these are not preserved over calls.
     117    ;;
     118    CPUMCPU_FROM_CPUM(edx)
     119    ; general registers.
     120    mov     [edx + CPUMCPU.Host.ebx], ebx
     121    mov     [edx + CPUMCPU.Host.edi], edi
     122    mov     [edx + CPUMCPU.Host.esi], esi
     123    mov     [edx + CPUMCPU.Host.esp], esp
     124    mov     [edx + CPUMCPU.Host.ebp], ebp
     125    ; selectors.
     126    mov     [edx + CPUMCPU.Host.ds], ds
     127    mov     [edx + CPUMCPU.Host.es], es
     128    mov     [edx + CPUMCPU.Host.fs], fs
     129    mov     [edx + CPUMCPU.Host.gs], gs
     130    mov     [edx + CPUMCPU.Host.ss], ss
     131    ; special registers.
     132    sldt    [edx + CPUMCPU.Host.ldtr]
     133    sidt    [edx + CPUMCPU.Host.idtr]
     134    sgdt    [edx + CPUMCPU.Host.gdtr]
     135    str     [edx + CPUMCPU.Host.tr]
     136    ; flags
     137    pushfd
     138    pop     dword [edx + CPUMCPU.Host.eflags]
     139
     140    FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
     141    ; save MSR_IA32_SYSENTER_CS register.
     142    mov     ecx, MSR_IA32_SYSENTER_CS
     143    mov     ebx, edx                    ; save edx
     144    rdmsr                               ; edx:eax <- MSR[ecx]
     145    mov     [ebx + CPUMCPU.Host.SysEnter.cs], eax
     146    mov     [ebx + CPUMCPU.Host.SysEnter.cs + 4], edx
     147    xor     eax, eax                    ; load 0:0 to cause #GP upon sysenter
     148    xor     edx, edx
     149    wrmsr
     150    xchg    ebx, edx                    ; restore edx
     151    jmp short htg_no_sysenter
     152
     153ALIGNCODE(16)
     154htg_no_sysenter:
     155
     156    ;; handle use flags.
     157    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
     158    and     esi, ~CPUM_USED_FPU         ; Clear CPUM_USED_* flags. ;;@todo FPU check can be optimized to use cr0 flags!
     159    mov     [edx + CPUMCPU.fUseFlags], esi
     160
     161    ; debug registers.
     162    test    esi, CPUM_USE_DEBUG_REGS | CPUM_USE_DEBUG_REGS_HOST
     163    jz      htg_debug_regs_no
     164    jmp     htg_debug_regs_save_dr7and6
     165htg_debug_regs_no:
     166
     167    ; control registers.
     168    mov     eax, cr0
     169    mov     [edx + CPUMCPU.Host.cr0], eax
     170    ;mov     eax, cr2                   ; assume host os don't stuff things in cr2. (safe)
     171    ;mov     [edx + CPUMCPU.Host.cr2], eax
     172    mov     eax, cr3
     173    mov     [edx + CPUMCPU.Host.cr3], eax
     174    mov     eax, cr4
     175    mov     [edx + CPUMCPU.Host.cr4], eax
     176
     177    ;;
     178    ;; Load Intermediate memory context.
     179    ;;
     180    FIXUP FIX_INTER_32BIT_CR3, 1
     181    mov     eax, 0ffffffffh
     182    mov     cr3, eax
     183    DEBUG_CHAR('?')
     184
     185    ;;
     186    ;; Jump to identity mapped location
     187    ;;
     188    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDEnterTarget) - NAME(Start)
     189    jmp near NAME(IDEnterTarget)
     190
     191       
     192    ; We're now on identity mapped pages!
    224193ALIGNCODE(16)
    225194GLOBALNAME IDEnterTarget
    226     DEBUG_CHAR('3')
    227 
    228     ; 2. Deactivate long mode by turning off paging.
     195    DEBUG_CHAR('2')
     196       
     197    ; 1. Disable paging.
    229198    mov     ebx, cr0
    230199    and     ebx, ~X86_CR0_PG
    231200    mov     cr0, ebx
    232     DEBUG_CHAR('4')
    233 
    234     ; 3. Load 32-bit intermediate page table.
    235     FIXUP FIX_INTER_PAE_CR3, 1
    236     mov     edx, 0ffffffffh
    237     mov     cr3, edx
    238 
    239     ; 4. Disable long mode.
    240     ;    We also use the chance to disable syscall/sysret and fast fxsave/fxrstor.
     201    DEBUG_CHAR('2')
     202
     203    ; 2. Enable PAE.
     204    mov     ecx, cr4
     205    or      ecx, X86_CR4_PAE
     206    mov     cr4, ecx
     207
     208    ; 3. Load long mode intermediate CR3.
     209    FIXUP FIX_INTER_AMD64_CR3, 1
     210    mov     ecx, 0ffffffffh
     211    mov     cr3, ecx
     212    DEBUG_CHAR('3')
     213
     214    ; 4. Enable long mode.
     215    mov     ebp, edx
    241216    mov     ecx, MSR_K6_EFER
    242217    rdmsr
    243     DEBUG_CHAR('5')
    244     and     eax, ~(MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)
     218    or      eax, MSR_K6_EFER_LME
    245219    wrmsr
    246     DEBUG_CHAR('6')
     220    mov     edx, ebp
     221    DEBUG_CHAR('4')
    247222
    248223    ; 5. Enable paging.
    249224    or      ebx, X86_CR0_PG
    250225    mov     cr0, ebx
    251     jmp short just_a_jump
    252 just_a_jump:
    253     DEBUG_CHAR('7')
    254 
    255     ;;
    256     ;; 6. Jump to guest code mapping of the code and load the Hypervisor CS.
    257     ;;
    258     FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(JmpGCTarget) - NAME(Start)
    259     jmp near NAME(JmpGCTarget)
    260 
    261 
    262     ;;
    263     ;; When we arrive at this label we're at the
    264     ;; guest code mapping of the switching code.
    265     ;;
    266 ALIGNCODE(16)
    267 GLOBALNAME JmpGCTarget
    268     DEBUG_CHAR('-')
    269     ; load final cr3 and do far jump to load cs.
    270     FIXUP FIX_HYPER_PAE_CR3, 1
    271     mov     eax, 0ffffffffh
    272     mov     cr3, eax
    273     DEBUG_CHAR('0')
    274 
    275     ;;
    276     ;; We're in VMM MMU context and VMM CS is loaded.
    277     ;; Setup the rest of the VMM state.
    278     ;;
    279     ; Load selectors
    280     DEBUG_CHAR('1')
    281     FIXUP FIX_HYPER_DS, 1
    282     mov     eax, 0ffffh
    283     mov     ds, eax
    284     mov     es, eax
    285     xor     eax, eax
    286     mov     gs, eax
    287     mov     fs, eax
    288     ; Load pCpum into EDX
    289     FIXUP FIX_GC_CPUM_OFF, 1, 0
    290     mov     edx, 0ffffffffh
    291     ; Activate guest IDT
    292     DEBUG_CHAR('2')
    293     lidt    [edx + CPUM.Hyper.idtr]
    294 
    295     ; Setup stack; use the lss_esp, ss pair for lss
    296     DEBUG_CHAR('3')
    297     mov     eax, [edx + CPUM.Hyper.esp]
    298     mov     [edx + CPUM.Hyper.lss_esp], eax
    299     lss     esp, [edx + CPUM.Hyper.lss_esp]
    300 
    301     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    302     DEBUG_CHAR('4')
    303     FIXUP FIX_GC_TSS_GDTE_DW2, 2
    304     and     dword [0ffffffffh], ~0200h      ; clear busy flag (2nd type2 bit)
    305226    DEBUG_CHAR('5')
    306     ltr     word [edx + CPUM.Hyper.tr]
     227
     228    ; Jump from compatability mode to 64-bit mode.
     229    FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDEnter64Mode) - NAME(Start)
     230    jmp     0ffffh:0fffffffeh
     231
     232    ;
     233    ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
     234BITS 64
     235ALIGNCODE(16)
     236NAME(IDEnter64Mode):
    307237    DEBUG_CHAR('6')
    308 
    309     ; Activate the ldt (now we can safely crash).
    310     lldt    [edx + CPUM.Hyper.ldtr]
    311     DEBUG_CHAR('7')
    312 
    313     ;; use flags.
    314     mov     esi, [edx + CPUM.ulOffCPUMCPU]
    315     mov     esi, [edx + esi + CPUMCPU.fUseFlags]
    316 
    317     ; debug registers
    318     test    esi, CPUM_USE_DEBUG_REGS
    319     jz      htg_debug_regs_guest_no
    320     jmp     htg_debug_regs_guest
    321 htg_debug_regs_guest_no:
    322     DEBUG_CHAR('9')
    323 
    324     ; General registers.
    325     mov     ebx, [edx + CPUM.Hyper.ebx]
    326     mov     ebp, [edx + CPUM.Hyper.ebp]
    327     mov     esi, [edx + CPUM.Hyper.esi]
    328     mov     edi, [edx + CPUM.Hyper.edi]
    329     push    dword [edx + CPUM.Hyper.eflags]
    330     popfd
    331     DEBUG_CHAR('!')
    332 
    333     ;;
    334     ;; Return to the VMM code which either called the switcher or
    335     ;; the code set up to run by HC.
    336     ;;
    337 %ifdef DEBUG_STUFF
    338     COM32_S_PRINT ';eip='
    339     mov     eax, [edx + CPUM.Hyper.eip]
    340     COM32_S_DWORD_REG eax
    341     COM32_S_CHAR ';'
    342 %endif
    343     mov     eax, [edx + CPUM.Hyper.eip]
    344 %ifdef VBOX_WITH_STATISTICS
    345     FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToGC
    346     mov     edx, 0ffffffffh
    347     STAM32_PROFILE_ADV_STOP edx
    348     FIXUP FIX_GC_CPUM_OFF, 1, 0
    349     mov     edx, 0ffffffffh
    350 %endif
    351     jmp     eax
    352 
    353 ;;
    354 ; Detour for saving host DR0-3 and loading hypervisor debug registers.
     238    jmp     [NAME(pICEnterTarget) wrt rip]
     239
     240; 64-bit jump target
     241NAME(pICEnterTarget):
     242FIXUP FIX_HC_64BIT, 0, NAME(ICEnterTarget) - NAME(Start)
     243dq 0ffffffffffffffffh
     244
     245; 64-bit pCpum address.
     246NAME(pCpumIC):
     247FIXUP FIX_HC_64BIT_CPUM, 0
     248dq 0ffffffffffffffffh
     249
     250    ;
     251    ; When we arrive here we're at the 64 bit mode of intermediate context
     252    ;
     253ALIGNCODE(16)
     254GLOBALNAME ICEnterTarget     
     255    ; at this moment we're in 64-bit mode. let's write something to CPUM
     256    ; Load CPUM pointer into rdx
     257    mov     rdx, [NAME(pCpumIC) wrt rip]
     258    ; Load the CPUMCPU offset.
     259    mov     r8, [rdx + CPUM.ulOffCPUMCPU]
     260       
     261    mov rsi, 012345678h
     262    mov [rdx + r8 + CPUMCPU.uPadding], rsi
     263
     264    ; now let's switch back
     265    mov     rax,  0666h
     266    jmp     NAME(VMMGCGuestToHostAsm)   ; rax = returncode.
     267
     268BITS 32
     269;;
     270; Detour for saving the host DR7 and DR6.
    355271; esi and edx must be preserved.
    356 htg_debug_regs_guest:
    357     DEBUG_S_CHAR('D')
    358     DEBUG_S_CHAR('R')
    359     DEBUG_S_CHAR('x')
    360     ; load hyper DR0-7
    361     mov     ebx, [edx + CPUM.Hyper.dr]
    362     mov     dr0, ebx
    363     mov     ecx, [edx + CPUM.Hyper.dr + 8*1]
    364     mov     dr1, ecx
    365     mov     eax, [edx + CPUM.Hyper.dr + 8*2]
    366     mov     dr2, eax
    367     mov     ebx, [edx + CPUM.Hyper.dr + 8*3]
    368     mov     dr3, ebx
    369     ;mov     eax, [edx + CPUM.Hyper.dr + 8*6]
    370     mov     ecx, 0ffff0ff0h
    371     mov     dr6, ecx
    372     mov     eax, [edx + CPUM.Hyper.dr + 8*7]
     272htg_debug_regs_save_dr7and6:
     273DEBUG_S_CHAR('s');
     274    mov     eax, dr7                    ; not sure, but if I read the docs right this will trap if GD is set. FIXME!!!
     275    mov     [edx + CPUMCPU.Host.dr7], eax
     276    xor     eax, eax                    ; clear everything. (bit 12? is read as 1...)
    373277    mov     dr7, eax
    374     jmp     htg_debug_regs_guest_no
    375 
     278    mov     eax, dr6                    ; just in case we save the state register too.
     279    mov     [edx + CPUMCPU.Host.dr6], eax
     280    jmp     htg_debug_regs_no
     281
     282
     283BITS 64
    376284ENDPROC vmmR0HostToGuestAsm
    377285
     
    393301    COM32_S_CHAR '!'
    394302%endif
    395 
    396     ; call routine
    397     pop     eax                         ; call address
    398     mov     esi, edx                    ; save edx
    399     pop     edi                         ; argument count.
    400 %ifdef DEBUG_STUFF
    401     COM32_S_PRINT ';eax='
    402     COM32_S_DWORD_REG eax
    403     COM32_S_CHAR ';'
    404 %endif
    405     call    eax                         ; do call
    406     add     esp, edi                    ; cleanup stack
    407 
    408     ; return to the host context.
    409     push    byte 0                      ; eip
    410     mov     edx, esi                    ; CPUM pointer
    411 
    412 %ifdef DEBUG_STUFF
    413     COM32_S_CHAR '`'
    414 %endif
    415     jmp     NAME(VMMGCGuestToHostAsm)   ; eax = returncode.
     303    int3
    416304ENDPROC vmmGCCallTrampoline
    417305
    418306
    419 
     307BITS 64
    420308;;
    421309; The C interface.
     
    434322    pop     esi
    435323%endif
    436     mov     eax, [esp + 4]
    437     jmp     NAME(VMMGCGuestToHostAsm)
     324    int3
    438325ENDPROC vmmGCGuestToHost
    439 
    440 
    441 ;;
    442 ; VMMGCGuestToHostAsmGuestCtx
    443 ;
    444 ; Switches from Guest Context to Host Context.
    445 ; Of course it's only called from within the GC.
    446 ;
    447 ; @param    eax     Return code.
    448 ; @param    esp + 4 Pointer to CPUMCTXCORE.
    449 ;
    450 ; @remark   ASSUMES interrupts disabled.
    451 ;
    452 ALIGNCODE(16)
    453 BEGINPROC VMMGCGuestToHostAsmGuestCtx
    454     DEBUG_CHAR('~')
    455 
    456 %ifdef VBOX_WITH_STATISTICS
    457     FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    458     mov     edx, 0ffffffffh
    459     STAM32_PROFILE_ADV_STOP edx
    460 
    461     FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    462     mov     edx, 0ffffffffh
    463     STAM32_PROFILE_ADV_START edx
    464 
    465     FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    466     mov     edx, 0ffffffffh
    467     STAM32_PROFILE_ADV_START edx
    468 %endif
    469 
    470     ;
    471     ; Load the CPUM pointer.
    472     ;
    473     FIXUP FIX_GC_CPUM_OFF, 1, 0
    474     mov     edx, 0ffffffffh
    475     ; Convert to CPUMCPU pointer
    476     add     edx, [edx + CPUM.ulOffCPUMCPU]
    477    
    478     ; Skip return address (assumes called!)
    479     lea     esp, [esp + 4]
    480 
    481     ;
    482     ; Guest Context (assumes esp now points to CPUMCTXCORE structure).
    483     ;
    484     ; general purpose registers
    485     push    eax                         ; save return code.
    486     mov     eax, [esp + 4 + CPUMCTXCORE.edi]
    487     mov     [edx + CPUMCPU.Guest.edi], eax
    488     mov     eax, [esp + 4 + CPUMCTXCORE.esi]
    489     mov     [edx + CPUMCPU.Guest.esi], eax
    490     mov     eax, [esp + 4 + CPUMCTXCORE.ebp]
    491     mov     [edx + CPUMCPU.Guest.ebp], eax
    492     mov     eax, [esp + 4 + CPUMCTXCORE.eax]
    493     mov     [edx + CPUMCPU.Guest.eax], eax
    494     mov     eax, [esp + 4 + CPUMCTXCORE.ebx]
    495     mov     [edx + CPUMCPU.Guest.ebx], eax
    496     mov     eax, [esp + 4 + CPUMCTXCORE.edx]
    497     mov     [edx + CPUMCPU.Guest.edx], eax
    498     mov     eax, [esp + 4 + CPUMCTXCORE.ecx]
    499     mov     [edx + CPUMCPU.Guest.ecx], eax
    500     mov     eax, [esp + 4 + CPUMCTXCORE.esp]
    501     mov     [edx + CPUMCPU.Guest.esp], eax
    502     ; selectors
    503     mov     eax, [esp + 4 + CPUMCTXCORE.ss]
    504     mov     [edx + CPUMCPU.Guest.ss], eax
    505     mov     eax, [esp + 4 + CPUMCTXCORE.gs]
    506     mov     [edx + CPUMCPU.Guest.gs], eax
    507     mov     eax, [esp + 4 + CPUMCTXCORE.fs]
    508     mov     [edx + CPUMCPU.Guest.fs], eax
    509     mov     eax, [esp + 4 + CPUMCTXCORE.es]
    510     mov     [edx + CPUMCPU.Guest.es], eax
    511     mov     eax, [esp + 4 + CPUMCTXCORE.ds]
    512     mov     [edx + CPUMCPU.Guest.ds], eax
    513     mov     eax, [esp + 4 + CPUMCTXCORE.cs]
    514     mov     [edx + CPUMCPU.Guest.cs], eax
    515     ; flags
    516     mov     eax, [esp + 4 + CPUMCTXCORE.eflags]
    517     mov     [edx + CPUMCPU.Guest.eflags], eax
    518     ; eip
    519     mov     eax, [esp + 4 + CPUMCTXCORE.eip]
    520     mov     [edx + CPUMCPU.Guest.eip], eax
    521     ; jump to common worker code.
    522     pop     eax                         ; restore return code.
    523     ; Load CPUM into edx again
    524     sub     edx, [edx + CPUMCPU.ulOffCPUM]
    525 
    526     add     esp, CPUMCTXCORE_size      ; skip CPUMCTXCORE structure
    527 
    528     jmp     vmmGCGuestToHostAsm_EIPDone
    529 ENDPROC VMMGCGuestToHostAsmGuestCtx
    530 
    531 
    532 ;;
    533 ; VMMGCGuestToHostAsmHyperCtx
    534 ;
    535 ; This is an alternative entry point which we'll be using
    536 ; when the we have the hypervisor context and need to save
    537 ; that before going to the host.
    538 ;
    539 ; This is typically useful when abandoning the hypervisor
    540 ; because of a trap and want the trap state to be saved.
    541 ;
    542 ; @param    eax     Return code.
    543 ; @param    ecx     Points to CPUMCTXCORE.
    544 ; @uses     eax,edx,ecx
    545 ALIGNCODE(16)
    546 BEGINPROC VMMGCGuestToHostAsmHyperCtx
    547     DEBUG_CHAR('#')
    548 
    549 %ifdef VBOX_WITH_STATISTICS
    550     FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    551     mov     edx, 0ffffffffh
    552     STAM32_PROFILE_ADV_STOP edx
    553 
    554     FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    555     mov     edx, 0ffffffffh
    556     STAM32_PROFILE_ADV_START edx
    557 
    558     FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    559     mov     edx, 0ffffffffh
    560     STAM32_PROFILE_ADV_START edx
    561 %endif
    562 
    563     ;
    564     ; Load the CPUM pointer.
    565     ;
    566     FIXUP FIX_GC_CPUM_OFF, 1, 0
    567     mov     edx, 0ffffffffh
    568 
    569     push    eax                         ; save return code.
    570     ; general purpose registers
    571     mov     eax, [ecx + CPUMCTXCORE.edi]
    572     mov     [edx + CPUM.Hyper.edi], eax
    573     mov     eax, [ecx + CPUMCTXCORE.esi]
    574     mov     [edx + CPUM.Hyper.esi], eax
    575     mov     eax, [ecx + CPUMCTXCORE.ebp]
    576     mov     [edx + CPUM.Hyper.ebp], eax
    577     mov     eax, [ecx + CPUMCTXCORE.eax]
    578     mov     [edx + CPUM.Hyper.eax], eax
    579     mov     eax, [ecx + CPUMCTXCORE.ebx]
    580     mov     [edx + CPUM.Hyper.ebx], eax
    581     mov     eax, [ecx + CPUMCTXCORE.edx]
    582     mov     [edx + CPUM.Hyper.edx], eax
    583     mov     eax, [ecx + CPUMCTXCORE.ecx]
    584     mov     [edx + CPUM.Hyper.ecx], eax
    585     mov     eax, [ecx + CPUMCTXCORE.esp]
    586     mov     [edx + CPUM.Hyper.esp], eax
    587     ; selectors
    588     mov     eax, [ecx + CPUMCTXCORE.ss]
    589     mov     [edx + CPUM.Hyper.ss], eax
    590     mov     eax, [ecx + CPUMCTXCORE.gs]
    591     mov     [edx + CPUM.Hyper.gs], eax
    592     mov     eax, [ecx + CPUMCTXCORE.fs]
    593     mov     [edx + CPUM.Hyper.fs], eax
    594     mov     eax, [ecx + CPUMCTXCORE.es]
    595     mov     [edx + CPUM.Hyper.es], eax
    596     mov     eax, [ecx + CPUMCTXCORE.ds]
    597     mov     [edx + CPUM.Hyper.ds], eax
    598     mov     eax, [ecx + CPUMCTXCORE.cs]
    599     mov     [edx + CPUM.Hyper.cs], eax
    600     ; flags
    601     mov     eax, [ecx + CPUMCTXCORE.eflags]
    602     mov     [edx + CPUM.Hyper.eflags], eax
    603     ; eip
    604     mov     eax, [ecx + CPUMCTXCORE.eip]
    605     mov     [edx + CPUM.Hyper.eip], eax
    606     ; jump to common worker code.
    607     pop     eax                         ; restore return code.
    608     jmp     vmmGCGuestToHostAsm_SkipHyperRegs
    609 
    610 ENDPROC VMMGCGuestToHostAsmHyperCtx
    611 
    612326
    613327;;
     
    623337ALIGNCODE(16)
    624338BEGINPROC VMMGCGuestToHostAsm
    625     DEBUG_CHAR('%')
    626 
    627 %ifdef VBOX_WITH_STATISTICS
    628     FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalInGC
    629     mov     edx, 0ffffffffh
    630     STAM32_PROFILE_ADV_STOP edx
    631 
    632     FIXUP FIX_GC_VM_OFF, 1, VM.StatTotalGCToQemu
    633     mov     edx, 0ffffffffh
    634     STAM32_PROFILE_ADV_START edx
    635 
    636     FIXUP FIX_GC_VM_OFF, 1, VM.StatSwitcherToHC
    637     mov     edx, 0ffffffffh
    638     STAM32_PROFILE_ADV_START edx
    639 %endif
    640 
    641     ;
    642     ; Load the CPUM pointer.
    643     ;
    644     FIXUP FIX_GC_CPUM_OFF, 1, 0
    645     mov     edx, 0ffffffffh
    646 
    647     pop     dword [edx + CPUM.Hyper.eip] ; call return from stack
    648     jmp short vmmGCGuestToHostAsm_EIPDone
    649 
    650 ALIGNCODE(16)
    651 vmmGCGuestToHostAsm_EIPDone:
    652     ; general registers which we care about.
    653     mov     dword [edx + CPUM.Hyper.ebx], ebx
    654     mov     dword [edx + CPUM.Hyper.esi], esi
    655     mov     dword [edx + CPUM.Hyper.edi], edi
    656     mov     dword [edx + CPUM.Hyper.ebp], ebp
    657     mov     dword [edx + CPUM.Hyper.esp], esp
    658 
    659     ; special registers which may change.
    660 vmmGCGuestToHostAsm_SkipHyperRegs:
    661 %ifdef STRICT_IF
    662     pushf
    663     pop     ecx
    664     test    ecx, X86_EFL_IF
    665     jz      .if_clear_out
    666     mov     eax, 0c0ffee01h
    667     cli
    668 .if_clear_out:
    669 %endif
    670     ; str     [edx + CPUM.Hyper.tr] - double fault only, and it won't be right then either.
    671     sldt    [edx + CPUM.Hyper.ldtr]
    672 
    673     ; No need to save CRx here. They are set dynamically according to Guest/Host requirements.
    674     ; FPU context is saved before restore of host saving (another) branch.
    675 
    676 
    677     ;;
    678     ;; Load Intermediate memory context.
    679     ;;
    680     mov     edi, eax                    ; save return code in EDI (careful with COM_DWORD_REG from here on!)
    681     FIXUP FIX_INTER_PAE_CR3, 1
    682     mov     eax, 0ffffffffh
    683     mov     cr3, eax
    684     DEBUG_CHAR('?')
    685 
     339    CPUMCPU_FROM_CPUM(rdx)
     340    FIXUP FIX_INTER_AMD64_CR3, 1
     341    mov     rax, 0ffffffffh
     342    mov     cr3, rax
    686343    ;; We're now in intermediate memory context!
    687 
    688     ;;
    689     ;; 0. Jump to identity mapped location
     344       
     345    ;;
     346    ;; Jump to identity mapped location
    690347    ;;
    691348    FIXUP FIX_GC_2_ID_NEAR_REL, 1, NAME(IDExitTarget) - NAME(Start)
     
    695352ALIGNCODE(16)
    696353GLOBALNAME IDExitTarget
     354BITS 32     
    697355    DEBUG_CHAR('1')
    698356
    699     ; 1. Disable paging.
     357    ; 1. Deactivate long mode by turning off paging.
    700358    mov     ebx, cr0
    701359    and     ebx, ~X86_CR0_PG
     
    703361    DEBUG_CHAR('2')
    704362
    705     ; 2. Enable PAE - already enabled.
    706 
    707     ; 3. Load long mode intermediate CR3.
    708     FIXUP FIX_INTER_AMD64_CR3, 1
    709     mov     ecx, 0ffffffffh
    710     mov     cr3, ecx
     363    ; 2. Load 32-bit intermediate page table.
     364    FIXUP FIX_INTER_32BIT_CR3, 1
     365    mov     edx, 0ffffffffh
     366    mov     cr3, edx
    711367    DEBUG_CHAR('3')
    712368
    713     ; 4. Enable long mode.
    714     mov     ebp, edx
     369    ; 3. Disable long mode.
    715370    mov     ecx, MSR_K6_EFER
    716371    rdmsr
    717     or      eax, MSR_K6_EFER_LME
     372    DEBUG_CHAR('5')
     373    and     eax, ~(MSR_K6_EFER_LME)
    718374    wrmsr
    719     mov     edx, ebp
    720     DEBUG_CHAR('4')
    721 
    722     ; 5. Enable paging.
     375    DEBUG_CHAR('6')
     376
     377    ; 3b. Disable PAE.
     378    mov     eax, cr4
     379    and     eax, ~X86_CR4_PAE
     380    mov     cr4, eax
     381    DEBUG_CHAR('7')
     382
     383    ; 4. Enable paging.
    723384    or      ebx, X86_CR0_PG
    724385    mov     cr0, ebx
    725     DEBUG_CHAR('5')
    726 
    727     ; Jump from compatability mode to 64-bit mode.
    728     FIXUP FIX_ID_FAR32_TO_64BIT_MODE, 1, NAME(IDExit64Mode) - NAME(Start)
    729     jmp     0ffffh:0fffffffeh
    730 
    731     ;
    732     ; We're in 64-bit mode (ds, ss, es, fs, gs are all bogus).
    733     ; Move on to the HC mapping.
    734     ;
    735 BITS 64
    736 ALIGNCODE(16)
    737 NAME(IDExit64Mode):
    738     DEBUG_CHAR('6')
    739     jmp     [NAME(pHCExitTarget) wrt rip]
    740 
    741 ; 64-bit jump target
    742 NAME(pHCExitTarget):
    743 FIXUP FIX_HC_64BIT, 0, NAME(HCExitTarget) - NAME(Start)
    744 dq 0ffffffffffffffffh
    745 
    746 ; 64-bit pCpum address.
    747 NAME(pCpumHC):
    748 FIXUP FIX_HC_64BIT_CPUM, 0
    749 dq 0ffffffffffffffffh
    750 
    751     ;
    752     ; When we arrive here we're at the host context
    753     ; mapping of the switcher code.
    754     ;
    755 ALIGNCODE(16)
    756 GLOBALNAME HCExitTarget
     386    jmp short just_a_jump
     387just_a_jump:
     388    DEBUG_CHAR('8')
     389
     390    ;;
     391    ;; 5. Jump to guest code mapping of the code and load the Hypervisor CS.
     392    ;;
     393    FIXUP FIX_ID_2_GC_NEAR_REL, 1, NAME(ICExitTarget) - NAME(Start)
     394    jmp near NAME(ICExitTarget)
     395   
     396    ;;
     397    ;; When we arrive at this label we're at the
     398    ;; intermediate mapping of the switching code.
     399    ;;
     400BITS 32
     401ALIGNCODE(16)
     402GLOBALNAME ICExitTarget
     403    DEBUG_CHAR('8')
     404    FIXUP FIX_HC_CPUM_OFF, 1, 0
     405    mov     edx, 0ffffffffh
     406    CPUMCPU_FROM_CPUM(edx)
     407    mov     esi, [edx + CPUMCPU.Host.cr3]
     408    mov     cr3, esi
     409
     410    ;; now we're in host memory context, let's restore regs
     411       
     412    ; activate host gdt and idt
     413    lgdt    [edx + CPUMCPU.Host.gdtr]
     414    DEBUG_CHAR('0')
     415    lidt    [edx + CPUMCPU.Host.idtr]
     416    DEBUG_CHAR('1')
     417       
     418    ; Restore TSS selector; must mark it as not busy before using ltr (!)
     419    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
     420    movzx   eax, word [edx + CPUMCPU.Host.tr]          ; eax <- TR
     421    and     al, 0F8h                                ; mask away TI and RPL bits, get descriptor offset.
     422    add     eax, [edx + CPUMCPU.Host.gdtr + 2]         ; eax <- GDTR.address + descriptor offset.
     423    and     dword [eax + 4], ~0200h                 ; clear busy flag (2nd type2 bit)
     424    ltr     word [edx + CPUMCPU.Host.tr]
     425
     426    ; activate ldt
     427    DEBUG_CHAR('2')
     428    lldt    [edx + CPUMCPU.Host.ldtr]
     429    ; Restore segment registers
     430    mov     eax, [edx + CPUMCPU.Host.ds]
     431    mov     ds, eax
     432    mov     eax, [edx + CPUMCPU.Host.es]
     433    mov     es, eax
     434    mov     eax, [edx + CPUMCPU.Host.fs]
     435    mov     fs, eax
     436    mov     eax, [edx + CPUMCPU.Host.gs]
     437    mov     gs, eax
     438    ; restore stack
     439    lss     esp, [edx + CPUMCPU.Host.esp]
     440
     441        FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
     442    ; restore MSR_IA32_SYSENTER_CS register.
     443    mov     ecx, MSR_IA32_SYSENTER_CS
     444    mov     eax, [edx + CPUMCPU.Host.SysEnter.cs]
     445    mov     ebx, [edx + CPUMCPU.Host.SysEnter.cs + 4]
     446    xchg    edx, ebx                    ; save/load edx
     447    wrmsr                               ; MSR[ecx] <- edx:eax
     448    xchg    edx, ebx                    ; restore edx
    757449    jmp short gth_sysenter_no
    758450
     
    764456    ; Restore FPU if guest has used it.
     765457    ; Using fxrstor should ensure that we're not causing an unwanted exception on the host.
    766     mov     esi, [rdx + r8 + CPUMCPU.fUseFlags] ; esi == use flags.
     458    mov     esi, [edx + CPUMCPU.fUseFlags] ; esi == use flags.
    767459    test    esi, CPUM_USED_FPU
    768     jz short gth_fpu_no
    769     mov     rcx, cr0
    770     and     rcx, ~(X86_CR0_TS | X86_CR0_EM)
    771     mov     cr0, rcx
    772 
    773     fxsave  [rdx + r8 + CPUMCPU.Guest.fpu]
    774     fxrstor [rdx + r8 + CPUMCPU.Host.fpu]
     460    jz near gth_fpu_no
     461    mov     ecx, cr0
     462    and     ecx, ~(X86_CR0_TS | X86_CR0_EM)
     463    mov     cr0, ecx
     464
     465    FIXUP FIX_NO_FXSAVE_JMP, 0, gth_no_fxsave - NAME(Start) ; this will insert a jmp gth_no_fxsave if fxsave isn't supported.
     466    fxsave  [edx + CPUMCPU.Guest.fpu]
     467    fxrstor [edx + CPUMCPU.Host.fpu]
     468    jmp near gth_fpu_no
     469
     470gth_no_fxsave:
     471    fnsave  [edx + CPUMCPU.Guest.fpu]
     472    mov     eax, [edx + CPUMCPU.Host.fpu]     ; control word
     473    not     eax                            ; 1 means exception ignored (6 LS bits)
     474    and     eax, byte 03Fh                 ; 6 LS bits only
     475    test    eax, [edx + CPUMCPU.Host.fpu + 4] ; status word
     476    jz      gth_no_exceptions_pending
     477
     478    ; technically incorrect, but we certainly don't want any exceptions now!!
     479    and     dword [edx + CPUMCPU.Host.fpu + 4], ~03Fh
     480
     481gth_no_exceptions_pending:
     482    frstor  [edx + CPUMCPU.Host.fpu]
    775483    jmp short gth_fpu_no
    776484
     
     781489    ; Would've liked to have these higher up in case of crashes, but
    782490    ; the fpu stuff must be done before we restore cr0.
    783     mov     rcx, [rdx + r8 + CPUMCPU.Host.cr4]
    784     mov     cr4, rcx
    785     mov     rcx, [rdx + r8 + CPUMCPU.Host.cr0]
    786     mov     cr0, rcx
    787     ;mov     rcx, [rdx + r8 + CPUMCPU.Host.cr2] ; assumes this is waste of time.
    788     ;mov     cr2, rcx
     491    mov     ecx, [edx + CPUMCPU.Host.cr4]
     492    mov     cr4, ecx
     493    mov     ecx, [edx + CPUMCPU.Host.cr0]
     494    mov     cr0, ecx
      495    ;mov     ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is a waste of time.
     496    ;mov     cr2, ecx
    789497
    790498    ; restore debug registers (if modified) (esi must still be fUseFlags!)
     
    794502    jmp     gth_debug_regs_restore
    795503gth_debug_regs_no:
     504
     505    ; restore general registers.
     506    mov     eax, edi                    ; restore return code. eax = return code !!
     507    mov     edi, [edx + CPUMCPU.Host.edi]
     508    mov     esi, [edx + CPUMCPU.Host.esi]
     509    mov     ebx, [edx + CPUMCPU.Host.ebx]
     510    mov     ebp, [edx + CPUMCPU.Host.ebp]
     511    push    dword [edx + CPUMCPU.Host.eflags]
     512    popfd
     513
     514%ifdef DEBUG_STUFF
     515;    COM_S_CHAR '4'
     516%endif
    796517    retf
    797518
     
    802523    DEBUG_S_CHAR('d')
    803524    xor     eax, eax
    804     mov     dr7, rax                    ; paranoia or not?
     525    mov     dr7, eax                    ; paranoia or not?
    805526    test    esi, CPUM_USE_DEBUG_REGS
    806527    jz short gth_debug_regs_dr7
    807528    DEBUG_S_CHAR('r')
    808     mov     rax, [rdx + r8 + CPUMCPU.Host.dr0]
    809     mov     dr0, rax
    810     mov     rbx, [rdx + r8 + CPUMCPU.Host.dr1]
    811     mov     dr1, rbx
    812     mov     rcx, [rdx + r8 + CPUMCPU.Host.dr2]
    813     mov     dr2, rcx
    814     mov     rax, [rdx + r8 + CPUMCPU.Host.dr3]
    815     mov     dr3, rax
     529    mov     eax, [edx + CPUMCPU.Host.dr0]
     530    mov     dr0, eax
     531    mov     ebx, [edx + CPUMCPU.Host.dr1]
     532    mov     dr1, ebx
     533    mov     ecx, [edx + CPUMCPU.Host.dr2]
     534    mov     dr2, ecx
     535    mov     eax, [edx + CPUMCPU.Host.dr3]
     536    mov     dr3, eax
    816537gth_debug_regs_dr7:
    817     mov     rbx, [rdx + r8 + CPUMCPU.Host.dr6]
    818     mov     dr6, rbx
    819     mov     rcx, [rdx + r8 + CPUMCPU.Host.dr7]
    820     mov     dr7, rcx
     538    mov     ebx, [edx + CPUMCPU.Host.dr6]
     539    mov     dr6, ebx
     540    mov     ecx, [edx + CPUMCPU.Host.dr7]
     541    mov     dr7, ecx
    821542    jmp     gth_debug_regs_no
    822 
     543       
    823544ENDPROC VMMGCGuestToHostAsm
    824545
    825 
     546;;
     547; VMMGCGuestToHostAsmHyperCtx
     548;
     549; This is an alternative entry point which we'll be using
     550; when the we have the hypervisor context and need to save
     551; that before going to the host.
     552;
     553; This is typically useful when abandoning the hypervisor
     554; because of a trap and want the trap state to be saved.
     555;
     556; @param    eax     Return code.
     557; @param    ecx     Points to CPUMCTXCORE.
     558; @uses     eax,edx,ecx
     559ALIGNCODE(16)
     560BEGINPROC VMMGCGuestToHostAsmHyperCtx
     561     int3
     562
     563;;
     564; VMMGCGuestToHostAsmGuestCtx
     565;
     566; Switches from Guest Context to Host Context.
     567; Of course it's only called from within the GC.
     568;
     569; @param    eax     Return code.
     570; @param    esp + 4 Pointer to CPUMCTXCORE.
     571;
     572; @remark   ASSUMES interrupts disabled.
     573;
     574ALIGNCODE(16)
     575BEGINPROC VMMGCGuestToHostAsmGuestCtx
     576      int3
     577     
    826578GLOBALNAME End
    827579;
     
    829581;
    830582NAME(Description):
    831     db "32-bits  to/from AMD64", 0
     583    db "32-bits to/from AMD64", 0
    832584
    833585extern NAME(Relocate)
     
    860612        at VMMSWITCHERDEF.offHCCode0,                   dd 0
    861613        at VMMSWITCHERDEF.cbHCCode0,                    dd NAME(IDEnterTarget)              - NAME(Start)
    862         at VMMSWITCHERDEF.offHCCode1,                   dd NAME(HCExitTarget)               - NAME(Start)
    863         at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                        - NAME(HCExitTarget)
     614        at VMMSWITCHERDEF.offHCCode1,                   dd NAME(ICExitTarget)               - NAME(Start)
     615        at VMMSWITCHERDEF.cbHCCode1,                    dd NAME(End)                        - NAME(ICExitTarget)
    864616        at VMMSWITCHERDEF.offIDCode0,                   dd NAME(IDEnterTarget)              - NAME(Start)
    865         at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(JmpGCTarget)                - NAME(IDEnterTarget)
     617        at VMMSWITCHERDEF.cbIDCode0,                    dd NAME(ICEnterTarget)              - NAME(IDEnterTarget)
    866618        at VMMSWITCHERDEF.offIDCode1,                   dd NAME(IDExitTarget)               - NAME(Start)
    867         at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(HCExitTarget)               - NAME(IDExitTarget)
    868         at VMMSWITCHERDEF.offGCCode,                    dd NAME(JmpGCTarget)                - NAME(Start)
    869         at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)               - NAME(JmpGCTarget)
     619        at VMMSWITCHERDEF.cbIDCode1,                    dd NAME(ICExitTarget)               - NAME(IDExitTarget)
     620        at VMMSWITCHERDEF.offGCCode,                    dd NAME(ICEnterTarget)              - NAME(Start)
     621        at VMMSWITCHERDEF.cbGCCode,                     dd NAME(IDExitTarget)               - NAME(ICEnterTarget)
    870622
    871623    iend
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette