Changeset 47793 in vbox for trunk/src/VBox


Timestamp: Aug 16, 2013 9:51:36 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 88054
Message:

VMM/HMR0Mixed.mac: Macrofied handling various exit paths. Essentially 6 copies are now 2.
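
For context on the commit message: the change relies on NASM's ability to splice a macro parameter into local label names, so one macro body can be expanded several times inside the same procedure without its jump labels colliding. Below is a minimal sketch of that technique using made-up names (RESTORE_SKETCH, sketch_start_vm, .skip_lldt); it is not the actual HMR0Mixed.mac code, only an illustration of why the RESTORESTATEVM32/RESTORESTATEVM64 macros in the diff take jump-label-suffix parameters.

    BITS 64

    ;; Hypothetical restore macro. %1 is a label suffix that keeps the local
    ;; labels unique when the macro is expanded more than once per procedure.
    %macro RESTORE_SKETCH 1
        pop     rax                 ; saved LDTR selector (sketch only)
        cmp     rax, 0
        je      .skip_lldt%1        ; without %1, a second expansion would redefine this label
        lldt    ax
    .skip_lldt%1:
    %endmacro

    ;; One routine with several exit paths, each expanding the same macro.
    sketch_start_vm:
        ; ... guest run and the normal exit path would go here ...
        RESTORE_SKETCH A
        mov     eax, 0              ; e.g. VINF_SUCCESS
        ret

    .sketch_failed_exit:
        RESTORE_SKETCH B            ; second expansion; the label becomes .skip_lldtB
        mov     eax, -1             ; e.g. some VERR_* status
        ret

NASM's %%-prefixed macro-local labels would be an alternative; this changeset instead passes explicit suffixes, one triple per exit path, which is what the RESTORESTATEVM32 A, B, C / D, E, F / G, H, I and RESTORESTATEVM64 a, b, c / d, e, f / g, h, i invocations in the diff below supply.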

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac

    r47652 → r47793
    3030 %define VMX_SKIP_TR
    3131%endif
     32
     33;; @def RESTORESTATEVM32
     34; Macro restoring essential host state and updating guest state
     35; for common host, 32-bit guest for VT-x.
     36;
     37; @param 1  Jump label suffix 1.
     38; @param 2  Jump label suffix 2.
     39; @param 3  Jump label suffix 3.
     40%macro RESTORESTATEVM32 3
     41    ; Restore base and limit of the IDTR & GDTR.
     42 %ifndef VMX_SKIP_IDTR
     43    lidt    [xSP]
     44    add     xSP, xCB * 2
     45 %endif
     46 %ifndef VMX_SKIP_GDTR
     47    lgdt    [xSP]
     48    add     xSP, xCB * 2
     49 %endif
     50
     51    push    xDI
     52 %ifndef VMX_SKIP_TR
     53    mov     xDI, [xSP + xCB * 3]         ; pCtx (*3 to skip the saved xDI, TR, LDTR).
     54 %else
     55    mov     xDI, [xSP + xCB * 2]         ; pCtx (*2 to skip the saved xDI, LDTR).
     56 %endif
     57
     58    mov     [ss:xDI + CPUMCTX.eax], eax
     59    mov     [ss:xDI + CPUMCTX.ebx], ebx
     60    mov     [ss:xDI + CPUMCTX.ecx], ecx
     61    mov     [ss:xDI + CPUMCTX.edx], edx
     62    mov     [ss:xDI + CPUMCTX.esi], esi
     63    mov     [ss:xDI + CPUMCTX.ebp], ebp
     64    mov     xAX, cr2
     65    mov     [ss:xDI + CPUMCTX.cr2], xAX
     66
     67 %ifdef RT_ARCH_AMD64
     68    pop     xAX                                 ; The guest edi we pushed above.
     69    mov     dword [ss:xDI + CPUMCTX.edi], eax
     70 %else
     71    pop     dword [ss:xDI + CPUMCTX.edi]        ; The guest edi we pushed above.
     72 %endif
     73
     74 %ifndef VMX_SKIP_TR
     75    ; Restore TSS selector; must mark it as not busy before using ltr (!)
     76    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
     77    ; @todo get rid of sgdt
     78    pop     xBX         ; Saved TR
     79    sub     xSP, xCB * 2
     80    sgdt    [xSP]
     81    mov     xAX, xBX
     82    and     al, 0F8h                                ; Mask away TI and RPL bits, get descriptor offset.
     83    add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
     84    and     dword [ss:xAX + 4], ~0200h              ; Clear busy flag (2nd type2 bit).
     85    ltr     bx
     86    add     xSP, xCB * 2
     87 %endif
     88
     89    pop     xAX         ; Saved LDTR
     90 %ifdef RT_ARCH_AMD64
     91    cmp     xAX, 0
     92    je      .skipldtwrite32%1
     93 %endif
     94    lldt    ax
     95
     96.skipldtwrite32%1:
     97    add     xSP, xCB     ; pCtx
     98
     99 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
     100    pop     xDX         ; Saved pCache
     101
     102    mov     ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
     103    cmp     ecx, 0      ; Can't happen
     104    je      .no_cached_reads%2
     105    jmp     .cached_read%3
     106
     107ALIGN(16)
     108.cached_read%3:
     109    dec     xCX
     110    mov     eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
     111    vmread  [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
     112    cmp     xCX, 0
     113    jnz     .cached_read
     114.no_cached_reads%2:
     115 %endif
     116
     117    ; Restore segment registers.
     118    MYPOPSEGS xAX, ax
     119
     120    ; Restore general purpose registers.
     121    MYPOPAD
     122%endmacro
     123
    32124
    33125;/**
     
    173265    jz      near .vmxstart_start_failed
    174266
    175     ; Restore base and limit of the IDTR & GDTR.
    176 %ifndef VMX_SKIP_IDTR
    177     lidt    [xSP]
    178     add     xSP, xCB * 2
    179 %endif
    180 %ifndef VMX_SKIP_GDTR
    181     lgdt    [xSP]
    182     add     xSP, xCB * 2
    183 %endif
    184 
    185     push    xDI
    186 %ifndef VMX_SKIP_TR
    187     mov     xDI, [xSP + xCB * 3]         ; pCtx (*3 to skip the saved xDI, TR, LDTR).
    188 %else
    189     mov     xDI, [xSP + xCB * 2]         ; pCtx (*2 to skip the saved xDI, LDTR).
    190 %endif
    191 
    192     mov     [ss:xDI + CPUMCTX.eax], eax
    193     mov     [ss:xDI + CPUMCTX.ebx], ebx
    194     mov     [ss:xDI + CPUMCTX.ecx], ecx
    195     mov     [ss:xDI + CPUMCTX.edx], edx
    196     mov     [ss:xDI + CPUMCTX.esi], esi
    197     mov     [ss:xDI + CPUMCTX.ebp], ebp
    198     mov     xAX, cr2
    199     mov     [ss:xDI + CPUMCTX.cr2], xAX
    200 
    201 %ifdef RT_ARCH_AMD64
    202     pop     xAX                                 ; The guest edi we pushed above.
    203     mov     dword [ss:xDI + CPUMCTX.edi], eax
    204 %else
    205     pop     dword [ss:xDI + CPUMCTX.edi]        ; The guest edi we pushed above.
    206 %endif
    207 
    208 %ifndef VMX_SKIP_TR
    209     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    210     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    211     ; @todo get rid of sgdt
    212     pop     xBX         ; Saved TR
    213     sub     xSP, xCB * 2
    214     sgdt    [xSP]
    215     mov     xAX, xBX
    216     and     al, 0F8h                                ; Mask away TI and RPL bits, get descriptor offset.
    217     add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    218     and     dword [ss:xAX + 4], ~0200h              ; Clear busy flag (2nd type2 bit).
    219     ltr     bx
    220     add     xSP, xCB * 2
    221 %endif
    222 
    223     pop     xAX         ; Saved LDTR
    224 %ifdef RT_ARCH_AMD64
    225     cmp     xAX, 0
    226     je      .skipldtwrite32
    227 %endif
    228     lldt    ax
    229 
    230 .skipldtwrite32:
    231     add     xSP, xCB     ; pCtx
    232 
    233 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    234     pop     xDX         ; Saved pCache
    235 
    236     mov     ecx, [ss:xDX + VMCSCACHE.Read.cValidEntries]
    237     cmp     ecx, 0      ; Can't happen
    238     je      .no_cached_reads
    239     jmp     .cached_read
    240 
    241 ALIGN(16)
    242 .cached_read:
    243     dec     xCX
    244     mov     eax, [ss:xDX + VMCSCACHE.Read.aField + xCX * 4]
    245     vmread  [ss:xDX + VMCSCACHE.Read.aFieldVal + xCX * 8], xAX
    246     cmp     xCX, 0
    247     jnz     .cached_read
    248 .no_cached_reads:
    249 %endif
    250 
    251     ; Restore segment registers.
    252     MYPOPSEGS xAX, ax
    253 
    254     ; Restore general purpose registers.
    255     MYPOPAD
    256 
     267    RESTORESTATEVM32 A, B, C
    257268    mov     eax, VINF_SUCCESS
    258269
     
    262273    ret
    263274
    264 
    265275.vmxstart_invalid_vmcs_ptr:
     276    RESTORESTATEVM32 D, E, F
     277    mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
     278    jmp     .vmstart_end
     279
     280.vmxstart_start_failed:
     281    RESTORESTATEVM32 G, H, I
     282    mov     eax, VERR_VMX_UNABLE_TO_START_VM
     283    jmp     .vmstart_end
     284
     285ENDPROC MY_NAME(VMXR0StartVM32)
     286
     287
     288%ifdef RT_ARCH_AMD64
     289;; @def RESTORESTATEVM64
     290; Macro restoring essential host state and updating guest state
     291; for 64-bit host, 64-bit guest for VT-x.
     292;
     293; @param 1  Jump label suffix 1.
     294; @param 2  Jump label suffix 2.
     295; @param 3  Jump label suffix 3.
     296%macro RESTORESTATEVM64 3
    266297    ; Restore base and limit of the IDTR & GDTR
    267 %ifndef VMX_SKIP_IDTR
     298 %ifndef VMX_SKIP_IDTR
    268299    lidt    [xSP]
    269300    add     xSP, xCB * 2
    270 %endif
    271 %ifndef VMX_SKIP_GDTR
     301 %endif
     302 %ifndef VMX_SKIP_GDTR
    272303    lgdt    [xSP]
    273304    add     xSP, xCB * 2
    274 %endif
    275 
    276 %ifndef VMX_SKIP_TR
    277     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    278     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    279     ; @todo get rid of sgdt
    280     pop     xBX         ; Saved TR
    281     sub     xSP, xCB * 2
    282     sgdt    [xSP]
    283     mov     xAX, xBX
    284     and     al, 0F8h                                ; Mask away TI and RPL bits, get descriptor offset.
    285     add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    286     and     dword [ss:xAX + 4], ~0200h              ; Clear busy flag (2nd type2 bit).
    287     ltr     bx
    288     add     xSP, xCB * 2
    289 %endif
    290 
    291     pop     xAX         ; Saved LDTR
    292     lldt    ax          ; Don't bother with conditional restoration in the error case.
    293 
    294 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    295     add     xSP, xCB * 2  ; pCtx + pCache
    296 %else
    297     add     xSP, xCB     ; pCtx
    298 %endif
    299 
    300     ; Restore segment registers.
    301     MYPOPSEGS xAX, ax
    302 
    303     ; Restore all general purpose host registers.
    304     MYPOPAD
    305     mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    306     jmp     .vmstart_end
    307 
    308 .vmxstart_start_failed:
    309     ; Restore base and limit of the IDTR & GDTR.
    310 %ifndef VMX_SKIP_IDTR
    311     lidt    [xSP]
    312     add     xSP, xCB * 2
    313 %endif
    314 %ifndef VMX_SKIP_GDTR
    315     lgdt    [xSP]
    316     add     xSP, xCB * 2
    317 %endif
    318 
    319 %ifndef VMX_SKIP_TR
    320     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    321     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p)
    322     ; @todo get rid of sgdt
    323     pop     xBX         ; Saved TR
    324     sub     xSP, xCB * 2
    325     sgdt    [xSP]
    326     mov     xAX, xBX
    327     and     al, 0F8h                                ; Mask away TI and RPL bits, get descriptor offset.
    328     add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    329     and     dword [ss:xAX + 4], ~0200h              ; Clear busy flag (2nd type2 bit).
    330     ltr     bx
    331     add     xSP, xCB * 2
    332 %endif
    333 
    334     pop     xAX         ; Saved LDTR
    335     lldt    ax          ; Don't bother with conditional restoration in the error case
    336 
    337 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    338     add     xSP, xCB * 2  ; pCtx + pCache
    339 %else
    340     add     xSP, xCB    ; pCtx
    341 %endif
    342 
    343     ; Restore segment registers.
    344     MYPOPSEGS xAX, ax
    345 
    346     ; Restore all general purpose host registers.
    347     MYPOPAD
    348     mov     eax, VERR_VMX_UNABLE_TO_START_VM
    349     jmp     .vmstart_end
    350 
    351 ENDPROC MY_NAME(VMXR0StartVM32)
    352 
    353 %ifdef RT_ARCH_AMD64
    354 ;/**
    355 ; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
    356 ; *
    357 ; * @returns VBox status code
    358 ; * @param   fResume    msc:rcx, gcc:rdi     vmlauch/vmresume
    359 ; * @param   pCtx       msc:rdx, gcc:rsi     Guest context
    360 ; * @param   pCache     msc:r8,  gcc:rdx     VMCS cache
    361 ; */
    362 ALIGNCODE(16)
    363 BEGINPROC MY_NAME(VMXR0StartVM64)
    364     push    xBP
    365     mov     xBP, xSP
    366 
    367     pushf
    368     cli
    369 
    370     ; Save all general purpose host registers.
    371     MYPUSHAD
    372 
    373     ; First we have to save some final CPU context registers.
    374     lea     r10, [.vmlaunch64_done wrt rip]
    375     mov     rax, VMX_VMCS_HOST_RIP      ; Return address (too difficult to continue after VMLAUNCH?).
    376     vmwrite rax, r10
    377     ; Note: assumes success!
    378 
    379     ; Save the Guest CPU context pointer.
    380 %ifdef ASM_CALL64_GCC
    381     ; fResume already in rdi
    382     ; pCtx    already in rsi
    383     mov     rbx, rdx        ; pCache
    384 %else
    385     mov     rdi, rcx        ; fResume
    386     mov     rsi, rdx        ; pCtx
    387     mov     rbx, r8         ; pCache
    388 %endif
    389 
    390     ; Save segment registers.
    391     ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
    392     MYPUSHSEGS xAX, ax
    393 
    394 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    395     mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
    396     cmp     ecx, 0
    397     je      .no_cached_writes
    398     mov     edx, ecx
    399     mov     ecx, 0
    400     jmp     .cached_write
    401 
    402 ALIGN(16)
    403 .cached_write:
    404     mov     eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
    405     vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
    406     inc     xCX
    407     cmp     xCX, xDX
    408     jl     .cached_write
    409 
    410     mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
    411 .no_cached_writes:
    412 
    413     ; Save the pCache pointer.
    414     push    xBX
    415 %endif
    416 
    417 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    418     ; Save the host MSRs and load the guest MSRs.
    419     LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    420     LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
    421     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
    422     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
    423 %endif
    424 
    425     ; Save the pCtx pointer.
    426     push    xSI
    427 
    428     ; Save LDTR.
    429     xor     eax, eax
    430     sldt    ax
    431     push    xAX
    432 
    433 %ifndef VMX_SKIP_TR
    434     ; The TR limit is reset to 0x67; restore it manually.
    435     str     eax
    436     push    xAX
    437 %endif
    438 
    439     ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    440 %ifndef VMX_SKIP_GDTR
    441     sub     xSP, xCB * 2
    442     sgdt    [xSP]
    443 %endif
    444 %ifndef VMX_SKIP_IDTR
    445     sub     xSP, xCB * 2
    446     sidt    [xSP]
    447 %endif
    448 
    449     ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
    450     mov     rbx, qword [xSI + CPUMCTX.cr2]
    451     mov     rdx, cr2
    452     cmp     rbx, rdx
    453     je      .skipcr2write
    454     mov     cr2, rbx
    455 
    456 .skipcr2write:
    457     mov     eax, VMX_VMCS_HOST_RSP
    458     vmwrite xAX, xSP
    459     ; Note: assumes success!
    460     ; Don't mess with ESP anymore!!!
    461 
    462     ; Restore Guest's general purpose registers.
    463     mov     rax, qword [xSI + CPUMCTX.eax]
    464     mov     rbx, qword [xSI + CPUMCTX.ebx]
    465     mov     rcx, qword [xSI + CPUMCTX.ecx]
    466     mov     rdx, qword [xSI + CPUMCTX.edx]
    467     mov     rbp, qword [xSI + CPUMCTX.ebp]
    468     mov     r8,  qword [xSI + CPUMCTX.r8]
    469     mov     r9,  qword [xSI + CPUMCTX.r9]
    470     mov     r10, qword [xSI + CPUMCTX.r10]
    471     mov     r11, qword [xSI + CPUMCTX.r11]
    472     mov     r12, qword [xSI + CPUMCTX.r12]
    473     mov     r13, qword [xSI + CPUMCTX.r13]
    474     mov     r14, qword [xSI + CPUMCTX.r14]
    475     mov     r15, qword [xSI + CPUMCTX.r15]
    476 
    477     ; Resume or start?
    478     cmp     xDI, 0                  ; fResume
    479     je      .vmlaunch64_launch
    480 
    481     ; Restore edi & esi.
    482     mov     rdi, qword [xSI + CPUMCTX.edi]
    483     mov     rsi, qword [xSI + CPUMCTX.esi]
    484 
    485     vmresume
    486     jmp     .vmlaunch64_done;      ; Here if vmresume detected a failure.
    487 
    488 .vmlaunch64_launch:
    489     ; Restore rdi & rsi.
    490     mov     rdi, qword [xSI + CPUMCTX.edi]
    491     mov     rsi, qword [xSI + CPUMCTX.esi]
    492 
    493     vmlaunch
    494     jmp     .vmlaunch64_done;      ; Here if vmlaunch detected a failure.
    495 
    496 ALIGNCODE(16)
    497 .vmlaunch64_done:
    498     jc      near .vmxstart64_invalid_vmcs_ptr
    499     jz      near .vmxstart64_start_failed
    500 
    501     ; Restore base and limit of the IDTR & GDTR
    502 %ifndef VMX_SKIP_IDTR
    503     lidt    [xSP]
    504     add     xSP, xCB * 2
    505 %endif
    506 %ifndef VMX_SKIP_GDTR
    507     lgdt    [xSP]
    508     add     xSP, xCB * 2
    509 %endif
     305 %endif
    510306
    511307    push    xDI
    512 %ifndef VMX_SKIP_TR
     308 %ifndef VMX_SKIP_TR
    513309    mov     xDI, [xSP + xCB * 3]        ; pCtx (*3 to skip the saved xDI, TR, LDTR)
    514 %else
     310 %else
    515311    mov     xDI, [xSP + xCB * 2]        ; pCtx (*2 to skip the saved xDI, LDTR)
    516 %endif
     312 %endif
    517313
    518314    mov     qword [xDI + CPUMCTX.eax], rax
     
    536332    mov     qword [xDI + CPUMCTX.edi], rax
    537333
    538 %ifndef VMX_SKIP_TR
     334 %ifndef VMX_SKIP_TR
    539335    ; Restore TSS selector; must mark it as not busy before using ltr (!)
    540336    ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
     
    549345    ltr     bx
    550346    add     xSP, xCB * 2
    551 %endif
     347 %endif
    552348
    553349    pop     xAX         ; Saved LDTR
    554350    cmp     xAX, 0
    555     je      .skipldtwrite64
     351    je      .skipldtwrite64%1
    556352    lldt    ax
    557353
    558 .skipldtwrite64:
     354.skipldtwrite64%1:
    559355    pop     xSI         ; pCtx (needed in rsi by the macros below)
    560356
    561 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     357 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    562358    ; Save the guest MSRs and load the host MSRs.
    563359    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     
    565361    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
    566362    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
    567 %endif
    568 
    569 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
     363 %endif
     364
     365 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    570366    pop     xDX         ; Saved pCache
    571367
    572368    mov     ecx, [xDX + VMCSCACHE.Read.cValidEntries]
    573369    cmp     ecx, 0      ; Can't happen
    574     je      .no_cached_reads
    575     jmp     .cached_read
     370    je      .no_cached_reads64%2
     371    jmp     .cached_read64%3
    576372
    577373ALIGN(16)
    578 .cached_read:
     374.cached_read64%3:
    579375    dec     xCX
    580376    mov     eax, [xDX + VMCSCACHE.Read.aField + xCX * 4]
     
    582378    cmp      xCX, 0
    583379    jnz     .cached_read
    584 .no_cached_reads:
    585 %endif
     380.no_cached_reads64%2:
     381 %endif
    586382
    587383    ; Restore segment registers.
     
    590386    ; Restore general purpose registers.
    591387    MYPOPAD
    592 
     388%endmacro
     389
     390
     391;/**
     392; * Prepares for and executes VMLAUNCH/VMRESUME (64 bits guest mode)
     393; *
     394; * @returns VBox status code
     395; * @param   fResume    msc:rcx, gcc:rdi     vmlauch/vmresume
     396; * @param   pCtx       msc:rdx, gcc:rsi     Guest context
     397; * @param   pCache     msc:r8,  gcc:rdx     VMCS cache
     398; */
     399ALIGNCODE(16)
     400BEGINPROC MY_NAME(VMXR0StartVM64)
     401    push    xBP
     402    mov     xBP, xSP
     403
     404    pushf
     405    cli
     406
     407    ; Save all general purpose host registers.
     408    MYPUSHAD
     409
     410    ; First we have to save some final CPU context registers.
     411    lea     r10, [.vmlaunch64_done wrt rip]
     412    mov     rax, VMX_VMCS_HOST_RIP      ; Return address (too difficult to continue after VMLAUNCH?).
     413    vmwrite rax, r10
     414    ; Note: assumes success!
     415
     416    ; Save the Guest CPU context pointer.
     417%ifdef ASM_CALL64_GCC
     418    ; fResume already in rdi
     419    ; pCtx    already in rsi
     420    mov     rbx, rdx        ; pCache
     421%else
     422    mov     rdi, rcx        ; fResume
     423    mov     rsi, rdx        ; pCtx
     424    mov     rbx, r8         ; pCache
     425%endif
     426
     427    ; Save segment registers.
     428    ; Note: MYPUSHSEGS trashes rdx & rcx, so we moved it here (msvc amd64 case).
     429    MYPUSHSEGS xAX, ax
     430
     431%ifdef VMX_USE_CACHED_VMCS_ACCESSES
     432    mov     ecx, [xBX + VMCSCACHE.Write.cValidEntries]
     433    cmp     ecx, 0
     434    je      .no_cached_writes
     435    mov     edx, ecx
     436    mov     ecx, 0
     437    jmp     .cached_write
     438
     439ALIGN(16)
     440.cached_write:
     441    mov     eax, [xBX + VMCSCACHE.Write.aField + xCX * 4]
     442    vmwrite xAX, [xBX + VMCSCACHE.Write.aFieldVal + xCX * 8]
     443    inc     xCX
     444    cmp     xCX, xDX
     445    jl     .cached_write
     446
     447    mov     dword [xBX + VMCSCACHE.Write.cValidEntries], 0
     448.no_cached_writes:
     449
     450    ; Save the pCache pointer.
     451    push    xBX
     452%endif
     453
     454%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     455    ; Save the host MSRs and load the guest MSRs.
     456    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
     457    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
     458    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     459    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
     460%endif
     461
     462    ; Save the pCtx pointer.
     463    push    xSI
     464
     465    ; Save LDTR.
     466    xor     eax, eax
     467    sldt    ax
     468    push    xAX
     469
     470%ifndef VMX_SKIP_TR
     471    ; The TR limit is reset to 0x67; restore it manually.
     472    str     eax
     473    push    xAX
     474%endif
     475
     476    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
     477%ifndef VMX_SKIP_GDTR
     478    sub     xSP, xCB * 2
     479    sgdt    [xSP]
     480%endif
     481%ifndef VMX_SKIP_IDTR
     482    sub     xSP, xCB * 2
     483    sidt    [xSP]
     484%endif
     485
     486    ; Load CR2 if necessary (may be expensive as writing CR2 is a synchronizing instruction).
     487    mov     rbx, qword [xSI + CPUMCTX.cr2]
     488    mov     rdx, cr2
     489    cmp     rbx, rdx
     490    je      .skipcr2write
     491    mov     cr2, rbx
     492
     493.skipcr2write:
     494    mov     eax, VMX_VMCS_HOST_RSP
     495    vmwrite xAX, xSP
     496    ; Note: assumes success!
     497    ; Don't mess with ESP anymore!!!
     498
     499    ; Restore Guest's general purpose registers.
     500    mov     rax, qword [xSI + CPUMCTX.eax]
     501    mov     rbx, qword [xSI + CPUMCTX.ebx]
     502    mov     rcx, qword [xSI + CPUMCTX.ecx]
     503    mov     rdx, qword [xSI + CPUMCTX.edx]
     504    mov     rbp, qword [xSI + CPUMCTX.ebp]
     505    mov     r8,  qword [xSI + CPUMCTX.r8]
     506    mov     r9,  qword [xSI + CPUMCTX.r9]
     507    mov     r10, qword [xSI + CPUMCTX.r10]
     508    mov     r11, qword [xSI + CPUMCTX.r11]
     509    mov     r12, qword [xSI + CPUMCTX.r12]
     510    mov     r13, qword [xSI + CPUMCTX.r13]
     511    mov     r14, qword [xSI + CPUMCTX.r14]
     512    mov     r15, qword [xSI + CPUMCTX.r15]
     513
     514    ; Resume or start?
     515    cmp     xDI, 0                  ; fResume
     516    je      .vmlaunch64_launch
     517
     518    ; Restore edi & esi.
     519    mov     rdi, qword [xSI + CPUMCTX.edi]
     520    mov     rsi, qword [xSI + CPUMCTX.esi]
     521
     522    vmresume
     523    jmp     .vmlaunch64_done;      ; Here if vmresume detected a failure.
     524
     525.vmlaunch64_launch:
     526    ; Restore rdi & rsi.
     527    mov     rdi, qword [xSI + CPUMCTX.edi]
     528    mov     rsi, qword [xSI + CPUMCTX.esi]
     529
     530    vmlaunch
     531    jmp     .vmlaunch64_done;      ; Here if vmlaunch detected a failure.
     532
     533ALIGNCODE(16)
     534.vmlaunch64_done:
     535    jc      near .vmxstart64_invalid_vmcs_ptr
     536    jz      near .vmxstart64_start_failed
     537
     538    RESTORESTATEVM64 a, b, c
    593539    mov     eax, VINF_SUCCESS
    594540
     
    598544    ret
    599545
    600 
    601546.vmxstart64_invalid_vmcs_ptr:
    602     ; Restore base and limit of the IDTR & GDTR.
    603 %ifndef VMX_SKIP_IDTR
    604     lidt    [xSP]
    605     add     xSP, xCB * 2
    606 %endif
    607 %ifndef VMX_SKIP_GDTR
    608     lgdt    [xSP]
    609     add     xSP, xCB * 2
    610 %endif
    611 
    612 %ifndef VMX_SKIP_TR
    613     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    614     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    615     ; @todo get rid of sgdt
    616     pop     xBX         ; Saved TR
    617     sub     xSP, xCB * 2
    618     sgdt    [xSP]
    619     mov     xAX, xBX
    620     and     al, 0F8h                                ; Mask away TI and RPL bits, get descriptor offset.
    621     add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    622     and     dword [xAX + 4], ~0200h                 ; Clear busy flag (2nd type2 bit).
    623     ltr     bx
    624     add     xSP, xCB * 2
    625 %endif
    626 
    627     pop     xAX         ; Saved LDTR
    628     lldt    ax          ; Don't bother with conditional restoration in the error case.
    629 
    630     pop     xSI         ; pCtx (needed in rsi by the macros below)
    631 
    632 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    633     ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    634     LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    635     LOADHOSTMSR MSR_K8_SF_MASK
    636     LOADHOSTMSR MSR_K6_STAR
    637     LOADHOSTMSR MSR_K8_LSTAR
    638 %endif
    639 
    640 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    641     add     xSP, xCB    ; pCache
    642 %endif
    643 
    644     ; Restore segment registers.
    645     MYPOPSEGS xAX, ax
    646 
    647     ; Restore all general purpose host registers.
    648     MYPOPAD
     547    RESTORESTATEVM64 d, e, f
    649548    mov     eax, VERR_VMX_INVALID_VMCS_PTR_TO_START_VM
    650549    jmp     .vmstart64_end
    651550
    652551.vmxstart64_start_failed:
    653     ; Restore base and limit of the IDTR & GDTR.
    654 %ifndef VMX_SKIP_IDTR
    655     lidt    [xSP]
    656     add     xSP, xCB * 2
    657 %endif
    658 %ifndef VMX_SKIP_GDTR
    659     lgdt    [xSP]
    660     add     xSP, xCB * 2
    661 %endif
    662 
    663 %ifndef VMX_SKIP_TR
    664     ; Restore TSS selector; must mark it as not busy before using ltr (!)
    665     ; ASSUME that this is supposed to be 'BUSY'. (saves 20-30 ticks on the T42p).
    666     ; @todo get rid of sgdt
    667     pop     xBX         ; Saved TR
    668     sub     xSP, xCB * 2
    669     sgdt    [xSP]
    670     mov     xAX, xBX
    671     and     al, 0F8h                                ; Mask away TI and RPL bits, get descriptor offset.
    672     add     xAX, [xSP + 2]                          ; eax <- GDTR.address + descriptor offset.
    673     and     dword [xAX + 4], ~0200h                 ; Clear busy flag (2nd type2 bit).
    674     ltr     bx
    675     add     xSP, xCB * 2
    676 %endif
    677 
    678     pop     xAX         ; Saved LDTR
    679     lldt    ax          ; Don't bother with conditional restoration in the error case.
    680 
    681     pop     xSI         ; pCtx (needed in rsi by the macros below).
    682 
    683 %ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    684     ; Load the host MSRs. Don't bother saving the guest MSRs as vmlaunch/vmresume failed.
    685     LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
    686     LOADHOSTMSR MSR_K8_SF_MASK
    687     LOADHOSTMSR MSR_K6_STAR
    688     LOADHOSTMSR MSR_K8_LSTAR
    689 %endif
    690 
    691 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
    692     add     xSP, xCB    ; pCache
    693 %endif
    694 
    695     ; Restore segment registers.
    696     MYPOPSEGS xAX, ax
    697 
    698     ; Restore all general purpose host registers.
    699     MYPOPAD
     552    RESTORESTATEVM64 g, h, i
    700553    mov     eax, VERR_VMX_UNABLE_TO_START_VM
    701554    jmp     .vmstart64_end