VirtualBox

Changeset 20538 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Jun 13, 2009 9:15:27 PM (16 years ago)
Author:
vboxsync
Message:

CPUMR0A.asm: Split out the currently unused code into CPUMR0UnusedA.asm (easier to handle now).

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm

    r20536 r20538  
    281281%endif
    282282ENDPROC   cpumR0RestoreHostFPUState
    283 
    284 
    285 ;;
    286 ; Restores the guest's FPU/XMM state (fxrstor from the CPUMCTX fpu area).
    287 ;
    288 ; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
    289 ;
    290 ; @remarks Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
    291 ;          Clobbers xDX and the entire x87/SSE register state.
    292 align 16
    293 BEGINPROC   cpumR0LoadFPU
    294 %ifdef RT_ARCH_AMD64
    295  %ifdef RT_OS_WINDOWS
    296     mov     xDX, rcx                    ; pCtx (MSC x64 calling convention)
    297  %else
    298     mov     xDX, rdi                    ; pCtx (SysV/GCC x64 calling convention)
    299  %endif
    300 %else
    301     mov     xDX, dword [esp + 4]        ; pCtx (x86 stack calling convention)
    302 %endif
    303 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    304     cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    305     jz      .legacy_mode                ; 32-bit host kernel: stay in 32-bit code
    306     db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded: ptr16:32 far jump)
    307     dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    308 .legacy_mode:
    309 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    310 
    311     fxrstor [xDX + CPUMCTX.fpu]         ; load guest x87/SSE state (fxrstor requires 16-byte aligned area)
    312 .done:
    313     ret
    314 
    315 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
    316 ALIGNCODE(16)
    317 BITS 64
    318 .sixtyfourbit_mode:
    319     and     edx, 0ffffffffh             ; writing edx zero-extends: clears bits 63:32 of rdx (pCtx came from 32-bit code)
    320     fxrstor [rdx + CPUMCTX.fpu]
    321     jmp far [.fpret wrt rip]            ; far-return to the 32-bit kernel code segment
    322 .fpret:                                 ; 16:32 Pointer to .done.
    323     dd      .done, NAME(SUPR0AbsKernelCS)
    324 BITS 32
    325 %endif
    326 ENDPROC     cpumR0LoadFPU
    327 
    328 
    329 ;;
    330 ; Saves the guest's FPU/XMM state (fxsave into the CPUMCTX fpu area).
    331 ;
    332 ; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
    333 ;
    334 ; @remarks Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
    335 ;          Clobbers xDX.
    336 align 16
    337 BEGINPROC   cpumR0SaveFPU
    338 %ifdef RT_ARCH_AMD64
    339  %ifdef RT_OS_WINDOWS
    340     mov     xDX, rcx                    ; pCtx (MSC x64 calling convention)
    341  %else
    342     mov     xDX, rdi                    ; pCtx (SysV/GCC x64 calling convention)
    343  %endif
    344 %else
    345     mov     xDX, dword [esp + 4]        ; pCtx (x86 stack calling convention)
    346 %endif
    347 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    348     cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    349     jz      .legacy_mode                ; 32-bit host kernel: stay in 32-bit code
    350     db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded: ptr16:32 far jump)
    351     dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    352 .legacy_mode:
    353 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    354     fxsave  [xDX + CPUMCTX.fpu]         ; store guest x87/SSE state (fxsave requires 16-byte aligned area)
    355 .done:
    356     ret
    357 
    358 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
    359 ALIGNCODE(16)
    360 BITS 64
    361 .sixtyfourbit_mode:
    362     and     edx, 0ffffffffh             ; writing edx zero-extends: clears bits 63:32 of rdx (pCtx came from 32-bit code)
    363     fxsave  [rdx + CPUMCTX.fpu]
    364     jmp far [.fpret wrt rip]            ; far-return to the 32-bit kernel code segment
    365 .fpret:                                 ; 16:32 Pointer to .done.
    366     dd      .done, NAME(SUPR0AbsKernelCS)
    367 BITS 32
    368 %endif
    369 ENDPROC cpumR0SaveFPU
    370 
    371 
    372 ;;
    373 ; Restores the guest's XMM state (xmm0-xmm7 always; xmm8-xmm15 only in long mode).
    374 ;
    375 ; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
    376 ;
    377 ; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
    378 ;           Clobbers xDX and the XMM registers it restores.
    379 align 16
    380 BEGINPROC   cpumR0LoadXMM
    381 %ifdef RT_ARCH_AMD64
    382  %ifdef RT_OS_WINDOWS
    383     mov     xDX, rcx                    ; pCtx (MSC x64 calling convention)
    384  %else
    385     mov     xDX, rdi                    ; pCtx (SysV/GCC x64 calling convention)
    386  %endif
    387 %else
    388     mov     xDX, dword [esp + 4]        ; pCtx (x86 stack calling convention)
    389 %endif
    390 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    391     cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    392     jz      .legacy_mode                ; 32-bit host kernel: stay in 32-bit code
    393     db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded: ptr16:32 far jump)
    394     dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    395 .legacy_mode:
    396 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    397 
    398     movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]   ; aligned loads: aXMM lives in the fxsave area
    399     movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    400     movdqa  xmm2, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    401     movdqa  xmm3, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    402     movdqa  xmm4, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    403     movdqa  xmm5, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    404     movdqa  xmm6, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    405     movdqa  xmm7, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
    406 
    407 %ifdef RT_ARCH_AMD64
    408     test qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA          ; xmm8-15 only exist for a long-mode guest
    409     jz .done
    410 
    411     movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    412     movdqa  xmm9, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    413     movdqa  xmm10, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    414     movdqa  xmm11, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    415     movdqa  xmm12, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    416     movdqa  xmm13, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    417     movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    418     movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
    419 %endif
    420 .done:
    421     ret
    422 
    423 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
    424 ALIGNCODE(16)
    425 BITS 64
    426 .sixtyfourbit_mode:
    427     and     edx, 0ffffffffh             ; writing edx zero-extends: clears bits 63:32 of rdx (pCtx came from 32-bit code)
    428 
    429     movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
    430     movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
    431     movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
    432     movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
    433     movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
    434     movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
    435     movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
    436     movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
    437 
    438     test qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA          ; xmm8-15 only exist for a long-mode guest
    439     jz .sixtyfourbit_done
    440 
    441     movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
    442     movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
    443     movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
    444     movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
    445     movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
    446     movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
    447     movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
    448     movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
    449 .sixtyfourbit_done:
    450     jmp far [.fpret wrt rip]            ; far-return to the 32-bit kernel code segment
    451 .fpret:                                 ; 16:32 Pointer to .done.
    452     dd      .done, NAME(SUPR0AbsKernelCS)
    453 BITS 32
    454 %endif
    455 ENDPROC     cpumR0LoadXMM
    456 
    457 
    458 ;;
    459 ; Saves the guest's XMM state (xmm0-xmm7 always; xmm8-xmm15 only in long mode).
    460 ;
    461 ; @param    pCtx  x86:[esp+4] GCC:rdi MSC:rcx     CPUMCTX pointer
    462 ;
    463 ; @remarks  Used by the disabled CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE code.
    464 ;           Clobbers xDX only; the XMM registers are read, not modified.
    465 align 16
    466 BEGINPROC   cpumR0SaveXMM
    467 %ifdef RT_ARCH_AMD64
    468  %ifdef RT_OS_WINDOWS
    469     mov     xDX, rcx                    ; pCtx (MSC x64 calling convention)
    470  %else
    471     mov     xDX, rdi                    ; pCtx (SysV/GCC x64 calling convention)
    472  %endif
    473 %else
    474     mov     xDX, dword [esp + 4]        ; pCtx (x86 stack calling convention)
    475 %endif
    476 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    477     cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
    478     jz      .legacy_mode                ; 32-bit host kernel: stay in 32-bit code
    479     db      0xea                        ; jmp far .sixtyfourbit_mode (hand-encoded: ptr16:32 far jump)
    480     dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    481 .legacy_mode:
    482 %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    483 
    484     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0   ; aligned stores: aXMM lives in the fxsave area
    485     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    486     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    487     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    488     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    489     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    490     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    491     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
    492 
    493 %ifdef RT_ARCH_AMD64
    494     test qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA          ; xmm8-15 only exist for a long-mode guest
    495     jz .done
    496 
    497     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    498     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    499     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    500     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    501     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    502     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    503     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    504     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
    505 
    506 %endif
    507 .done:
    508     ret
    509 
    510 %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
    511 ALIGNCODE(16)
    512 BITS 64
    513 .sixtyfourbit_mode:
    514     and     edx, 0ffffffffh             ; writing edx zero-extends: clears bits 63:32 of rdx (pCtx came from 32-bit code)
    515 
    516     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
    517     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
    518     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
    519     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
    520     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
    521     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
    522     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
    523     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
    524 
    525     test qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA          ; xmm8-15 only exist for a long-mode guest
    526     jz .sixtyfourbit_done
    527 
    528     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
    529     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
    530     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
    531     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
    532     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
    533     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
    534     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
    535     movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
    536 
    537 .sixtyfourbit_done:
    538     jmp far [.fpret wrt rip]            ; far-return to the 32-bit kernel code segment
    539 .fpret:                                 ; 16:32 Pointer to .done.
    540     dd      .done, NAME(SUPR0AbsKernelCS)
    541 BITS 32
    542 %endif
    543 ENDPROC     cpumR0SaveXMM
    544 
    545 
    546 ;;
    547 ; Set the FPU control word; clearing exceptions first
    548 ;
    549 ; @param  u16FCW    x86:[esp+4] GCC:rdi MSC:rcx     New FPU control word
    550 align 16
    551 BEGINPROC cpumR0SetFCW
    552 %ifdef RT_ARCH_AMD64
    553  %ifdef RT_OS_WINDOWS
    554     mov     xAX, rcx                    ; u16FCW (MSC x64 calling convention)
    555  %else
    556     mov     xAX, rdi                    ; u16FCW (SysV/GCC x64 calling convention)
    557  %endif
    558 %else
    559     mov     xAX, dword [esp + 4]        ; u16FCW (x86 stack calling convention)
    560 %endif
    561     fnclex                              ; clear any pending x87 exceptions before loading the new control word
    562     push    xAX                         ; fldcw takes a memory operand; stage the value on the stack
    563     fldcw   [xSP]
    564     pop     xAX                         ; restore xSP (value in xAX no longer needed)
    565     ret
    566 ENDPROC   cpumR0SetFCW
    567 
    568 
    569 ;;
    570 ; Get the FPU control word
    571 ; @returns  ax      The current FPU control word.
    572 align 16
    573 BEGINPROC cpumR0GetFCW
    574     fnstcw  [xSP - 8]                   ; NOTE(review): scratch store below xSP (red-zone style) - confirm safe in this ring-0 context
    575     mov     ax, word [xSP - 8]          ; 16-bit result in ax
    576     ret
    577 ENDPROC   cpumR0GetFCW
    578 
    579 
    580 ;;
    581 ; Set the MXCSR;
    582 ;
    583 ; @param  u32MXCSR    x86:[esp+4] GCC:rdi MSC:rcx     New MXCSR
    584 align 16
    585 BEGINPROC cpumR0SetMXCSR
    586 %ifdef RT_ARCH_AMD64
    587  %ifdef RT_OS_WINDOWS
    588     mov     xAX, rcx                    ; u32MXCSR (MSC x64 calling convention)
    589  %else
    590     mov     xAX, rdi                    ; u32MXCSR (SysV/GCC x64 calling convention)
    591  %endif
    592 %else
    593     mov     xAX, dword [esp + 4]        ; u32MXCSR (x86 stack calling convention)
    594 %endif
    595     push    xAX                         ; ldmxcsr takes a memory operand; stage the value on the stack
    596     ldmxcsr [xSP]
    597     pop     xAX                         ; restore xSP (value in xAX no longer needed)
    598     ret
    599 ENDPROC   cpumR0SetMXCSR
    600 
    601 
    602 ;;
    603 ; Get the MXCSR
    604 ; @returns  eax     The current MXCSR value.
    605 align 16
    606 BEGINPROC cpumR0GetMXCSR
    607     stmxcsr [xSP - 8]                   ; NOTE(review): scratch store below xSP (red-zone style) - confirm safe in this ring-0 context
    608     mov     eax, dword [xSP - 8]        ; 32-bit result in eax
    609     ret
    610 ENDPROC   cpumR0GetMXCSR
    611283
    612284
     
    726398
    727399%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
     400
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette