- Timestamp: Dec 2, 2008 1:30:52 PM
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm (r14899 → r14900)

@@ -63 +63 @@
 ; @param 2      16-bit regsiter name for \a 1.

-; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
+; Load the corresponding guest MSR (trashes rdx & rcx)
 %macro LOADGUESTMSR 2
-    mov     rcx, %1
-    rdmsr
-    push    rdx
-    push    rax
     mov     edx, dword [rsi + %2 + 4]
     mov     eax, dword [rsi + %2]
@@ -81 +77 @@
     mov     dword [rsi + %2], eax
     mov     dword [rsi + %2 + 4], edx
-    pop     rax
-    pop     rdx
-    wrmsr
-%endmacro
-
-; Load the corresponding host MSR (trashes rdx & rcx)
-%macro LOADHOSTMSR 1
-    mov     rcx, %1
-    pop     rax
-    pop     rdx
-    wrmsr
 %endmacro
@@ -185 +170 @@
 ; *
 ; * @returns VBox status code
-; * @param fResume    vmlauch/vmresume
 ; * @param pCtx       Guest context
 ; */
@@ -219 +203 @@

 ;/* Save the Guest CPU context pointer. */
-%ifdef ASM_CALL64_GCC
-    ; fResume already in rdi
     ; pCtx already in rsi
-%else
-    mov     rdi, rcx                ; fResume
-    mov     rsi, rdx                ; pCtx
-%endif

 ;/* Save segment registers */
@@ -233 +211 @@
 ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
 ;; @todo use the automatic load feature for MSRs
-    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
-%if 0 ; not supported on Intel CPUs
-    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
-%endif
-    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
-    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
+    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
+    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
+    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
@@ -280 +255 @@
     mov     r15, qword [rsi + CPUMCTX.r15]

-    ; resume or start?
-    cmp     rdi, 0                  ; fResume
-    je      .vmlauch64_lauch
-
-    ;/* Restore edi & esi. */
-    mov     rdi, qword [rsi + CPUMCTX.edi]
-    mov     rsi, qword [rsi + CPUMCTX.esi]
-
-    vmresume
-    jmp     .vmlaunch64_done        ;/* here if vmresume detected a failure. */
-
-.vmlauch64_lauch:
     ;/* Restore rdi & rsi. */
     mov     rdi, qword [rsi + CPUMCTX.edi]
@@ -301 +264 @@
 ALIGNCODE(16)
 .vmlaunch64_done:
-    jc      near .vm8tart64_invalid_vmxon_ptr
-    jz      near .vm8tart64_start_failed
+    jc      near .vmstart64_invalid_vmxon_ptr
+    jz      near .vmstart64_start_failed

     ; Restore base and limit of the IDTR & GDTR
@@ -339 +302 @@
     ;; @todo use the automatic load feature for MSRs
     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-    LOADHOSTMSR MSR_K8_SF_MASK
-    LOADHOSTMSR MSR_K6_STAR
-%if 0 ; not supported on Intel CPUs
-    LOADHOSTMSR MSR_K8_CSTAR
-%endif
-    LOADHOSTMSR MSR_K8_LSTAR

     ; Restore segment registers
@@ -360 +317 @@


-.vm8tart64_invalid_vmxon_ptr:
+.vmstart64_invalid_vmxon_ptr:
     ; Restore base and limit of the IDTR & GDTR
     lidt    [rsp]
@@ -375 +332 @@
     ;; @todo use the automatic load feature for MSRs
     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-    LOADHOSTMSR MSR_K8_SF_MASK
-    LOADHOSTMSR MSR_K6_STAR
-%if 0 ; not supported on Intel CPUs
-    LOADHOSTMSR MSR_K8_CSTAR
-%endif
-    LOADHOSTMSR MSR_K8_LSTAR

     ; Restore segment registers
@@ -390 +341 @@
     jmp     .vmstart64_end

-.vm8tart64_start_failed:
+.vmstart64_start_failed:
     ; Restore base and limit of the IDTR & GDTR
     lidt    [rsp]
@@ -405 +356 @@
     ;; @todo use the automatic load feature for MSRs
     LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-    LOADHOSTMSR MSR_K8_SF_MASK
-    LOADHOSTMSR MSR_K6_STAR
-%if 0 ; not supported on Intel CPUs
-    LOADHOSTMSR MSR_K8_CSTAR
-%endif
-    LOADHOSTMSR MSR_K8_LSTAR

     ; Restore segment registers
@@ -542 +487 @@
 ENDPROC SVMGCVMRun64

+;/**
+; * Saves the guest FPU context
+; *
+; * @returns VBox status code
+; * @param    pCtx    Guest context [rsi]
+; */
 BEGINPROC HWACCMSaveGuestFPU64
+    mov     rax, cr0
+    mov     rcx, rax                ; save old CR0
+    and     rax, ~(X86_CR0_TS | X86_CR0_EM)
+    mov     cr0, rax
+
+    fxsave  [rsi + CPUMCTX.fpu]
+
+    mov     cr0, rcx                ; and restore old CR0 again
+
+    mov     eax, VINF_SUCCESS
     ret
 ENDPROC HWACCMSaveGuestFPU64

+;/**
+; * Saves the guest debug context (DR0-3, DR6)
+; *
+; * @returns VBox status code
+; * @param    pCtx    Guest context [rsi]
+; */
 BEGINPROC HWACCMSaveGuestDebug64
+    mov     rax, dr0
+    mov     qword [rsi + CPUMCTX.dr + 0*8], rax
+    mov     rax, dr1
+    mov     qword [rsi + CPUMCTX.dr + 1*8], rax
+    mov     rax, dr2
+    mov     qword [rsi + CPUMCTX.dr + 2*8], rax
+    mov     rax, dr3
+    mov     qword [rsi + CPUMCTX.dr + 3*8], rax
+    mov     rax, dr6
+    mov     qword [rsi + CPUMCTX.dr + 6*8], rax
+    mov     eax, VINF_SUCCESS
     ret
 ENDPROC HWACCMSaveGuestDebug64

+;/**
+; * Dummy callback handler
+; *
+; * @returns VBox status code
+; * @param    pCtx    Guest context [rsi]
+; */
 BEGINPROC HWACCMTestSwitcher64
+    mov     eax, VINF_SUCCESS
     ret
 ENDPROC HWACCMTestSwitcher64
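Summing up this file's hunks: the stack-based host-MSR juggling is gone (the old LOADGUESTMSR did an rdmsr/push of the host value before loading the guest one, and LOADHOSTMSR popped it back in strict reverse order on every exit path), the fResume/vmresume branch and its %ifdef ASM_CALL64_GCC argument shuffling are dropped, apparently leaving only the vmlaunch path, the misspelled .vm8tart64_* labels become .vmstart64_*, and the HWACCMSaveGuestFPU64 / HWACCMSaveGuestDebug64 / HWACCMTestSwitcher64 stubs get real bodies.

For readers less familiar with MSR access, below is a minimal NASM sketch, not part of the changeset, of the rdmsr/wrmsr pattern LOADGUESTMSR is built on: ECX selects the MSR and EDX:EAX carries the 64-bit value. The mov rcx, %1 / wrmsr tail of the real macro is elided in the hunk above, so treat that detail as an assumption; this is ring-0-only code, and CTX_MSR_LSTAR is a hypothetical stand-in for the CPUMCTX.msrLSTAR offset.

    BITS 64
    %define MSR_K8_LSTAR    0c0000082h          ; long-mode SYSCALL target RIP

    ; Write the 64-bit value at [rsi + %2] into MSR %1 (trashes rax, rcx, rdx),
    ; mirroring the trimmed LOADGUESTMSR.
    %macro LOAD_MSR_FROM_CTX 2
        mov     edx, dword [rsi + %2 + 4]       ; high half
        mov     eax, dword [rsi + %2]           ; low half
        mov     rcx, %1                         ; MSR index
        wrmsr                                   ; MSR <- EDX:EAX
    %endmacro

    ; Read MSR %1 into [rsi + %2] (trashes rax, rcx, rdx).
    %macro SAVE_MSR_TO_CTX 2
        mov     rcx, %1
        rdmsr                                   ; EDX:EAX <- MSR
        mov     dword [rsi + %2], eax
        mov     dword [rsi + %2 + 4], edx
    %endmacro

    ; Usage (hypothetical offset):
    ;   LOAD_MSR_FROM_CTX MSR_K8_LSTAR, CTX_MSR_LSTAR

The design motivation is visible in the diff itself: with host values pushed on the stack, all three exit paths had to pop them back in exactly mirrored order, which is why the same six-line LOADHOSTMSR block is removed three times; the ;; @todo about the automatic MSR load feature points at the intended long-term replacement.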
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac (r14785 → r14900)

@@ -430 +430 @@
     lss     esp, [edx + CPUMCPU.Host.esp]

-    ; Restore FPU if guest has used it.
-    ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
-    mov     esi, [edx + CPUMCPU.fUseFlags]  ; esi == use flags.
-    test    esi, CPUM_USED_FPU
-    jz      near gth_fpu_no
-    mov     ecx, cr0
-    and     ecx, ~(X86_CR0_TS | X86_CR0_EM)
-    mov     cr0, ecx
-
-    fxsave  [edx + CPUMCPU.Guest.fpu]
-    fxrstor [edx + CPUMCPU.Host.fpu]
-    jmp     near gth_fpu_no
-
-ALIGNCODE(16)
-gth_fpu_no:
-
     ; Control registers.
-    ; Would've liked to have these higher up in case of crashes, but
-    ; the fpu stuff must be done before we restore cr0.
     mov     ecx, [edx + CPUMCPU.Host.cr4]
     mov     cr4, ecx
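Both files touch the same trick: fxsave/fxrstor raise #NM when CR0.TS is set and #UD when CR0.EM is set, so the code clears both bits, moves the FPU state, and then restores the original CR0. The switcher no longer does this inline on the way back to the host; the guest FPU save now lives in the dedicated HWACCMSaveGuestFPU64 helper added above. A minimal ring-0 NASM sketch of the pattern, assuming a hypothetical 512-byte fpu_area buffer in place of the real CPUMCTX.fpu / CPUMCPU.Guest.fpu fields:

    BITS 64
    %define X86_CR0_EM      (1 << 2)        ; x87 emulation
    %define X86_CR0_TS      (1 << 3)        ; task switched

    section .bss
        alignb  16
    fpu_area:   resb 512                    ; fxsave image: 512 bytes, 16-byte aligned

    section .text
    save_fpu_state:
        mov     rax, cr0
        mov     rcx, rax                    ; remember the original CR0
        and     rax, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, rax                    ; no #NM/#UD from the next instruction
        fxsave  [fpu_area]                  ; dump x87/MMX/SSE state
        mov     cr0, rcx                    ; put TS/EM back exactly as they were
        ret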