- Timestamp:
- Jun 6, 2008 9:28:02 AM (17 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
- 5 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/HWACCMInternal.h
r9452 r9453 194 194 /** Virtual address of the TSS page used for real mode emulation. */ 195 195 R0PTRTYPE(PVBOXTSS) pRealModeTSS; 196 197 /** Ring 0 handlers for VT-x. */198 DECLR0CALLBACKMEMBER(int, pfnStartVM,(RTHCUINT fResume, PCPUMCTX pCtx));199 196 200 197 /** Host CR4 value (set by ring-0 VMX init) */ -
trunk/src/VBox/VMM/VMMR0/HWACCMR0A.asm
r9452 r9453 178 178 179 179 ;/** 180 ; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode) 180 ; * Prepares for and executes VMLAUNCH 181 ; * 182 ; * @note identical to VMXResumeVM, except for the vmlaunch/vmresume opcode 181 183 ; * 182 184 ; * @returns VBox status code 183 ; * @param fResume vmlauch/vmresume 184 ; * @param pCtx Guest context 185 ; * @param pCtx Guest context 185 186 ; */ 186 BEGINPROC VMX R0StartVM32187 BEGINPROC VMXStartVM 187 188 push xBP 188 189 mov xBP, xSP … … 200 201 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */ 201 202 vmwrite xAX, [xSP] 202 ;/* Note:assumes success... */203 ;/* @todo assumes success... */ 203 204 add xSP, xS 204 205 … … 224 225 %ifdef RT_ARCH_AMD64 225 226 %ifdef ASM_CALL64_GCC 226 ; fResume already in rdi 227 ; pCtx already in rsi 227 mov rsi, rdi ; pCtx 228 228 %else 229 mov rdi, rcx ; fResume 230 mov rsi, rdx ; pCtx 229 mov rsi, rcx ; pCtx 231 230 %endif 232 231 %else 233 mov edi, [ebp + 8] ; fResume 234 mov esi, [ebp + 12] ; pCtx 232 mov esi, [ebp + 8] ; pCtx 235 233 %endif 236 234 push xSI … … 260 258 mov eax, VMX_VMCS_HOST_RSP 261 259 vmwrite xAX, xSP 262 ;/* Note:assumes success... */260 ;/* @todo assumes success... */ 263 261 ;/* Don't mess with ESP anymore!! */ 264 262 … … 268 266 mov ecx, [xSI + CPUMCTX.ecx] 269 267 mov edx, [xSI + CPUMCTX.edx] 268 mov edi, [xSI + CPUMCTX.edi] 270 269 mov ebp, [xSI + CPUMCTX.ebp] 271 272 ; resume or start?273 cmp xDI, 0 ; fResume274 je .vmlauch_lauch275 276 ;/* Restore edi & esi. */277 mov edi, [xSI + CPUMCTX.edi]278 mov esi, [xSI + CPUMCTX.esi]279 280 vmresume281 jmp .vmlaunch_done; ;/* here if vmresume detected a failure. */282 283 .vmlauch_lauch:284 ;/* Restore edi & esi. 
*/285 mov edi, [xSI + CPUMCTX.edi]286 270 mov esi, [xSI + CPUMCTX.esi] 287 271 … … 381 365 jmp .vmstart_end 382 366 383 ENDPROC VMX R0StartVM32384 385 %ifdef RT_ARCH_AMD64 367 ENDPROC VMXStartVM 368 369 386 370 ;/** 387 ; * Prepares for and executes VMLAUNCH/VMRESUME (32 bits guest mode) 371 ; * Prepares for and executes VMRESUME 372 ; * 373 ; * @note identical to VMXStartVM, except for the vmlaunch/vmresume opcode 388 374 ; * 389 375 ; * @returns VBox status code 390 ; * @param fResume vmlauch/vmresume 391 ; * @param pCtx Guest context 376 ; * @param pCtx Guest context 392 377 ; */ 393 BEGINPROC VMXR0StartVM64 394 ret 395 ENDPROC VMXR0StartVM64 396 378 BEGINPROC VMXResumeVM 379 push xBP 380 mov xBP, xSP 381 382 pushf 383 cli 384 385 ;/* First we have to save some final CPU context registers. */ 386 %ifdef RT_ARCH_AMD64 387 mov rax, qword .vmresume_done 388 push rax 389 %else 390 push .vmresume_done 391 %endif 392 mov eax, VMX_VMCS_HOST_RIP ;/* return address (too difficult to continue after VMLAUNCH?) */ 393 vmwrite xAX, [xSP] 394 ;/* @todo assumes success... */ 395 add xSP, xS 396 397 ;/* Manual save and restore: 398 ; * - General purpose registers except RIP, RSP 399 ; * 400 ; * Trashed: 401 ; * - CR2 (we don't care) 402 ; * - LDTR (reset to 0) 403 ; * - DRx (presumably not changed at all) 404 ; * - DR7 (reset to 0x400) 405 ; * - EFLAGS (reset to RT_BIT(1); not relevant) 406 ; * 407 ; */ 408 409 ;/* Save all general purpose host registers. */ 410 MYPUSHAD 411 412 ;/* Save segment registers */ 413 MYPUSHSEGS xAX, ax 414 415 ;/* Save the Guest CPU context pointer. */ 416 %ifdef RT_ARCH_AMD64 417 %ifdef ASM_CALL64_GCC 418 mov rsi, rdi ; pCtx 419 %else 420 mov rsi, rcx ; pCtx 421 %endif 422 %else 423 mov esi, [ebp + 8] ; pCtx 424 %endif 425 push xSI 426 427 ; Save LDTR 428 xor eax, eax 429 sldt ax 430 push xAX 431 432 ; VMX only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly! 
433 sub xSP, xS*2 434 sgdt [xSP] 435 436 sub xSP, xS*2 437 sidt [xSP] 438 439 %ifdef VBOX_WITH_DR6_EXPERIMENT 440 ; Restore DR6 - experiment, not safe! 441 mov xBX, [xSI + CPUMCTX.dr6] 442 mov dr6, xBX 443 %endif 444 445 ; Restore CR2 446 mov xBX, [xSI + CPUMCTX.cr2] 447 mov cr2, xBX 448 449 mov eax, VMX_VMCS_HOST_RSP 450 vmwrite xAX, xSP 451 ;/* @todo assumes success... */ 452 ;/* Don't mess with ESP anymore!! */ 453 454 ;/* Restore Guest's general purpose registers. */ 455 mov eax, [xSI + CPUMCTX.eax] 456 mov ebx, [xSI + CPUMCTX.ebx] 457 mov ecx, [xSI + CPUMCTX.ecx] 458 mov edx, [xSI + CPUMCTX.edx] 459 mov edi, [xSI + CPUMCTX.edi] 460 mov ebp, [xSI + CPUMCTX.ebp] 461 mov esi, [xSI + CPUMCTX.esi] 462 463 vmresume 464 jmp .vmresume_done; ;/* here if vmresume detected a failure. */ 465 466 ALIGNCODE(16) 467 .vmresume_done: 468 jc near .vmxresume_invalid_vmxon_ptr 469 jz near .vmxresume_start_failed 470 471 ; Restore base and limit of the IDTR & GDTR 472 lidt [xSP] 473 add xSP, xS*2 474 lgdt [xSP] 475 add xSP, xS*2 476 477 push xDI 478 mov xDI, [xSP + xS * 2] ; pCtx 479 480 mov [ss:xDI + CPUMCTX.eax], eax 481 mov [ss:xDI + CPUMCTX.ebx], ebx 482 mov [ss:xDI + CPUMCTX.ecx], ecx 483 mov [ss:xDI + CPUMCTX.edx], edx 484 mov [ss:xDI + CPUMCTX.esi], esi 485 mov [ss:xDI + CPUMCTX.ebp], ebp 486 %ifdef RT_ARCH_AMD64 487 pop xAX ; the guest edi we pushed above 488 mov dword [ss:xDI + CPUMCTX.edi], eax 489 %else 490 pop dword [ss:xDI + CPUMCTX.edi] ; the guest edi we pushed above 491 %endif 492 493 %ifdef VBOX_WITH_DR6_EXPERIMENT 494 ; Save DR6 - experiment, not safe! 
495 mov xAX, dr6 496 mov [ss:xDI + CPUMCTX.dr6], xAX 497 %endif 498 499 pop xAX ; saved LDTR 500 lldt ax 501 502 add xSP, xS ; pCtx 503 504 ; Restore segment registers 505 MYPOPSEGS xAX, ax 506 507 ; Restore general purpose registers 508 MYPOPAD 509 510 mov eax, VINF_SUCCESS 511 512 .vmresume_end: 513 popf 514 pop xBP 515 ret 516 517 .vmxresume_invalid_vmxon_ptr: 518 ; Restore base and limit of the IDTR & GDTR 519 lidt [xSP] 520 add xSP, xS*2 521 lgdt [xSP] 522 add xSP, xS*2 523 524 pop xAX ; saved LDTR 525 lldt ax 526 527 add xSP, xS ; pCtx 528 529 ; Restore segment registers 530 MYPOPSEGS xAX, ax 531 532 ; Restore all general purpose host registers. 533 MYPOPAD 534 mov eax, VERR_VMX_INVALID_VMXON_PTR 535 jmp .vmresume_end 536 537 .vmxresume_start_failed: 538 ; Restore base and limit of the IDTR & GDTR 539 lidt [xSP] 540 add xSP, xS*2 541 lgdt [xSP] 542 add xSP, xS*2 543 544 pop xAX ; saved LDTR 545 lldt ax 546 547 add xSP, xS ; pCtx 548 549 ; Restore segment registers 550 MYPOPSEGS xAX, ax 551 552 ; Restore all general purpose host registers. 553 MYPOPAD 554 mov eax, VERR_VMX_UNABLE_TO_RESUME_VM 555 jmp .vmresume_end 556 557 ENDPROC VMXResumeVM 558 559 560 %ifdef RT_ARCH_AMD64 397 561 ;/** 398 562 ; * Executes VMWRITE 399 563 ; * 400 564 ; * @returns VBox status code 401 ; * @param idxField x86: [ebp + 08h] msc: rcx gcc: rdi VMCS index565 ; * @param idxField x86: [ebp + 08h] msc: rcx gcc: edi VMCS index 402 566 ; * @param pData x86: [ebp + 0ch] msc: rdx gcc: rsi VM field value 403 567 ; */ -
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r9452 r9453 642 642 case PGMMODE_PROTECTED: /* Protected mode, no paging. */ 643 643 AssertFailed(); 644 return VERR_PGM_UNSUPPORTED_ SHADOW_PAGING_MODE;644 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE; 645 645 646 646 case PGMMODE_32_BIT: /* 32-bit paging. */ … … 659 659 #else 660 660 AssertFailed(); 661 return VERR_PGM_UNSUPPORTED_ SHADOW_PAGING_MODE;661 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE; 662 662 #endif 663 663 664 664 default: /* shut up gcc */ 665 665 AssertFailed(); 666 return VERR_PGM_UNSUPPORTED_ SHADOW_PAGING_MODE;666 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE; 667 667 } 668 668 } -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r9452 r9453 841 841 #else 842 842 AssertFailed(); 843 return VERR_PGM_UNSUPPORTED_ SHADOW_PAGING_MODE;843 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE; 844 844 #endif 845 845 default: /* shut up gcc */ 846 846 AssertFailed(); 847 return VERR_PGM_UNSUPPORTED_ SHADOW_PAGING_MODE;847 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE; 848 848 } 849 849 /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */ … … 960 960 /* 64 bits guest mode? */ 961 961 if (pCtx->msrEFER & MSR_K6_EFER_LMA) 962 {963 962 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE; 964 #ifndef VBOX_WITH_64_BITS_GUESTS965 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;966 #else967 pVM->hwaccm.s.vmx.pfnStartVM = VMXR0StartVM64;968 #endif969 }970 else971 {972 pVM->hwaccm.s.vmx.pfnStartVM = VMXR0StartVM32;973 }974 963 975 964 /* Done. */ … … 1169 1158 /* All done! Let's start VM execution. */ 1170 1159 STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x); 1171 rc = pVM->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx); 1160 if (pVM->hwaccm.s.vmx.fResumeVM == false) 1161 rc = VMXStartVM(pCtx); 1162 else 1163 rc = VMXResumeVM(pCtx); 1172 1164 1173 1165 /* In case we execute a goto ResumeExecution later on. */ -
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h
r9452 r9453 162 162 163 163 164 165 /**166 * Prepares for and executes VMLAUNCH (32 bits guest mode)167 *168 * @returns VBox status code169 * @param fResume vmlauch/vmresume170 * @param pCtx Guest context171 */172 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx);173 174 /**175 * Prepares for and executes VMLAUNCH (64 bits guest mode)176 *177 * @returns VBox status code178 * @param fResume vmlauch/vmresume179 * @param pCtx Guest context180 */181 DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx);182 183 164 #endif /* IN_RING0 */ 184 165
Note: See TracChangeset for help on using the changeset viewer.