Changeset 57429 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Aug 18, 2015 1:35:18 PM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 7 edited
Legend:
- Lines prefixed with '-' were removed in r57429.
- Lines prefixed with '+' were added in r57429.
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
(from r57358 to r57429)

The guard around the 64-bit-guests-on-32-bit-host support code (old lines 1495-1554) drops the hybrid-kernel exclusion:

-#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
 ...
-#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(from r57270 to r57429)

All VBOX_WITH_HYBRID_32BIT_KERNEL code is removed from the assembly helpers:

- The MAYBE_64_BIT definitions (for RT_ARCH_AMD64 and for the hybrid kernel) are dropped from the "Defined Constants And Macros" section; only the darwin block remains:

 %ifdef RT_OS_DARWIN
  %ifdef RT_ARCH_AMD64
   ;;
   ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
   ; risk loading a stale LDT value or something invalid.
   %define HM_64_BIT_USE_NULL_SEL
  %endif
 %endif

- The external symbols SUPR0AbsIs64bit, SUPR0Abs64bitKernelCS/SS/DS and SUPR0AbsKernelCS, and the g_fVMXIs64bitHost global (the SUPR0AbsIs64bit value stored in data so it could be tested without clobbering a register), are removed. Only the VBOX_WITH_KERNEL_USING_XMM extern for CPUMIsGuestFPUStateActive stays.

- VMXWriteVmcs64, VMXReadVmcs64, VMXReadVmcs32, VMXWriteVmcs32, VMXEnable, VMXDisable, VMXClearVmcs, VMXActivateVmcs, VMXGetActivatedVmcs, VMXR0InvEPT and VMXR0InvVPID lose their hybrid-kernel detour. Each of them drops a preamble of this shape plus the matching BITS 64 .sixtyfourbit_mode epilogue that returned through a 16:32 far pointer:

-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
- cmp byte [NAME(g_fVMXIs64bitHost)], 0
- jz .legacy_mode
- db 0xea ; jmp far .sixtyfourbit_mode
- dd .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
-.legacy_mode:
-%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL

- The darwin hybrid helpers HMR0Get64bitGdtrAndIdtr and HMR0Get64bitCR3 are removed entirely.

- The default setup of the StartVM routines no longer appends a _32 suffix:

-%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
- %define MY_NAME(name) name %+ _32
-%else
- %define MY_NAME(name) name
-%endif
+%define MY_NAME(name) name

- The hybrid wrapper procedures VMXR0StartVM32, VMXR0StartVM64, SVMR0VMRun and SVMR0VMRun64 (which saved esi/edi/fs/gs, far-jumped into the 64-bit kernel code segment, called the _64 variants and thunked back) are removed, together with the second inclusion of HMR0Mixed.mac that re-assembled the file "pretending we're a 64-bit host". HMR0Mixed.mac is now included once.
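For orientation, the 32-bit host path that survives in VMXWriteVmcs64 writes a 64-bit VMCS field as two 32-bit VMWRITEs: the low dword to the field itself and, for 64-bit-wide fields, the high dword to the companion HIGH field. A minimal C-level sketch of that split, assuming a hypothetical vmwrite32() wrapper around the VMWRITE instruction (not a VirtualBox API):

    #include <stdint.h>

    /* Hypothetical wrapper around a single 32-bit VMWRITE; it stands in for the
       'vmwrite' instruction used directly in HMR0A.asm. Returns 0 on success. */
    extern "C" int vmwrite32(uint32_t idxField, uint32_t u32Val);

    /* Sketch of the 32-bit host path of VMXWriteVmcs64: write the low dword to
       idxField and the high dword to the companion HIGH field at idxField + 1. */
    static int sketchWriteVmcs64On32BitHost(uint32_t idxField, uint64_t u64Val)
    {
        int rc = vmwrite32(idxField, (uint32_t)u64Val);              /* low dword */
        if (rc == 0)
            rc = vmwrite32(idxField + 1, (uint32_t)(u64Val >> 32));  /* high dword */
        return rc;
    }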
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
(from r57358 to r57429)

Five #if guards drop the hybrid-kernel exclusion. The prepare-and-execute-VMRUN helper for 64-bit guests on a 32-bit host (around line 1021, guarded by VBOX_ENABLE_64_BITS_GUESTS), the debug-register handling (around lines 1470, 1523 and 1547) and the shared guest-host state bookkeeping before VMRUN (around line 3137) change from

-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)

and the run-handler selection (around line 1691) changes from

-#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32
     /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
     pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
(from r56287 to r57429)

The guard around the 64-bit switcher declarations is simplified:

-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
 DECLASM(int)   SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
                                          uint32_t *paParam);
-#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
+#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(from r57358 to r57429)

The hybrid-kernel abstraction is removed from the VT-x code:

- The host-mode macro and register typedef at the top of the file are deleted, and plain RTHCUINTREG is used wherever HMVMXHCUINTREG appeared:

-#if defined(RT_ARCH_AMD64)
-# define HMVMX_IS_64BIT_HOST_MODE()   (true)
-typedef RTHCUINTREG                   HMVMXHCUINTREG;
-#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
-extern "C" uint32_t                   g_fVMXIs64bitHost;
-# define HMVMX_IS_64BIT_HOST_MODE()   (g_fVMXIs64bitHost != 0)
-typedef uint64_t                      HMVMXHCUINTREG;
-#else
-# define HMVMX_IS_64BIT_HOST_MODE()   (false)
-typedef RTHCUINTREG                   HMVMXHCUINTREG;
-#endif

- Preprocessor guards throughout the file drop the `&& !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)` clause, and blocks that combined `#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)` with a runtime HMVMX_IS_64BIT_HOST_MODE() test collapse to plain `#if HC_ARCH_BITS == 64`. This affects the VMCS read-cache setup (hmR0VmxInitVmcsReadCache), the EFER swap detection, debug-register handling, lazy MSR loading, the VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE exit control, the diagnostic MSR logging, and the HMVMX_CHECK_BREAK guest-state invariant checks (RIP/RFLAGS, CR0/CR4/CR3/DR7, SYSENTER, segment/LDTR/TR bases, GDTR/IDTR, pending debug exceptions).

- VMXR0SetupVM no longer rejects 64-bit guests on a darwin 32-bit/PAE kernel (the VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE check is gone).

- Host-state saving is simplified: host CR3 always comes from ASMGetCR3(), CS/SS from ASMGetCS()/ASMGetSS(), GDTR/IDTR from ASMGetGDTR()/ASMGetIDTR(), the TR base from X86DESC64_BASE()/X86DESC_BASE(), FS/GS base from ASMRdMsr(MSR_K8_FS_BASE/MSR_K8_GS_BASE) and the SYSENTER MSRs from ASMRdMsr()/ASMRdMsr_Low(). The darwin-only detours through HMR0Get64bitCR3(), HMR0Get64bitGdtrAndIdtr() and the SUPR0Abs* selector constants are removed; the HC_ARCH_BITS == 64 logic for restoring the GDTR/IDTR limits and the TR limit after VM-exit is kept, now un-nested.

- The run-handler selection keys purely off HC_ARCH_BITS: a 64-bit guest uses VMXR0SwitcherStartVM64 on a 32-bit host and VMXR0StartVM64 on a 64-bit host, everything else uses VMXR0StartVM32.

- The #if 0'd VMXWriteCachedVmcsLoad()/VMXReadCachedVmcsStore() stubs ("Enable later when the assembly code uses these as callbacks") are deleted.

- In the 32-bit branch of the pending-debug-exceptions check, the reserved-bits test now uses u32Val instead of the stale u64Val.
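To make the run-handler point concrete, the selection logic left in place reduces to roughly the following shape. This is a simplified sketch, not the literal function: the assertions, the HM_CHANGED_* consistency checks on mode switches and the exact long-mode predicate are elided, and the wrapper function and its bool parameter are invented for illustration.

    /* Sketch of the post-r57429 VT-x start-VM handler selection. pfnStartVM,
       VMXR0StartVM32/64 and VMXR0SwitcherStartVM64 are the names used in the
       diff; the function wrapper and fGuestInLongMode are illustrative only. */
    static void sketchSelectVmxStartVmHandler(PVMCPU pVCpu, bool fGuestInLongMode)
    {
        if (fGuestInLongMode)
        {
    #if HC_ARCH_BITS == 32
            /* 32-bit host: go through the 32->64 world switcher to run the 64-bit guest. */
            pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
    #else
            /* 64-bit host: run the 64-bit guest directly. */
            pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
    #endif
        }
        else
        {
            /* Guest not in long mode: the 32-bit handler works on either host width. */
            pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
        }
    }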
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
(from r56287 to r57429)

The guard around the 64-bit switcher declarations is simplified:

-# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
 DECLASM(int)   VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, ...

and the hybrid-kernel variants of the VMCS read macros are dropped, leaving only the plain 32-bit mapping:

-# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
-/* Don't use fAllow64BitGuests for VMXReadVmcsGstN() even though it looks right, as it can be forced to 'true'.
-   HMVMX_IS_64BIT_HOST_MODE() is what we need. */
-#  define VMXReadVmcsHstN(idxField, p64Val)    HMVMX_IS_64BIT_HOST_MODE() ?                    \
-                                                   VMXReadVmcs64(idxField, p64Val)             \
-                                                 : (*(p64Val) &= UINT64_C(0xffffffff),         \
-                                                    VMXReadVmcs32(idxField, (uint32_t *)(p64Val)))
-#  define VMXReadVmcsGstN                      VMXReadVmcsHstN
-#  define VMXReadVmcsGstNByIdxVal              VMXReadVmcsGstN
-# elif HC_ARCH_BITS == 32
+# if HC_ARCH_BITS == 32
 #  define VMXReadVmcsHstN                      VMXReadVmcs32
 #  define VMXReadVmcsGstN(idxField, pVal)      VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal)
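As a usage illustration only (the 64-bit host mapping of VMXReadVmcsHstN to VMXReadVmcs64 is assumed here, since it falls outside the quoted hunk): RTHCUINTREG matches the host word size, so the same call compiles against either expansion of the macro, mirroring the diagnostic pattern visible elsewhere in this changeset.

    /* Usage sketch only; the enclosing function is invented for illustration. */
    static void sketchLogHostRip(void)
    {
        RTHCUINTREG uHCReg = 0;
        int rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg);  /* VMXReadVmcs32 on 32-bit hosts */
        AssertRC(rc);
        Log4(("Host RIP %#RHv\n", uHCReg));
    }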
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
(from r57378 to r57429)

The guard around the VMMR0_DO_TEST_SWITCHER3264 case drops the hybrid-kernel exclusion:

-#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
         case VMMR0_DO_TEST_SWITCHER3264:
             if (idCpu == NIL_VMCPUID)