Changeset 57429 in vbox for trunk/src/VBox/VMM/VMMR0


Timestamp: Aug 18, 2015 1:35:18 PM
Author:    vboxsync
Message:   VMM/HM: Start removing VBOX_WITH_HYBRID_32BIT_KERNEL code.
Location:  trunk/src/VBox/VMM/VMMR0
Files:     7 edited
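
The change follows one pattern across the edited files: guards that used to carve out a third host flavour, the darwin "hybrid" 32-bit kernel that runs the CPU in long mode, lose their !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) clause, and runtime HMVMX_IS_64BIT_HOST_MODE() checks collapse into compile-time HC_ARCH_BITS tests. A minimal before/after sketch in C (the macro names are the real ones from the hunks below; the guarded body is illustrative only):

    /* Before r57429: three host cases (64-bit, plain 32-bit, hybrid 32-bit). */
    #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        /* 32-bit-host path for running 64-bit guests */
    #endif

    /* After r57429: two host cases, decided entirely at compile time. */
    #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
        /* 32-bit-host path for running 64-bit guests */
    #endif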

Legend: "-" = removed, "+" = added, unprefixed = unchanged context.
Hunk headers give the starting line in each revision: @@ -old +new @@.
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r57358 → r57429)

    @@ -1493 +1493 @@
     }
     
    -#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
     
     /**
    @@ -1552 +1552 @@
     }
     
    -#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
    +#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */
     
     /**
  • trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r57270 → r57429)

    @@ -46 +46 @@
     ;*  Defined Constants And Macros                                               *
     ;*******************************************************************************
    -%ifdef RT_ARCH_AMD64
    - %define MAYBE_64_BIT
    -%endif
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    - %define MAYBE_64_BIT
    -%else
    - %ifdef RT_OS_DARWIN
    -  %ifdef RT_ARCH_AMD64
    -   ;;
    -   ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
    -   ; risk loading a stale LDT value or something invalid.
    -   %define HM_64_BIT_USE_NULL_SEL
    -  %endif
    +%ifdef RT_OS_DARWIN
    + %ifdef RT_ARCH_AMD64
    +  ;;
    +  ; Load the NULL selector into DS, ES, FS and GS on 64-bit darwin so we don't
    +  ; risk loading a stale LDT value or something invalid.
    +  %define HM_64_BIT_USE_NULL_SEL
      %endif
     %endif
    @@ -220 +213 @@
     ;* External Symbols                                                            *
     ;*******************************************************************************
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -extern NAME(SUPR0AbsIs64bit)
    -extern NAME(SUPR0Abs64bitKernelCS)
    -extern NAME(SUPR0Abs64bitKernelSS)
    -extern NAME(SUPR0Abs64bitKernelDS)
    -extern NAME(SUPR0AbsKernelCS)
    -%endif
     %ifdef VBOX_WITH_KERNEL_USING_XMM
     extern NAME(CPUMIsGuestFPUStateActive)
    -%endif
    -
    -
    -;*******************************************************************************
    -;*  Global Variables                                                           *
    -;*******************************************************************************
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -BEGINDATA
    -;;
    -; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
    -; needing to clobber a register. (This trick doesn't quite work for PE btw.
    -; but that's not relevant atm.)
    -GLOBALNAME g_fVMXIs64bitHost
    -    dd  NAME(SUPR0AbsIs64bit)
     %endif
     
    @@ -395 +367 @@
         mov         ecx, [esp + 4]          ; idxField
         lea         edx, [esp + 8]          ; &u64Data
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp         byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz          .legacy_mode
    -    db          0xea                    ; jmp far .sixtyfourbit_mode
    -    dd          .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         vmwrite     ecx, [edx]              ; low dword
         jz          .done
    @@ -418 +383 @@
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     edx, 0ffffffffh
    -    and     ecx, 0ffffffffh
    -    xor     eax, eax
    -    vmwrite rcx, [rdx]
    -    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXWriteVmcs64
     
    @@ -462 +409 @@
         mov         ecx, [esp + 4]          ; idxField
         mov         edx, [esp + 8]          ; pData
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp         byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz          .legacy_mode
    -    db          0xea                    ; jmp far .sixtyfourbit_mode
    -    dd          .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         vmread      [edx], ecx              ; low dword
         jz          .done
    @@ -485 +425 @@
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     edx, 0ffffffffh
    -    and     ecx, 0ffffffffh
    -    xor     eax, eax
    -    vmread  [rdx], rcx
    -    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXReadVmcs64
     
    @@ -531 +453 @@
         mov     ecx, [esp + 4]              ; idxField
         mov     edx, [esp + 8]              ; pu32Data
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         xor     eax, eax
         vmread  [edx], ecx
    @@ -549 +464 @@
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     edx, 0ffffffffh
    -    and     ecx, 0ffffffffh
    -    xor     eax, eax
    -    vmread  r10, rcx
    -    mov     [rdx], r10d
    -    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXReadVmcs32
     
    @@ -596 +492 @@
         mov     ecx, [esp + 4]              ; idxField
         mov     edx, [esp + 8]              ; u32Data
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         xor     eax, eax
         vmwrite ecx, edx
    @@ -614 +503 @@
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     edx, 0ffffffffh
    -    and     ecx, 0ffffffffh
    -    xor     eax, eax
    -    vmwrite rcx, rdx
    -    mov     r8d, VERR_VMX_INVALID_VMCS_FIELD
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXWriteVmcs32
     
    @@ -652 +523 @@
         vmxon   [rsp]
     %else  ; RT_ARCH_X86
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         xor     eax, eax
         vmxon   [esp + 4]
    @@ -675 +539 @@
     %endif
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    lea     rdx, [rsp + 4]              ; &HCPhysVMXOn.
    -    and     edx, 0ffffffffh
    -    xor     eax, eax
    -    vmxon   [rdx]
    -    mov     r8d, VERR_VMX_VMXON_FAILED
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMXON_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXEnable
     
    @@ -701 +547 @@
     ;DECLASM(void) VMXDisable(void);
     BEGINPROC VMXDisable
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         vmxoff
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    vmxoff
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXDisable
     
    @@ -743 +571 @@
         vmclear [rsp]
     %else  ; RT_ARCH_X86
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         xor     eax, eax
         vmclear [esp + 4]
    @@ -760 +581 @@
     %endif
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    lea     rdx, [rsp + 4]              ; &HCPhysVmcs
    -    and     edx, 0ffffffffh
    -    xor     eax, eax
    -    vmclear [rdx]
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif
     ENDPROC VMXClearVmcs
     
    @@ -797 +602 @@
         vmptrld [rsp]
     %else
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         xor     eax, eax
         vmptrld [esp + 4]
    @@ -814 +612 @@
     %endif
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    lea     rdx, [rsp + 4]              ; &HCPhysVmcs
    -    and     edx, 0ffffffffh
    -    xor     eax, eax
    -    vmptrld [rdx]
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXActivateVmcs
     
    @@ -852 +634 @@
       %endif
      %else
    -  %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    -  %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         vmptrst qword [esp+04h]
      %endif
    @@ -864 +639 @@
     .the_end:
         ret
    -
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    lea     rdx, [rsp + 4]              ; &HCPhysVmcs
    -    and     edx, 0ffffffffh
    -    vmptrst qword [rdx]
    -    xor     eax, eax
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     %endif
     ENDPROC VMXGetActivatedVmcs
    @@ -901 +662 @@
      %endif
     %else
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp         byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz          .legacy_mode
    -    db          0xea                        ; jmp far .sixtyfourbit_mode
    -    dd          .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         mov         ecx, [esp + 4]
         mov         edx, [esp + 8]
    @@ -922 +676 @@
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     esp, 0ffffffffh
    -    mov     ecx, [rsp + 4]              ; enmFlush
    -    mov     edx, [rsp + 8]              ; pDescriptor
    -    xor     eax, eax
    -;    invept  rcx, qword [rdx]
    -    DB      0x66, 0x0F, 0x38, 0x80, 0xA
    -    mov     r8d, VERR_INVALID_PARAMETER
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXR0InvEPT
     
    @@ -965 +699 @@
      %endif
     %else
    - %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    jz      .legacy_mode
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.legacy_mode:
    - %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
         mov         ecx, [esp + 4]
         mov         edx, [esp + 8]
    @@ -986 +713 @@
     .the_end:
         ret
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     esp, 0ffffffffh
    -    mov     ecx, [rsp + 4]              ; enmFlush
    -    mov     edx, [rsp + 8]              ; pDescriptor
    -    xor     eax, eax
    -;    invvpid rcx, qword [rdx]
    -    DB      0x66, 0x0F, 0x38, 0x81, 0xA
    -    mov     r8d, VERR_INVALID_PARAMETER
    -    cmovz   eax, r8d
    -    mov     r9d, VERR_VMX_INVALID_VMCS_PTR
    -    cmovc   eax, r9d
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     ENDPROC VMXR0InvVPID
     
    @@ -1065 +772 @@
     %endif ; GC_ARCH_BITS != 64
     
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    -
    -;/**
    -; * Gets 64-bit GDTR and IDTR on darwin.
    -; * @param  pGdtr        Where to store the 64-bit GDTR.
    -; * @param  pIdtr        Where to store the 64-bit IDTR.
    -; */
    -;DECLASM(void) HMR0Get64bitGdtrAndIdtr(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);
    -ALIGNCODE(16)
    -BEGINPROC HMR0Get64bitGdtrAndIdtr
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.the_end:
    -    ret
    -
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    and     esp, 0ffffffffh
    -    mov     ecx, [rsp + 4]              ; pGdtr
    -    mov     edx, [rsp + 8]              ; pIdtr
    -    sgdt    [rcx]
    -    sidt    [rdx]
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -ENDPROC   HMR0Get64bitGdtrAndIdtr
    -
    -
    -;/**
    -; * Gets 64-bit CR3 on darwin.
    -; * @returns CR3
    -; */
    -;DECLASM(uint64_t) HMR0Get64bitCR3(void);
    -ALIGNCODE(16)
    -BEGINPROC HMR0Get64bitCR3
    -    db      0xea                        ; jmp far .sixtyfourbit_mode
    -    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    -.the_end:
    -    ret
    -
    -ALIGNCODE(16)
    -BITS 64
    -.sixtyfourbit_mode:
    -    mov     rax, cr3
    -    mov     rdx, rax
    -    shr     rdx, 32
    -    jmp far [.fpret wrt rip]
    -.fpret:                                 ; 16:32 Pointer to .the_end.
    -    dd      .the_end, NAME(SUPR0AbsKernelCS)
    -BITS 32
    -ENDPROC   HMR0Get64bitCR3
    -
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     
     %ifdef VBOX_WITH_KERNEL_USING_XMM
    @@ -1464 +1116 @@
     ; The default setup of the StartVM routines.
     ;
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    - %define MY_NAME(name)   name %+ _32
    -%else
    - %define MY_NAME(name)   name
    -%endif
    +%define MY_NAME(name)   name
     %ifdef RT_ARCH_AMD64
      %define MYPUSHAD       MYPUSHAD64
    @@ -1483 +1131 @@
     %include "HMR0Mixed.mac"
     
    -
    -%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    - ;
    - ; Write the wrapper procedures.
    - ;
    - ; These routines are probably being too paranoid about selector
    - ; restoring, but better safe than sorry...
    - ;
    -
    -; DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    -ALIGNCODE(16)
    -BEGINPROC VMXR0StartVM32
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    je near NAME(VMXR0StartVM32_32)
    -
    -    ; stack frame
    -    push    esi
    -    push    edi
    -    push    fs
    -    push    gs
    -
    -    ; jmp far .thunk64
    -    db      0xea
    -    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
    -
    -ALIGNCODE(16)
    -BITS 64
    -.thunk64:
    -    sub     esp, 20h
    -    mov     edi, [rsp + 20h + 14h]      ; fResume
    -    mov     esi, [rsp + 20h + 18h]      ; pCtx
    -    mov     edx, [rsp + 20h + 1Ch]      ; pCache
    -    mov     ecx, [rsp + 20h + 20h]      ; pVM
    -    mov     r8,  [rsp + 20h + 24h]      ; pVCpu
    -    call    NAME(VMXR0StartVM32_64)
    -    add     esp, 20h
    -    jmp far [.fpthunk32 wrt rip]
    -.fpthunk32:                             ; 16:32 Pointer to .thunk32.
    -    dd      .thunk32, NAME(SUPR0AbsKernelCS)
    -
    -BITS 32
    -ALIGNCODE(16)
    -.thunk32:
    -    pop     gs
    -    pop     fs
    -    pop     edi
    -    pop     esi
    -    ret
    -ENDPROC   VMXR0StartVM32
    -
    -
    -; DECLASM(int) VMXR0StartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    -ALIGNCODE(16)
    -BEGINPROC VMXR0StartVM64
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    je      .not_in_long_mode
    -
    -    ; stack frame
    -    push    esi
    -    push    edi
    -    push    fs
    -    push    gs
    -
    -    ; jmp far .thunk64
    -    db      0xea
    -    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
    -
    -ALIGNCODE(16)
    -BITS 64
    -.thunk64:
    -    sub     esp, 20h
    -    mov     edi, [rsp + 20h + 14h]      ; fResume
    -    mov     esi, [rsp + 20h + 18h]      ; pCtx
    -    mov     edx, [rsp + 20h + 1Ch]      ; pCache
    -    mov     ecx, [rsp + 20h + 20h]      ; pVM
    -    mov     r8,  [rsp + 20h + 24h]      ; pVCpu
    -    call    NAME(VMXR0StartVM64_64)
    -    add     esp, 20h
    -    jmp far [.fpthunk32 wrt rip]
    -.fpthunk32:                             ; 16:32 Pointer to .thunk32.
    -    dd      .thunk32, NAME(SUPR0AbsKernelCS)
    -
    -BITS 32
    -ALIGNCODE(16)
    -.thunk32:
    -    pop     gs
    -    pop     fs
    -    pop     edi
    -    pop     esi
    -    ret
    -
    -.not_in_long_mode:
    -    mov     eax, VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE
    -    ret
    -ENDPROC   VMXR0StartVM64
    -
    -;DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    -ALIGNCODE(16)
    -BEGINPROC SVMR0VMRun
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    je near NAME(SVMR0VMRun_32)
    -
    -    ; stack frame
    -    push    esi
    -    push    edi
    -    push    fs
    -    push    gs
    -
    -    ; jmp far .thunk64
    -    db      0xea
    -    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
    -
    -ALIGNCODE(16)
    -BITS 64
    -.thunk64:
    -    sub     esp, 20h
    -    mov     rdi, [rsp + 20h + 14h]      ; pVMCBHostPhys
    -    mov     rsi, [rsp + 20h + 1Ch]      ; pVMCBPhys
    -    mov     edx, [rsp + 20h + 24h]      ; pCtx
    -    mov     ecx, [rsp + 20h + 28h]      ; pVM
    -    mov     r8d, [rsp + 20h + 2Ch]      ; pVCpu
    -    call    NAME(SVMR0VMRun_64)
    -    add     esp, 20h
    -    jmp far [.fpthunk32 wrt rip]
    -.fpthunk32:                             ; 16:32 Pointer to .thunk32.
    -    dd      .thunk32, NAME(SUPR0AbsKernelCS)
    -
    -BITS 32
    -ALIGNCODE(16)
    -.thunk32:
    -    pop     gs
    -    pop     fs
    -    pop     edi
    -    pop     esi
    -    ret
    -ENDPROC   SVMR0VMRun
    -
    -
    -; DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    -ALIGNCODE(16)
    -BEGINPROC SVMR0VMRun64
    -    cmp     byte [NAME(g_fVMXIs64bitHost)], 0
    -    je      .not_in_long_mode
    -
    -    ; stack frame
    -    push    esi
    -    push    edi
    -    push    fs
    -    push    gs
    -
    -    ; jmp far .thunk64
    -    db      0xea
    -    dd      .thunk64, NAME(SUPR0Abs64bitKernelCS)
    -
    -ALIGNCODE(16)
    -BITS 64
    -.thunk64:
    -    sub     esp, 20h
    -    mov     rdi, [rsp + 20h + 14h]      ; pVMCBHostPhys
    -    mov     rsi, [rsp + 20h + 1Ch]      ; pVMCBPhys
    -    mov     edx, [rsp + 20h + 24h]      ; pCtx
    -    mov     ecx, [rsp + 20h + 28h]      ; pVM
    -    mov     r8d, [rsp + 20h + 2Ch]      ; pVCpu
    -    call    NAME(SVMR0VMRun64_64)
    -    add     esp, 20h
    -    jmp far [.fpthunk32 wrt rip]
    -.fpthunk32:                             ; 16:32 Pointer to .thunk32.
    -    dd      .thunk32, NAME(SUPR0AbsKernelCS)
    -
    -BITS 32
    -ALIGNCODE(16)
    -.thunk32:
    -    pop     gs
    -    pop     fs
    -    pop     edi
    -    pop     esi
    -    ret
    -
    -.not_in_long_mode:
    -    mov     eax, VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE
    -    ret
    -ENDPROC   SVMR0VMRun64
    -
    - ;
    - ; Do it a second time pretending we're a 64-bit host.
    - ;
    - ; This *HAS* to be done at the very end of the file to avoid restoring
    - ; macros. So, add new code *BEFORE* this mess.
    - ;
    - BITS 64
    - %undef  RT_ARCH_X86
    - %define RT_ARCH_AMD64
    - %undef  ASM_CALL64_MSC
    - %define ASM_CALL64_GCC
    - %define xCB             8
    - %define xSP            rsp
    - %define xBP            rbp
    - %define xAX            rax
    - %define xBX            rbx
    - %define xCX            rcx
    - %define xDX            rdx
    - %define xDI            rdi
    - %define xSI            rsi
    - %define MY_NAME(name)   name %+ _64
    - %define MYPUSHAD       MYPUSHAD64
    - %define MYPOPAD        MYPOPAD64
    - %define MYPUSHSEGS     MYPUSHSEGS64
    - %define MYPOPSEGS      MYPOPSEGS64
    -
    - %include "HMR0Mixed.mac"
    -%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
    -
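
Most of the assembly removed above is one repeated idiom: on a hybrid kernel the routine tests g_fVMXIs64bitHost (backed by SUPR0AbsIs64bit) at runtime and, when the kernel is already in long mode, far-jumps into the 64-bit kernel code segment to execute the VMX instruction there, then far-jumps back. A condensed, annotated sketch of that now-deleted pattern, pieced together from the VMXDisable hunks above (the procedure name is illustrative, not in the tree):

    BEGINPROC VMXDisableSketch                 ; illustrative name only
    %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
        cmp     byte [NAME(g_fVMXIs64bitHost)], 0
        jz      .legacy_mode                   ; plain 32-bit kernel: fall through
        db      0xea                           ; hand-assembled far jmp opcode
        dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
    .legacy_mode:
    %endif
        vmxoff                                 ; 32-bit path
    .the_end:
        ret

    %ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    BITS 64                                    ; assembled as 64-bit code
    .sixtyfourbit_mode:
        vmxoff                                 ; same instruction, long mode
        jmp far [.fpret wrt rip]               ; far return to the 32-bit CS
    .fpret:                                    ; 16:32 pointer to .the_end
        dd      .the_end, NAME(SUPR0AbsKernelCS)
    BITS 32
    %endif
    ENDPROC VMXDisableSketch

With VBOX_WITH_HYBRID_32BIT_KERNEL gone, only the plain 32-bit path survives, which is why every .sixtyfourbit_mode block and the g_fVMXIs64bitHost variable could be deleted wholesale.
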
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r57358 → r57429)

    @@ -1019 +1019 @@
      *
      * @{ */
    -#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
     /**
      * Prepares for and executes VMRUN (64-bit guests on a 32-bit host).
    @@ -1468 +1468 @@
              * Note! DBGF expects a clean DR6 state before executing guest code.
              */
    -#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
             if (   CPUMIsGuestInLongModeEx(pCtx)
                 && !CPUMIsHyperDebugStateActivePending(pVCpu))
    @@ -1521 +1521 @@
             if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
             {
    -#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
                 if (   CPUMIsGuestInLongModeEx(pCtx)
                     && !CPUMIsGuestDebugStateActivePending(pVCpu))
    @@ -1545 +1545 @@
              * intercept #DB as DR6 is updated in the VMCB.
              */
    -#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
             else if (   !CPUMIsGuestDebugStateActivePending(pVCpu)
                      && !CPUMIsGuestDebugStateActive(pVCpu))
    @@ -1689 +1689 @@
     #endif
             Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
    -#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32
             /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
             pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
    @@ -3135 +3135 @@
     
         /* Store status of the shared guest-host state at the time of VMRUN. */
    -#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
         if (CPUMIsGuestInLongModeEx(pCtx))
         {
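
With the hybrid case gone, selecting the SVM run handler reduces to a compile-time choice on host bitness. A condensed sketch of the surviving logic: the 32-bit branch is verbatim from the hunk above, while the 64-bit branch is an assumption inferred from SVMR0VMRun64 in HMR0A.asm and is not shown in this changeset:

    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);  /* Guaranteed by hmR3InitFinalizeR0(). */
    #if HC_ARCH_BITS == 32
        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;
    #else
        /* 64-bit host: assumed to run the 64-bit guest directly. */
        pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64;
    #endif
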
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.h (r56287 → r57429)

    @@ -51 +51 @@
     VMMR0DECL(int)  SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu);
     
    -#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    +#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
     DECLASM(int)   SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
     VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
                                              uint32_t *paParam);
    -#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
    +#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */
     
     /**
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r57358 r57429  
    5656*   Defined Constants And Macros                                                                                                 *
    5757*********************************************************************************************************************************/
    58 #if defined(RT_ARCH_AMD64)
    59 # define HMVMX_IS_64BIT_HOST_MODE()   (true)
    60 typedef RTHCUINTREG                   HMVMXHCUINTREG;
    61 #elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    62 extern "C" uint32_t g_fVMXIs64bitHost;
    63 # define HMVMX_IS_64BIT_HOST_MODE()   (g_fVMXIs64bitHost != 0)
    64 typedef uint64_t                      HMVMXHCUINTREG;
    65 #else
    66 # define HMVMX_IS_64BIT_HOST_MODE()   (false)
    67 typedef RTHCUINTREG                   HMVMXHCUINTREG;
    68 #endif
    69 
    7058/** Use the function table. */
    7159#define HMVMX_USE_FUNCTION_TABLE
     
    352340                                                 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,
    353341                                                 bool fStepping, uint32_t *puIntState);
    354 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     342#if HC_ARCH_BITS == 32
    355343static int                hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
    356344#endif
     
    27452733    }
    27462734
    2747 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    2748     /*
    2749      * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
    2750      * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
    2751      */
    2752     if (   pVM->hm.s.fAllow64BitGuests
    2753         && !HMVMX_IS_64BIT_HOST_MODE())
    2754     {
    2755         LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
    2756         return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
    2757     }
    2758 #endif
    2759 
    27602735    /* Initialize these always, see hmR3InitFinalizeR0().*/
    27612736    pVM->hm.s.vmx.enmFlushEpt  = VMXFLUSHEPT_NONE;
     
    27722747    /* Check if we can use the VMCS controls for swapping the EFER MSR. */
    27732748    Assert(!pVM->hm.s.vmx.fSupportsVmcsEfer);
    2774 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    2775     if (   HMVMX_IS_64BIT_HOST_MODE()
    2776         && (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
     2749#if HC_ARCH_BITS == 64
     2750    if (   (pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
    27772751        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR)
    27782752        && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1  & VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR))
     
    28282802                                    hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
    28292803
    2830 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     2804#if HC_ARCH_BITS == 32
    28312805        rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
    28322806        AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
     
    28642838    AssertRCReturn(rc, rc);
    28652839
    2866 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    2867     /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
    2868     if (HMVMX_IS_64BIT_HOST_MODE())
    2869     {
    2870         uint64_t uRegCR3 = HMR0Get64bitCR3();
    2871         rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
    2872     }
    2873     else
    2874 #endif
    2875     {
    2876         uReg = ASMGetCR3();
    2877         rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
    2878     }
     2840    uReg = ASMGetCR3();
     2841    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
    28792842    AssertRCReturn(rc, rc);
    28802843
     
    29522915     * Host CS and SS segment registers.
    29532916     */
    2954 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    2955     RTSEL uSelCS;
    2956     RTSEL uSelSS;
    2957     if (HMVMX_IS_64BIT_HOST_MODE())
    2958     {
    2959         uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
    2960         uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
    2961     }
    2962     else
    2963     {
    2964         /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
    2965         uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
    2966         uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
    2967     }
    2968 #else
    29692917    RTSEL uSelCS = ASMGetCS();
    29702918    RTSEL uSelSS = ASMGetSS();
    2971 #endif
    29722919
    29732920    /*
     
    30202967     */
    30212968    RTGDTR Gdtr;
     2969    RTIDTR Idtr;
    30222970    RT_ZERO(Gdtr);
    3023 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    3024     if (HMVMX_IS_64BIT_HOST_MODE())
    3025     {
    3026         X86XDTR64 Gdtr64;
    3027         X86XDTR64 Idtr64;
    3028         HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
    3029         rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr);     AssertRCReturn(rc, rc);
    3030         rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr);     AssertRCReturn(rc, rc);
    3031 
    3032         Gdtr.cbGdt = Gdtr64.cb;
    3033         Gdtr.pGdt  = (uintptr_t)Gdtr64.uAddr;
    3034     }
    3035     else
     2971    RT_ZERO(Idtr);
     2972    ASMGetGDTR(&Gdtr);
     2973    ASMGetIDTR(&Idtr);
     2974    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);      AssertRCReturn(rc, rc);
     2975    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);      AssertRCReturn(rc, rc);
     2976
     2977#if HC_ARCH_BITS == 64
     2978    /*
     2979     * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps them to the
     2980     * maximum limit (0xffff) on every VM-exit.
     2981     */
     2982    if (Gdtr.cbGdt != 0xffff)
     2983    {
     2984        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
     2985        AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
     2986        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
     2987    }
     2988
     2989    /*
     2990     * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
     2991     * and Intel spec. 6.2 "Exception and Interrupt Vectors".)  Therefore if the host has the limit as 0xfff, VT-x
     2992     * bloating the limit to 0xffff shouldn't cause any different CPU behavior.  However, several hosts either insists
     2993     * on 0xfff being the limit (Windows Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there
     2994     * but botches sidt alignment in at least one consumer).  So, we're only allowing IDTR.LIMIT to be left at 0xffff on
     2995     * hosts where we are pretty sure it won't cause trouble.
     2996     */
     2997# if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
     2998    if (Idtr.cbIdt <  0x0fff)
     2999# else
     3000    if (Idtr.cbIdt != 0xffff)
     3001# endif
     3002    {
     3003        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
     3004        AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
     3005        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
     3006    }
    30363007#endif
    3037     {
    3038         RTIDTR Idtr;
    3039         ASMGetGDTR(&Gdtr);
    3040         ASMGetIDTR(&Idtr);
    3041         rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt);      AssertRCReturn(rc, rc);
    3042         rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt);      AssertRCReturn(rc, rc);
    3043 
    3044 #if HC_ARCH_BITS == 64
    3045         /*
    3046          * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps them to the
    3047          * maximum limit (0xffff) on every VM-exit.
    3048          */
    3049         if (Gdtr.cbGdt != 0xffff)
    3050         {
    3051             pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
    3052             AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
    3053             memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
    3054         }
    3055 
    3056         /*
    3057          * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT"
    3058          * and Intel spec. 6.2 "Exception and Interrupt Vectors".)  Therefore if the host has the limit as 0xfff, VT-x
    3059          * bloating the limit to 0xffff shouldn't cause any different CPU behavior.  However, several hosts either insists
    3060          * on 0xfff being the limit (Windows Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there
    3061          * but botches sidt alignment in at least one consumer).  So, we're only allowing IDTR.LIMIT to be left at 0xffff on
    3062          * hosts where we are pretty sure it won't cause trouble.
    3063          */
    3064 # if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
    3065         if (Idtr.cbIdt <  0x0fff)
    3066 # else
    3067         if (Idtr.cbIdt != 0xffff)
    3068 # endif
    3069         {
    3070             pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
    3071             AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
    3072             memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
    3073         }
    3074 #endif
    3075     }
    30763008
    30773009    /*
     
    30843016
    30853017    PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
    3086 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    3087     if (HMVMX_IS_64BIT_HOST_MODE())
    3088     {
    3089         /* We need the 64-bit TR base for hybrid darwin. */
    3090         uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
    3091         rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
    3092     }
    3093     else
     3018#if HC_ARCH_BITS == 64
     3019    uintptr_t uTRBase = X86DESC64_BASE(pDesc);
     3020
     3021    /*
     3022     * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
     3023     * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
     3024     * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
     3025     * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
     3026     *
     3027     * [1] See Intel spec. 3.5 "System Descriptor Types".
     3028     * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
     3029     */
     3030    Assert(pDesc->System.u4Type == 11);
     3031    if (   pDesc->System.u16LimitLow != 0x67
     3032        || pDesc->System.u4LimitHigh)
     3033    {
     3034        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
     3035        /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
     3036        if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
     3037            pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
     3038        pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
     3039
     3040        /* Store the GDTR here as we need it while restoring TR. */
     3041        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
     3042    }
     3043#else
     3044    uintptr_t uTRBase = X86DESC_BASE(pDesc);
    30943045#endif
    3095     {
    3096         uintptr_t uTRBase;
    3097 #if HC_ARCH_BITS == 64
    3098         uTRBase = X86DESC64_BASE(pDesc);
    3099 
    3100         /*
    3101          * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
    3102          * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
    3103          * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
    3104          * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
    3105          *
    3106          * [1] See Intel spec. 3.5 "System Descriptor Types".
    3107          * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
    3108          */
    3109         Assert(pDesc->System.u4Type == 11);
    3110         if (   pDesc->System.u16LimitLow != 0x67
    3111             || pDesc->System.u4LimitHigh)
    3112         {
    3113             pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
    3114             /* If the host has made GDT read-only, we would need to temporarily toggle CR0.WP before writing the GDT. */
    3115             if (pVM->hm.s.fHostKernelFeatures & SUPKERNELFEATURES_GDT_READ_ONLY)
    3116                 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDT_READ_ONLY;
    3117             pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
    3118 
    3119             /* Store the GDTR here as we need it while restoring TR. */
    3120             memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
    3121         }
    3122 #else
    3123         uTRBase = X86DESC_BASE(pDesc);
    3124 #endif
    3125         rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
    3126     }
     3046    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
    31273047    AssertRCReturn(rc, rc);
    31283048
     
    31303050     * Host FS base and GS base.
    31313051     */
    3132 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    3133     if (HMVMX_IS_64BIT_HOST_MODE())
    3134     {
    3135         uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
    3136         uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
    3137         rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);          AssertRCReturn(rc, rc);
    3138         rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);          AssertRCReturn(rc, rc);
    3139 
    3140 # if HC_ARCH_BITS == 64
    3141         /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
    3142         if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
    3143             pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
    3144         if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
    3145             pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
    3146 # endif
    3147     }
     3052#if HC_ARCH_BITS == 64
     3053    uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
     3054    uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
     3055    rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);          AssertRCReturn(rc, rc);
     3056    rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);          AssertRCReturn(rc, rc);
     3057
     3058    /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
     3059    if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
     3060        pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
     3061    if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
     3062        pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
    31483063#endif
    31493064    return rc;
     
    31803095    rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,        ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
    31813096    AssertRCReturn(rc, rc);
    3182 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    3183     if (HMVMX_IS_64BIT_HOST_MODE())
    3184     {
    3185         rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP,     ASMRdMsr(MSR_IA32_SYSENTER_ESP));
    3186         AssertRCReturn(rc, rc);
    3187         rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP,     ASMRdMsr(MSR_IA32_SYSENTER_EIP));
    3188     }
    3189     else
    3190     {
    3191         rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,     ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    3192         AssertRCReturn(rc, rc);
    3193         rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP,     ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
    3194     }
    3195 #elif HC_ARCH_BITS == 32
     3097#if HC_ARCH_BITS == 32
    31963098    rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP,         ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
    31973099    AssertRCReturn(rc, rc);
     
    32463148#endif
    32473149
    3248 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     3150#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    32493151    /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */
    32503152    if (CPUMIsGuestInLongMode(pVCpu))
     
    33863288         * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
    33873289         */
    3388 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    3389         if (HMVMX_IS_64BIT_HOST_MODE())
    3390         {
    3391             val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
    3392             Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
    3393         }
    3394         else
    3395             Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
     3290#if HC_ARCH_BITS == 64
     3291        val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
     3292        Log4(("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));
    33963293#else
    33973294        if (CPUMIsGuestInLongModeEx(pMixedCtx))
     
    34033300        else
    34043301            Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
    3405 #endif  /* HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
     3302#endif
    34063303
    34073304        /* If the newer VMCS fields for managing EFER exists, use it. */
     
    41954092         * Note! DBGF expects a clean DR6 state before executing guest code.
    41964093         */
    4197 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     4094#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    41984095        if (   CPUMIsGuestInLongModeEx(pMixedCtx)
    41994096            && !CPUMIsHyperDebugStateActivePending(pVCpu))
     
    42284125        if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
    42294126        {
    4230 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     4127#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    42314128            if (   CPUMIsGuestInLongModeEx(pMixedCtx)
    42324129                && !CPUMIsGuestDebugStateActivePending(pVCpu))
     
    42534150         * must intercept #DB in order to maintain a correct DR6 guest value.
    42544151         */
    4255 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     4152#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    42564153        else if (   !CPUMIsGuestDebugStateActivePending(pVCpu)
    42574154                 && !CPUMIsGuestDebugStateActive(pVCpu))
     
    44134310        }
    44144311        /* 64-bit capable CPUs. */
    4415 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4416         if (HMVMX_IS_64BIT_HOST_MODE())
    4417         {
    4418             Assert(!(pCtx->cs.u64Base >> 32));
    4419             Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
    4420             Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
    4421             Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
    4422         }
     4312# if HC_ARCH_BITS == 64
     4313        Assert(!(pCtx->cs.u64Base >> 32));
     4314        Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
     4315        Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
     4316        Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
    44234317# endif
    44244318    }
     
    44654359        Assert(u32GSAttr == 0xf3);
    44664360        /* 64-bit capable CPUs. */
    4467 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4468         if (HMVMX_IS_64BIT_HOST_MODE())
    4469         {
    4470             Assert(!(pCtx->cs.u64Base >> 32));
    4471             Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
    4472             Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
    4473             Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
    4474         }
     4361# if HC_ARCH_BITS == 64
     4362        Assert(!(pCtx->cs.u64Base >> 32));
     4363        Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
     4364        Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
     4365        Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
    44754366# endif
    44764367    }
     
    47794670    {
    47804671        /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). */
    4781 #if HC_ARCH_BITS == 32 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     4672#if HC_ARCH_BITS == 32
    47824673        if (pVM->hm.s.fAllow64BitGuests)
    47834674        {
     
    49064797#endif
    49074798        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);    /* Guaranteed by hmR3InitFinalizeR0(). */
    4908 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     4799#if HC_ARCH_BITS == 32
    49094800        /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
    49104801        if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64)
     
    49204811        }
    49214812#else
    4922         /* 64-bit host or hybrid host. */
     4813        /* 64-bit host. */
    49234814        pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
    49244815#endif
     
    49274818    {
    49284819        /* Guest is not in long mode, use the 32-bit handler. */
    4929 #if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    4930         if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32)
    4931         {
    4932             if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
    4933             {
    4934                 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
    4935                 AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_VMX_EXIT_CTLS
    4936                                                  | HM_CHANGED_VMX_ENTRY_CTLS
    4937                                                  | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
    4938             }
    4939             pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
    4940         }
    4941 #else
     4820#if HC_ARCH_BITS == 32
     4821        if (   pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32
     4822            && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */
     4823        {
     4824            /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */
     4825            AssertMsg(HMCPU_CF_IS_SET(pVCpu,   HM_CHANGED_VMX_EXIT_CTLS
     4826                                             | HM_CHANGED_VMX_ENTRY_CTLS
     4827                                             | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu)));
     4828        }
     4829#endif
    49424830        pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
    4943 #endif
    49444831    }
    49454832    Assert(pVCpu->hm.s.vmx.pfnStartVM);
     
    50264913                uint32_t        u32Val;
    50274914                uint64_t        u64Val;
    5028                 HMVMXHCUINTREG  uHCReg;
     4915                RTHCUINTREG     uHCReg;
    50294916                rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val);                  AssertRC(rc);
    50304917                Log4(("VMX_VMCS32_CTRL_PIN_EXEC                %#RX32\n", u32Val));
     
    51635050                rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg);           AssertRC(rc);
    51645051                Log4(("Host RIP %#RHv\n", uHCReg));
    5165 # if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    5166                 if (HMVMX_IS_64BIT_HOST_MODE())
    5167                 {
    5168                     Log4(("MSR_K6_EFER            = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
    5169                     Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
    5170                     Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
    5171                     Log4(("MSR_K6_STAR            = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
    5172                     Log4(("MSR_K8_SF_MASK         = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
    5173                     Log4(("MSR_K8_KERNEL_GS_BASE  = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
    5174                 }
     5052# if HC_ARCH_BITS == 64
     5053                Log4(("MSR_K6_EFER            = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
     5054                Log4(("MSR_K8_CSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
     5055                Log4(("MSR_K8_LSTAR           = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
     5056                Log4(("MSR_K6_STAR            = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
     5057                Log4(("MSR_K8_SF_MASK         = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
     5058                Log4(("MSR_K8_KERNEL_GS_BASE  = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
    51755059# endif
    51765060#endif /* VBOX_STRICT */
     
    51875071
    51885072
    5189 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     5073#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
    51905074#ifndef VMX_USE_CACHED_VMCS_ACCESSES
    51915075# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
     
    56185502    return VINF_SUCCESS;
    56195503}
    5620 
    5621 /* Enable later when the assembly code uses these as callbacks. */
    5622 #if 0
    5623 /*
    5624  * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
    5625  *
    5626  * @param   pVCpu           Pointer to the VMCPU.
    5627  * @param   pCache          Pointer to the VMCS cache.
    5628  *
    5629  * @remarks No-long-jump zone!!!
    5630  */
    5631 VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
    5632 {
    5633     AssertPtr(pCache);
    5634     for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
    5635     {
    5636         int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
    5637         AssertRC(rc);
    5638     }
    5639     pCache->Write.cValidEntries = 0;
    5640 }
    5641 
    5642 
    5643 /**
    5644  * Stores the VMCS read-cache from the CPU (by executing VMREADs).
    5645  *
    5646  * @param   pVCpu           Pointer to the VMCPU.
    5647  * @param   pCache          Pointer to the VMCS cache.
    5648  *
    5649  * @remarks No-long-jump zone!!!
    5650  */
    5651 VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
    5652 {
    5653     AssertPtr(pCache);
    5654     for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
    5655     {
    5656         int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
    5657         AssertRC(rc);
    5658     }
    5659 }
    5660 #endif
    5661 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
     5504#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
    56625505
    56635506
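The two functions deleted above were already compiled out with #if 0; they illustrate the caching scheme the 32-bit-host switcher relies on: VMCS field writes are queued in the cache and later replayed with VMWRITE in one batch. A rough sketch of that batching, using assumed, simplified structure shapes rather than the real VMCSCACHE layout:

    #include <stdint.h>

    #define MY_CACHE_MAX 128                 /* assumed capacity, illustrative */

    typedef struct MYVMCSWRITECACHE
    {
        uint32_t cValidEntries;
        uint32_t aField[MY_CACHE_MAX];       /* VMCS field encodings      */
        uint64_t aFieldVal[MY_CACHE_MAX];    /* values queued for VMWRITE */
    } MYVMCSWRITECACHE;

    /* Queue one write; a flush loop like the deleted VMXWriteCachedVmcsLoad
       walks the arrays, VMWRITEs each entry and resets the count. */
    static void QueueVmcsWrite(MYVMCSWRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
    {
        uint32_t i = pCache->cValidEntries++;
        pCache->aField[i]    = idxField;
        pCache->aFieldVal[i] = u64Val;
    }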
     
    87538596
    87548597    /* Store status of the shared guest-host state at the time of VM-entry. */
    8755 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     8598#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    87568599    if (CPUMIsGuestInLongModeEx(pMixedCtx))
    87578600    {
     
    95139356         */
    95149357        uint32_t u32Eflags;
    9515 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9516         if (HMVMX_IS_64BIT_HOST_MODE())
    9517         {
    9518             rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
    9519             AssertRCBreak(rc);
    9520             /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
    9521             if (   !fLongModeGuest
    9522                 || !pCtx->cs.Attr.n.u1Long)
    9523             {
    9524                 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
    9525             }
    9526             /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
    9527              *        must be identical if the "IA-32e mode guest" VM-entry
    9528              *        control is 1 and CS.L is 1. No check applies if the
    9529              *        CPU supports 64 linear-address bits. */
    9530 
    9531             /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
    9532             rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
    9533             AssertRCBreak(rc);
    9534             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)),                     /* Bit 63:22, Bit 15, 5, 3 MBZ. */
    9535                               VMX_IGS_RFLAGS_RESERVED);
    9536             HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);       /* Bit 1 MB1. */
    9537             u32Eflags = u64Val;
    9538         }
    9539         else
     9358#if HC_ARCH_BITS == 64
     9359        rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
     9360        AssertRCBreak(rc);
     9361        /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
     9362        if (   !fLongModeGuest
     9363            || !pCtx->cs.Attr.n.u1Long)
     9364        {
     9365            HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
     9366        }
     9367        /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
     9368         *        must be identical if the "IA-32e mode guest" VM-entry
     9369         *        control is 1 and CS.L is 1. No check applies if the
     9370         *        CPU supports 64 linear-address bits. */
     9371
     9372        /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
     9373        rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
     9374        AssertRCBreak(rc);
     9375        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)),                     /* Bit 63:22, Bit 15, 5, 3 MBZ. */
     9376                          VMX_IGS_RFLAGS_RESERVED);
     9377        HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);       /* Bit 1 MB1. */
     9378        u32Eflags = u64Val;
     9379#else
     9380        rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
     9381        AssertRCBreak(rc);
     9382        HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED);          /* Bit 31:22, Bit 15, 5, 3 MBZ. */
     9383        HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);    /* Bit 1 MB1. */
    95409384#endif
    9541         {
    9542             rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
    9543             AssertRCBreak(rc);
    9544             HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED);          /* Bit 31:22, Bit 15, 5, 3 MBZ. */
    9545             HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1);    /* Bit 1 MB1. */
    9546         }
    95479385
    95489386        if (   fLongModeGuest
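The rewritten RFLAGS check above keeps the same invariants on both host widths: bits 63:22 (31:22 for the 32-bit read), 15, 5 and 3 are reserved and must be zero, while bit 1 is reserved and must be one. A self-contained restatement of that validation:

    #include <stdbool.h>
    #include <stdint.h>

    static bool GuestRFlagsValid(uint64_t fRFlags)
    {
        const uint64_t fMbz = UINT64_C(0xffffffffffc08028); /* bits 63:22, 15, 5, 3 */
        const uint64_t fMb1 = UINT64_C(0x0000000000000002); /* bit 1 (RA1)          */
        return (fRFlags & fMbz) == 0
            && (fRFlags & fMb1) == fMb1;
    }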
     
    95659403         * 64-bit checks.
    95669404         */
    9567 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9568         if (HMVMX_IS_64BIT_HOST_MODE())
    9569         {
    9570             if (fLongModeGuest)
    9571             {
    9572                 HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
    9573                 HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
    9574             }
    9575 
    9576             if (   !fLongModeGuest
    9577                 && (u32GuestCR4 & X86_CR4_PCIDE))
    9578             {
    9579                 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
    9580             }
    9581 
    9582             /** @todo CR3 field must be such that bits 63:52 and bits in the range
    9583              *        51:32 beyond the processor's physical-address width are 0. */
    9584 
    9585             if (   (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
    9586                 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
    9587             {
    9588                 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
    9589             }
    9590 
    9591             rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
    9592             AssertRCBreak(rc);
    9593             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
    9594 
    9595             rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
    9596             AssertRCBreak(rc);
    9597             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
    9598         }
     9405#if HC_ARCH_BITS == 64
     9406        if (fLongModeGuest)
     9407        {
     9408            HMVMX_CHECK_BREAK(u32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);
     9409            HMVMX_CHECK_BREAK(u32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);
     9410        }
     9411
     9412        if (   !fLongModeGuest
     9413            && (u32GuestCR4 & X86_CR4_PCIDE))
     9414        {
     9415            HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
     9416        }
     9417
     9418        /** @todo CR3 field must be such that bits 63:52 and bits in the range
     9419         *        51:32 beyond the processor's physical-address width are 0. */
     9420
     9421        if (   (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
     9422            && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
     9423        {
     9424            HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
     9425        }
     9426
     9427        rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
     9428        AssertRCBreak(rc);
     9429        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
     9430
     9431        rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
     9432        AssertRCBreak(rc);
     9433        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
    95999434#endif
    96009435
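Among the 64-bit checks above is the basic IA-32e entry invariant: a long-mode guest must have CR0.PG and CR4.PAE set, and a guest that is not in long mode must not have CR4.PCIDE set. A compilable restatement, with the bit constants written out (in the source they come from x86.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_CR0_PG    UINT32_C(0x80000000)   /* CR0 bit 31 */
    #define MY_CR4_PAE   UINT32_C(0x00000020)   /* CR4 bit 5  */
    #define MY_CR4_PCIDE UINT32_C(0x00020000)   /* CR4 bit 17 */

    static bool EntryCrStateValid(bool fLongModeGuest, uint32_t uCr0, uint32_t uCr4)
    {
        if (fLongModeGuest)
            return (uCr0 & MY_CR0_PG) && (uCr4 & MY_CR4_PAE);
        return !(uCr4 & MY_CR4_PCIDE);
    }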
     
    97669601            }
    97679602            /* 64-bit capable CPUs. */
    9768 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9769             if (HMVMX_IS_64BIT_HOST_MODE())
    9770             {
    9771                 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
    9772                 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
    9773                 HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    9774                                   || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    9775                 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    9776                 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
    9777                                   VMX_IGS_LONGMODE_SS_BASE_INVALID);
    9778                 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
    9779                                   VMX_IGS_LONGMODE_DS_BASE_INVALID);
    9780                 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
    9781                                   VMX_IGS_LONGMODE_ES_BASE_INVALID);
    9782             }
     9603#if HC_ARCH_BITS == 64
     9604            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
     9605            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
     9606            HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
     9607                              || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
     9608            HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
     9609            HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
     9610                              VMX_IGS_LONGMODE_SS_BASE_INVALID);
     9611            HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
     9612                              VMX_IGS_LONGMODE_DS_BASE_INVALID);
     9613            HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
     9614                              VMX_IGS_LONGMODE_ES_BASE_INVALID);
    97839615#endif
    97849616        }
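The canonical-address assertions above (VMX_IGS_*_NOT_CANONICAL) all reduce to one rule: on a CPU with 48 linear-address bits, bits 63:47 of a base address must be copies of bit 47. A minimal helper expressing that rule (the real X86_IS_CANONICAL macro lives in x86.h):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if uAddr is canonical for 48 linear-address bits:
       sign-extending from bit 47 must reproduce the value. */
    static bool IsCanonical48(uint64_t uAddr)
    {
        return (uint64_t)((int64_t)(uAddr << 16) >> 16) == uAddr;
    }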
     
    98259657            HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
    98269658            /* 64-bit capable CPUs. */
    9827 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9828             if (HMVMX_IS_64BIT_HOST_MODE())
    9829             {
    9830                 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
    9831                 HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
    9832                 HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
    9833                                   || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
    9834                 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
    9835                 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
    9836                                   VMX_IGS_LONGMODE_SS_BASE_INVALID);
    9837                 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
    9838                                   VMX_IGS_LONGMODE_DS_BASE_INVALID);
    9839                 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
    9840                                   VMX_IGS_LONGMODE_ES_BASE_INVALID);
    9841             }
     9659#if HC_ARCH_BITS == 64
     9660            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
     9661            HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
     9662            HMVMX_CHECK_BREAK(   (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
     9663                              || X86_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
     9664            HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
     9665            HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
     9666                              VMX_IGS_LONGMODE_SS_BASE_INVALID);
     9667            HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
     9668                              VMX_IGS_LONGMODE_DS_BASE_INVALID);
     9669            HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
     9670                              VMX_IGS_LONGMODE_ES_BASE_INVALID);
    98429671#endif
    98439672        }
     
    98489677        HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
    98499678        /* 64-bit capable CPUs. */
    9850 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9851         if (HMVMX_IS_64BIT_HOST_MODE())
    9852         {
    9853             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
    9854         }
     9679#if HC_ARCH_BITS == 64
     9680        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
    98559681#endif
    98569682        if (fLongModeGuest)
     
    98779703         * GDTR and IDTR.
    98789704         */
    9879 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    9880         if (HMVMX_IS_64BIT_HOST_MODE())
    9881         {
    9882             rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
    9883             AssertRCBreak(rc);
    9884             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
    9885 
    9886             rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
    9887             AssertRCBreak(rc);
    9888             HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
    9889         }
     9705#if HC_ARCH_BITS == 64
     9706        rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
     9707        AssertRCBreak(rc);
     9708        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
     9709
     9710        rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
     9711        AssertRCBreak(rc);
     9712        HMVMX_CHECK_BREAK(X86_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
    98909713#endif
    98919714
     
    99669789
    99679790        /* Pending debug exceptions. */
    9968         if (HMVMX_IS_64BIT_HOST_MODE())
    9969         {
    9970             rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
    9971             AssertRCBreak(rc);
    9972             /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
    9973             HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
    9974             u32Val = u64Val;    /* For pending debug exceptions checks below. */
    9975         }
    9976         else
    9977         {
    9978             rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
    9979             AssertRCBreak(rc);
    9980             /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
    9981             HMVMX_CHECK_BREAK(!(u64Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
    9982         }
     9791#if HC_ARCH_BITS == 64
     9792        rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
     9793        AssertRCBreak(rc);
     9794        /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
     9795        HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
     9796        u32Val = u64Val;    /* For pending debug exceptions checks below. */
     9797#else
     9798        rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
     9799        AssertRCBreak(rc);
     9800        /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
     9801        HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
     9802#endif
    99839803
    99849804        if (   (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
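Besides dropping the runtime host-mode branch, the pending-debug-exceptions hunk fixes a latent bug: the old 32-bit path read into u32Val but then tested u64Val against the mask, whereas the new code correctly tests u32Val. The invariant itself is unchanged: bits 63:15 (31:15 for the 32-bit field), bit 13 and bits 11:4 must be zero. Sketched for the 64-bit field:

    #include <stdbool.h>
    #include <stdint.h>

    static bool PendingDebugXcptValid(uint64_t u64Val)
    {
        /* Bits 63:15, bit 13 and bits 11:4 must be zero. */
        return (u64Val & UINT64_C(0xffffffffffffaff0)) == 0;
    }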
     
    1070910529#ifdef VBOX_STRICT
    1071010530    uint32_t       uIntrState;
    10711     HMVMXHCUINTREG uHCReg;
     10531    RTHCUINTREG    uHCReg;
    1071210532    uint64_t       u64Val;
    1071310533    uint32_t       u32Val;
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.h

    r56287 r57429  
    4545
    4646
    47 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     47# if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    4848DECLASM(int)    VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
    4949VMMR0DECL(int)  VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
     
    6363# endif
    6464
    65 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    66 /* Don't use fAllow64BitGuests for VMXReadVmcsGstN() even though it looks right, as it can be forced to 'true'.
    67    HMVMX_IS_64BIT_HOST_MODE() is what we need. */
    68 #  define VMXReadVmcsHstN(idxField, p64Val)               HMVMX_IS_64BIT_HOST_MODE() ?                      \
    69                                                             VMXReadVmcs64(idxField, p64Val)                 \
    70                                                           : (*(p64Val) &= UINT64_C(0xffffffff),             \
    71                                                              VMXReadVmcs32(idxField, (uint32_t *)(p64Val)))
    72 #  define VMXReadVmcsGstN                                 VMXReadVmcsHstN
    73 #  define VMXReadVmcsGstNByIdxVal                         VMXReadVmcsGstN
    74 # elif HC_ARCH_BITS == 32
     65# if HC_ARCH_BITS == 32
    7566#  define VMXReadVmcsHstN                                 VMXReadVmcs32
    7667#  define VMXReadVmcsGstN(idxField, pVal)                 VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal)
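The deleted hybrid VMXReadVmcsHstN variant above used a trick worth noting: to read a 32-bit VMCS field into a 64-bit slot it first masked the slot with 0xffffffff, clearing the high dword, and then let VMXReadVmcs32 overwrite the low dword through a cast pointer, yielding a zero-extended result (this relies on the little-endian layout of x86). A standalone sketch with a stubbed 32-bit reader:

    #include <stdint.h>

    /* Stub standing in for VMXReadVmcs32, for this sketch only. */
    static int StubReadVmcs32(uint32_t idxField, uint32_t *pu32Val)
    {
        (void)idxField;
        *pu32Val = 0xdeadbeefu;     /* pretend VMREAD result */
        return 0;                   /* VINF_SUCCESS */
    }

    /* Zero-extend a 32-bit field into a 64-bit slot: clear the high dword,
       then write the low dword in place (little-endian layout). */
    static int ReadVmcs32IntoU64(uint32_t idxField, uint64_t *pu64Val)
    {
        *pu64Val &= UINT64_C(0xffffffff);
        return StubReadVmcs32(idxField, (uint32_t *)pu64Val);
    }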
  • trunk/src/VBox/VMM/VMMR0/VMMR0.cpp

    r57378 r57429  
    18671867
    18681868
    1869 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
     1869#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
    18701870        case VMMR0_DO_TEST_SWITCHER3264:
    18711871            if (idCpu == NIL_VMCPUID)