Changeset 46267 in vbox
- Timestamp: May 26, 2013 11:29:24 AM
- svn:sync-xref-src-repo-rev: 86004
- Location: trunk
- Files: 8 edited
trunk/include/VBox/vmm/hm_vmx.h
(r46004 → r46267)

New host-state restoration flags and structure, added near the top of the header:

    /** @name Host-state restoration flags.
     * @{
     */
    /* If you change these values don't forget to update the assembly defines as well! */
    #define VMX_RESTORE_HOST_SEL_DS       RT_BIT(0)
    #define VMX_RESTORE_HOST_SEL_ES       RT_BIT(1)
    #define VMX_RESTORE_HOST_SEL_FS       RT_BIT(2)
    #define VMX_RESTORE_HOST_SEL_GS       RT_BIT(3)
    #define VMX_RESTORE_HOST_GDTR         RT_BIT(4)
    #define VMX_RESTORE_HOST_IDTR         RT_BIT(5)
    #define VMX_RESTORE_HOST_LDTR         RT_BIT(6)
    /** @} */

    /**
     * Host-state restoration structure.
     * This holds host-state fields that require manual restoration. The layout is
     * critical as it's used from assembly code.
     */
    #pragma pack(1)
    typedef struct VMXRESTOREHOST
    {
        RTSEL       uHostSelDS;     /* 0x00 */
        RTSEL       uHostSelES;     /* 0x02 */
        RTSEL       uHostSelFS;     /* 0x04 */
        RTSEL       uHostSelGS;     /* 0x06 */
        uint64_t    uHostFSBase;    /* 0x08 */
        uint64_t    uHostGSBase;    /* 0x10 */
        X86XDTR64   HostGdtr;       /* 0x18 */
        X86XDTR64   HostIdtr;       /* 0x22 */
    } VMXRESTOREHOST;
    #pragma pack()
    /** Pointer to VMXRESTOREHOST. */
    typedef VMXRESTOREHOST *PVMXRESTOREHOST;
    AssertCompileSize(X86XDTR64, 10);
    AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelES,  2);
    AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelFS,  4);
    AssertCompileMemberOffset(VMXRESTOREHOST, uHostSelGS,  6);
    AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase, 8);
    AssertCompileMemberOffset(VMXRESTOREHOST, uHostGSBase, 16);
    AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr,    24);
    AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr,    34);
    AssertCompileSize(VMXRESTOREHOST, 44);

The VMCS read-cache index block is also touched; as of r46267 it reads:

    /** @name VMX VMCS-Read cache indices.
     * @{
     */
    #ifndef VBOX_WITH_OLD_VTX_CODE
    # define VMX_VMCS_GUEST_ES_BASE_CACHE_IDX                     0
    # define VMX_VMCS_GUEST_CS_BASE_CACHE_IDX                     1
    # define VMX_VMCS_GUEST_SS_BASE_CACHE_IDX                     2
    # define VMX_VMCS_GUEST_DS_BASE_CACHE_IDX                     3
    # define VMX_VMCS_GUEST_FS_BASE_CACHE_IDX                     4
    # define VMX_VMCS_GUEST_GS_BASE_CACHE_IDX                     5
    # define VMX_VMCS_GUEST_LDTR_BASE_CACHE_IDX                   6
    # define VMX_VMCS_GUEST_TR_BASE_CACHE_IDX                     7
    # define VMX_VMCS_GUEST_GDTR_BASE_CACHE_IDX                   8
    # define VMX_VMCS_GUEST_IDTR_BASE_CACHE_IDX                   9
    # define VMX_VMCS_GUEST_RSP_CACHE_IDX                         10
    # define VMX_VMCS_GUEST_RIP_CACHE_IDX                         11
    # define VMX_VMCS_GUEST_SYSENTER_ESP_CACHE_IDX                12
    # define VMX_VMCS_GUEST_SYSENTER_EIP_CACHE_IDX                13
    # define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX             14
    # define VMX_VMCS_MAX_CACHE_IDX                               (VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX + 1)
    # define VMX_VMCS_GUEST_CR3_CACHE_IDX                         15
    # define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX                 (VMX_VMCS_GUEST_CR3_CACHE_IDX + 1)
    #else  /* VBOX_WITH_OLD_VTX_CODE */
    # define VMX_VMCS_GUEST_RIP_CACHE_IDX                         0
    # define VMX_VMCS_GUEST_RSP_CACHE_IDX                         1
    # define VMX_VMCS_GUEST_RFLAGS_CACHE_IDX                      2
    # define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE_CACHE_IDX    3
    # define VMX_VMCS_CTRL_CR0_READ_SHADOW_CACHE_IDX              4
    # define VMX_VMCS_GUEST_CR0_CACHE_IDX                         5
    # define VMX_VMCS_CTRL_CR4_READ_SHADOW_CACHE_IDX              6
    # define VMX_VMCS_GUEST_CR4_CACHE_IDX                         7
    # define VMX_VMCS_GUEST_DR7_CACHE_IDX                         8
    # define VMX_VMCS32_GUEST_SYSENTER_CS_CACHE_IDX               9
    # define VMX_VMCS_GUEST_SYSENTER_EIP_CACHE_IDX                10
    # define VMX_VMCS_GUEST_SYSENTER_ESP_CACHE_IDX                11
    # define VMX_VMCS32_GUEST_GDTR_LIMIT_CACHE_IDX                12
    # define VMX_VMCS_GUEST_GDTR_BASE_CACHE_IDX                   13
    # define VMX_VMCS32_GUEST_IDTR_LIMIT_CACHE_IDX                14
    # define VMX_VMCS_GUEST_IDTR_BASE_CACHE_IDX                   15
    # define VMX_VMCS16_GUEST_FIELD_CS_CACHE_IDX                  16
    # define VMX_VMCS32_GUEST_CS_LIMIT_CACHE_IDX                  17
    # define VMX_VMCS_GUEST_CS_BASE_CACHE_IDX                     18
    # define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS_CACHE_IDX          19
    # define VMX_VMCS16_GUEST_FIELD_DS_CACHE_IDX                  20
    # define VMX_VMCS32_GUEST_DS_LIMIT_CACHE_IDX                  21
    # define VMX_VMCS_GUEST_DS_BASE_CACHE_IDX                     22
    # define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS_CACHE_IDX          23
    # define VMX_VMCS16_GUEST_FIELD_ES_CACHE_IDX                  24
    # define VMX_VMCS32_GUEST_ES_LIMIT_CACHE_IDX                  25
    # define VMX_VMCS_GUEST_ES_BASE_CACHE_IDX                     26
    # define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS_CACHE_IDX          27
    # define VMX_VMCS16_GUEST_FIELD_FS_CACHE_IDX                  28
    # define VMX_VMCS32_GUEST_FS_LIMIT_CACHE_IDX                  29
    # define VMX_VMCS_GUEST_FS_BASE_CACHE_IDX                     30
    # define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS_CACHE_IDX          31
    # define VMX_VMCS16_GUEST_FIELD_GS_CACHE_IDX                  32
    # define VMX_VMCS32_GUEST_GS_LIMIT_CACHE_IDX                  33
    # define VMX_VMCS_GUEST_GS_BASE_CACHE_IDX                     34
    # define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS_CACHE_IDX          35
    # define VMX_VMCS16_GUEST_FIELD_SS_CACHE_IDX                  36
    # define VMX_VMCS32_GUEST_SS_LIMIT_CACHE_IDX                  37
    # define VMX_VMCS_GUEST_SS_BASE_CACHE_IDX                     38
    # define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS_CACHE_IDX          39
    # define VMX_VMCS16_GUEST_FIELD_TR_CACHE_IDX                  40
    # define VMX_VMCS32_GUEST_TR_LIMIT_CACHE_IDX                  41
    # define VMX_VMCS_GUEST_TR_BASE_CACHE_IDX                     42
    # define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS_CACHE_IDX          43
    # define VMX_VMCS16_GUEST_FIELD_LDTR_CACHE_IDX                44
    # define VMX_VMCS32_GUEST_LDTR_LIMIT_CACHE_IDX                45
    # define VMX_VMCS_GUEST_LDTR_BASE_CACHE_IDX                   46
    # define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS_CACHE_IDX        47
    # define VMX_VMCS32_RO_EXIT_REASON_CACHE_IDX                  48
    # define VMX_VMCS32_RO_VM_INSTR_ERROR_CACHE_IDX               49
    # define VMX_VMCS32_RO_EXIT_INSTR_LENGTH_CACHE_IDX            50
    # define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE_CACHE_IDX 51
    # define VMX_VMCS32_RO_EXIT_INSTR_INFO_CACHE_IDX              52
    # define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO_CACHE_IDX       53
    # define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX             54
    # define VMX_VMCS32_RO_IDT_INFO_CACHE_IDX                     55
    # define VMX_VMCS32_RO_IDT_ERROR_CODE_CACHE_IDX               56
    # define VMX_VMCS_MAX_CACHE_IDX                               (VMX_VMCS32_RO_IDT_ERROR_CODE_CACHE_IDX + 1)
    # define VMX_VMCS_GUEST_CR3_CACHE_IDX                         57
    # define VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL_CACHE_IDX       58
    # define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX                 (VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL_CACHE_IDX + 1)
    #endif /* VBOX_WITH_OLD_VTX_CODE */
    /** @} */

Further down, a declaration for the new assembly helper is added, just ahead of the existing "Executes VMXON" comment block:

    /**
     * Restores some host-state fields that need not be done on every VM-exit.
     *
     * @returns VBox status code.
     * @param   fRestoreHostFlags   Flags of which host registers needs to be
     *                              restored.
     * @param   pRestoreHost        Pointer to the host-restore structure.
     */
    DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);
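Taken together, the flags, the structure and VMXRestoreHostState() form a small deferred-restore protocol: record what VT-x cannot restore for us, then put it back lazily. The following C sketch is illustrative only; the helper names (hmExampleCaptureHostDS, hmExampleLazyRestore) and the simplified control flow are not part of the changeset, though the symbols they use (ASMGetDS, RTSEL, X86_SEL_RPL, X86_SEL_LDT) are real VBox/IPRT ones.

    /* Sketch only: remember a host DS selector that VT-x would reject or mangle,
     * then restore it lazily before the host needs it again. */
    static void hmExampleCaptureHostDS(uint32_t *pfRestoreHostFlags, PVMXRESTOREHOST pRestoreHost)
    {
        RTSEL uSelDS = ASMGetDS();
        if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))       /* not usable as a VMCS host selector */
        {
            *pfRestoreHostFlags      |= VMX_RESTORE_HOST_SEL_DS;
            pRestoreHost->uHostSelDS  = uSelDS;         /* remember what to reload later */
        }
    }

    static void hmExampleLazyRestore(uint32_t *pfRestoreHostFlags, PVMXRESTOREHOST pRestoreHost)
    {
        if (*pfRestoreHostFlags)                        /* anything deferred at VM-entry time? */
        {
            VMXRestoreHostState(*pfRestoreHostFlags, pRestoreHost);
            *pfRestoreHostFlags = 0;                    /* one-shot: clear after restoring */
        }
    }

HMVMXR0.cpp below follows exactly this pattern per VCPU, keeping the flags and the structure in pVCpu->hm.s.vmx so the restore can run from the preemption/longjmp path.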
trunk/include/VBox/vmm/hm_vmx.mac
(r45947 → r46267)

The restoration flags are mirrored for the assembly code (the values must match the RT_BIT() definitions in hm_vmx.h):

    %define VMX_VMCS_HOST_RIP           06C16h

    %define VMX_RESTORE_HOST_SEL_DS     1h      ;RT_BIT(0)
    %define VMX_RESTORE_HOST_SEL_ES     2h      ;RT_BIT(1)
    %define VMX_RESTORE_HOST_SEL_FS     4h      ;RT_BIT(2)
    %define VMX_RESTORE_HOST_SEL_GS     8h      ;RT_BIT(3)
    %define VMX_RESTORE_HOST_GDTR       10h     ;RT_BIT(4)
    %define VMX_RESTORE_HOST_IDTR       20h     ;RT_BIT(5)
    %define VMX_RESTORE_HOST_LDTR       40h     ;RT_BIT(6)
trunk/include/VBox/vmm/vm.h
(r46155 → r46267)

The HM padding in the per-VCPU union grows from 5440 to 5504 bytes (still a multiple of 64) to make room for the new restore-host fields added to HMCPU, and the page-alignment filler shrinks by the same 64 bytes:

        struct HMCPU s;
    #endif
        uint8_t     padding[5504];          /* multiple of 64 */  (was 5440)
    } hm;
    ...
    /** Align the following members on page boundary. */
    uint8_t     abAlignment2[256];          /* was 320 */

    /** PGM part. */
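The two arrays only reserve space; what actually matters is that the reservation stays a multiple of 64, that the real HMCPU still fits inside it, and that the assembly layout in vm.mac (next file) uses the same number. A minimal sketch of the kind of compile-time guard that enforces the first two points, assuming IPRT's AssertCompile from iprt/assert.h (the exact assertions VirtualBox uses may differ):

    #include <iprt/assert.h>

    AssertCompile(5504 % 64 == 0);                  /* padding must stay a multiple of 64 */
    AssertCompile(sizeof(struct HMCPU) <= 5504);    /* the payload has to fit the reservation */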
trunk/include/VBox/vmm/vm.mac
(r45870 → r46267)

The assembly structure layout is kept in step with vm.h:

    .cpum   resb 3584
    .hm     resb 5504       ; was 5440
    .em     resb 1472
    .iem    resb 3072
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
(r45955 → r46267)

For the new VT-x code on 64-bit hosts, pushing and popping the segment registers around the world switch is compiled out:

    %ifndef VBOX_WITH_OLD_VTX_CODE
     %ifdef RT_ARCH_AMD64
      %define VBOX_SKIP_RESTORE_SEG
     %endif
    %endif

When VBOX_SKIP_RESTORE_SEG is defined, MYPUSHSEGS64 and MYPOPSEGS64 become empty macros; the existing bodies ("trashes, rax, rdx & rcx") move into the %else branch:

    %ifdef VBOX_SKIP_RESTORE_SEG
    %macro MYPUSHSEGS64 2
    %endmacro

    %macro MYPOPSEGS64 2
    %endmacro
    %else ; !VBOX_SKIP_RESTORE_SEG
    ; trashes, rax, rdx & rcx
    %macro MYPUSHSEGS64 2
        ...
    %endmacro
    ...
    %endif ; VBOX_SKIP_RESTORE_SEG

The new VMXRestoreHostState routine is added right after BEGINCODE:

    ;/**
    ; * Restores host-state fields.
    ; *
    ; * @returns VBox status code
    ; * @param   u32RestoreHostFlags   x86: [ebp + 08h]  msc: rcx  gcc: rdi   u32RestoreHost - RestoreHost flags.
    ; * @param   pRestoreHost          x86: [ebp + 0ch]  msc: rdx  gcc: rsi   pRestoreHost - Pointer to the RestoreHost struct.
    ; */
    ALIGNCODE(16)
    BEGINPROC VMXRestoreHostState
    %ifdef RT_ARCH_AMD64
     %ifndef ASM_CALL64_GCC
        ; On msc R10, R11 are scratch, RDI and RSI are not. So we must save and restore them!
        mov         r10, rdi
        mov         r11, rsi
        ; Switch to common register usage (i.e. gcc's in this function)
        mov         rdi, rcx
        mov         rsi, rdx
     %endif

        test        edi, VMX_RESTORE_HOST_GDTR
        jz          near .test_idtr
        lgdt        [rsi + 18h]                 ; pRestoreHost->HostGdtr

    .test_idtr:
        test        edi, VMX_RESTORE_HOST_IDTR
        jz          near .test_ds
        lidt        [rsi + 22h]                 ; pRestoreHost->HostIdtr

    .test_ds:
        test        edi, VMX_RESTORE_HOST_SEL_DS
        jz          near .test_es
        mov         ax, word [rsi]              ; pRestoreHost->uHostSelDS
        mov         ds, ax

    .test_es:
        test        edi, VMX_RESTORE_HOST_SEL_ES
        jz          near .test_fs
        mov         ax, word [rsi + 2]          ; pRestoreHost->uHostSelES
        mov         es, ax

    .test_fs:
        ; We're only restoring the selector. The base is valid and restored by VT-x. If we get an interrupt in between FS & GS
        ; below, we are fine as the base is what is relevant in 64-bit mode. We need to disable interrupts only during
        ; writing of the selector as that zaps (trashes) the upper-part of the base until we wrmsr the full 64-bit base.

        test        edi, VMX_RESTORE_HOST_SEL_FS
        jz          near .test_gs
        mov         ax, word [rsi + 4]          ; pRestoreHost->uHostSelFS
        cli                                     ; Disable interrupts as mov fs, ax will zap the upper part of the base
        mov         fs, ax
        mov         eax, dword [rsi + 8]        ; pRestoreHost->uHostFSBase - Lo
        mov         edx, dword [rsi + 0Ch]      ; pRestoreHost->uHostFSBase - Hi
        mov         ecx, MSR_K8_FS_BASE
        wrmsr
        sti                                     ; Re-enable interrupts as fsbase is consistent now

    .test_gs:
        test        edi, VMX_RESTORE_HOST_SEL_GS
        jz          near .restore_success
        mov         ax, word [rsi + 6]          ; pRestoreHost->uHostSelGS
        cli                                     ; Disable interrupts as mov gs, ax will zap the upper part of the base
        mov         gs, ax
        mov         eax, dword [rsi + 10h]      ; pRestoreHost->uHostGSBase - Lo
        mov         edx, dword [rsi + 14h]      ; pRestoreHost->uHostGSBase - Hi
        mov         ecx, MSR_K8_GS_BASE
        wrmsr
        sti                                     ; Re-enable interrupts as gsbase is consistent now

    .restore_success:
        mov         eax, VINF_SUCCESS
     %ifndef ASM_CALL64_GCC
        ; Restore RDI and RSI on MSC.
        mov         rdi, r10
        mov         rsi, r11
     %endif
    %else  ; RT_ARCH_X86
        mov         eax, VERR_NOT_IMPLEMENTED
    %endif
        ret
    ENDPROC VMXRestoreHostState
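For readers more comfortable with C, the FS leg of the routine above boils down to the sketch below. It is illustrative only (ASMSetFS stands in for the selector write, which IPRT may spell differently), but the ordering is the point: writing the selector clobbers the hidden 64-bit base, so the base is rewritten through MSR_K8_FS_BASE before interrupts are allowed back in.

    /* Hypothetical C rendering of the FS restore path in VMXRestoreHostState. */
    static void hmExampleRestoreHostFS(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost)
    {
        if (fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
        {
            RTCCUINTREG fSavedFlags = ASMIntDisableFlags();         /* the 'cli' above */
            ASMSetFS(pRestoreHost->uHostSelFS);                     /* zaps the upper half of FS.base */
            ASMWrMsr(MSR_K8_FS_BASE, pRestoreHost->uHostFSBase);    /* make the base whole again */
            ASMSetFlags(fSavedFlags);                               /* the 'sti' above */
        }
    }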
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
(r46099 → r46267)

The GDTR/IDTR juggling in the world-switch code is made conditional. A new define is introduced at the top of the file:

    %ifndef VBOX_WITH_OLD_VTX_CODE
     %ifdef RT_ARCH_AMD64
      %define VMX_SKIP_GDTR_IDTR
     %endif
    %endif

The existing save sequence (in both the 32-bit and the 64-bit start paths) is wrapped in the new conditional:

    ; VT-x only saves the base of the GDTR & IDTR and resets the limit to 0xffff; we must restore the limit correctly!
    %ifdef VMX_SKIP_GDTR_IDTR
        sub     xSP, xS*2
        sgdt    [xSP]
        sub     xSP, xS*2
        sidt    [xSP]
    %endif

and the matching restore sequence gets the same %ifdef/%endif guard in every exit path: the normal VM-exit, .vmxstart_invalid_vmcs_ptr, .vmxstart_start_failed, and their 64-bit counterparts .vmxstart64_invalid_vmcs_ptr and .vmxstart64_start_failed:

    ; Restore base and limit of the IDTR & GDTR.
    %ifdef VMX_SKIP_GDTR_IDTR
        lidt    [xSP]
        add     xSP, xS*2
        lgdt    [xSP]
        add     xSP, xS*2
    %endif
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
(r46192 → r46267)

In hmR0VmxSaveHostSegmentRegs() the host DS, ES, FS and GS selectors are now read up front on 64-bit hosts and fRestoreHostFlags is cleared; uSelCS and uSelSS become plain locals declared just before the CS/SS handling:

    int rc = VERR_INTERNAL_ERROR_5;
    RTSEL uSelDS = 0;
    RTSEL uSelES = 0;
    ...

    /*
     * Host DS, ES, FS and GS segment registers.
     */
    #if HC_ARCH_BITS == 64
    pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
    uSelDS = ASMGetDS();
    uSelES = ASMGetES();
    uSelFS = ASMGetFS();
    uSelGS = ASMGetGS();
    #endif

    /*
     * Host CS and SS segment registers.
     */
    RTSEL uSelCS;
    RTSEL uSelSS;
    #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
    if (HMVMX_IS_64BIT_HOST_MODE())
    ...
    #endif

    /*
     * Host TR segment register.
     */
    uSelTR = ASMGetTR();

On 64-bit hosts the data selectors are then vetted: anything with a non-zero RPL or an LDT reference is recorded for manual restoration and zeroed for the VM-entry:

    #if HC_ARCH_BITS == 64
    /*
     * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them
     * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
     */
    if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
        pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
        uSelDS = 0;
    }
    if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
        pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
        uSelES = 0;
    }
    if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
        pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
        uSelFS = 0;
    }
    if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
        pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
        uSelGS = 0;
    }
    #endif

The DS and ES host-selector fields are now actually written to the VMCS on 64-bit hosts (the old "#if 0" around those VMWRITEs is gone), and the obsolete @todo about DS/ES running with RPL != 0 in kernel space is dropped:

    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS);     AssertRCReturn(rc, rc);
    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS);     AssertRCReturn(rc, rc);
    #if HC_ARCH_BITS == 64
    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS);     AssertRCReturn(rc, rc);
    rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES);     AssertRCReturn(rc, rc);
    ...

For the host GDTR and IDTR, the @todo about doing the limit restoration in C rather than in the assembly code is resolved: on 64-bit hosts the limits are checked and, where needed, saved for VMXRestoreHostState():

    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
    rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);

    #if HC_ARCH_BITS == 64
    /*
     * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps them to the
     * maximum limit (0xffff) on every VM-exit.
     */
    if (Gdtr.cbGdt != 0xffff)
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
        AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
    }

    /*
     * IDT limit is practically 0xfff. Therefore if the host has the limit as 0xfff, VT-x bloating the limit to 0xffff
     * is not a problem as it's not possible to get at them anyway. See Intel spec. 6.14.1 "64-Bit Mode IDT" and
     * Intel spec. 6.2 "Exception and Interrupt Vectors".
     */
    if (Idtr.cbIdt < 0x0fff)
    {
        pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
        AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
        memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
    }
    #endif

The TR-limit assertion message is shortened ("cbGdt=%#x" instead of "Gdtr.cbGdt=%#x"), the stale comment about 32-bit hosts handling the FS/GS bases in assembly is removed, and the FS/GS base handling now also records the bases whenever the corresponding selector has to be restored manually:

    uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
    uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
    rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase);     AssertRCReturn(rc, rc);
    rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase);     AssertRCReturn(rc, rc);

    # if HC_ARCH_BITS == 64
    /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
    if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
        pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
    if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
        pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
    # endif

In the VM-exit handling, the StatEntry, StatLoadGuestState and StatExit1 profiles are now stopped alongside StatExit2:

    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);

In the preemption/longjmp path the deferred host state is now put back before the VMCS is synced:

    /* Restore host-state bits that VT-x only restores partially. */
    if (pVCpu->hm.s.vmx.fRestoreHostFlags)
    {
        VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
        pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
    }

Finally, the StatEntry/StatInGC profile switch is moved to after TMNotifyStartOfExecution():

    TMNotifyStartOfExecution(pVCpu);                /* Finally, notify TM to resume its clocks as we're about
                                                       to start executing. */
    STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
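The selector test used throughout the hunk above is worth spelling out once in isolation. A small hypothetical helper, not part of the changeset, that captures the rule:

    /* A host data selector may be written straight into the VMCS host-state area
     * only when its RPL is 0 and it points into the GDT; anything else has to be
     * deferred to VMXRestoreHostState(). */
    static bool hmExampleIsVmcsFriendlyHostSel(RTSEL uSel)
    {
        /* X86_SEL_RPL covers bits 0-1 (requested privilege level),
         * X86_SEL_LDT is bit 2 (table indicator, set for LDT-based selectors). */
        return (uSel & (X86_SEL_RPL | X86_SEL_LDT)) == 0;
    }

A selector such as 0x2b (GDT, RPL 3) or 0x0f (LDT, RPL 3) fails this test, so it is zeroed for the VM-entry and restored manually afterwards.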
trunk/src/VBox/VMM/include/HMInternal.h
(r46192 → r46267)

HMCPU gains the per-VCPU restore bookkeeping (added in the #else branch of the surrounding #ifdef, next to the new VT-x state):

        PGMMODE                     enmPrevGuestMode;
    #else
        uint32_t                    fRestoreHostFlags;
        VMXRESTOREHOST              RestoreHost;
        /** Set if guest was executing in real mode (extra checks). */
        bool                        fWasInRealMode;