- Timestamp: Jan 16, 2018 7:05:36 PM
- svn:sync-xref-src-repo-rev: 120319
- Location: trunk
- Files: 21 edited
trunk
- Property svn:mergeinfo changed: added /branches/VBox-5.2:120083,120099,120213,120221,120239
trunk/include/VBox/settings.h
r69107 r70606 905 905 fAPIC, // requires settings version 1.16 (VirtualBox 5.1) 906 906 fX2APIC; // requires settings version 1.16 (VirtualBox 5.1) 907 bool fIBPBOnVMExit; //< added out of cycle, after 1.16 was out. 908 bool fIBPBOnVMEntry; //< added out of cycle, after 1.16 was out. 907 909 typedef enum LongModeType { LongMode_Enabled, LongMode_Disabled, LongMode_Legacy } LongModeType; 908 910 LongModeType enmLongMode; -
trunk/include/VBox/vmm/cpum.h
r70555 r70606 1026 1026 /** Supports CLFLUSHOPT. */ 1027 1027 uint32_t fClFlushOpt : 1; 1028 /** Supports IA32_PRED_CMD.IBPB. */ 1029 uint32_t fIbpb : 1; 1030 /** Supports IA32_SPEC_CTRL.IBRS. */ 1031 uint32_t fIbrs : 1; 1032 /** Supports IA32_SPEC_CTRL.STIBP. */ 1033 uint32_t fStibp : 1; 1034 /** Supports IA32_ARCH_CAP. */ 1035 uint32_t fArchCap : 1; 1028 1036 1029 1037 /** Supports AMD 3DNow instructions. */ … … 1059 1067 1060 1068 /** Alignment padding / reserved for future use. */ 1061 uint32_t fPadding : 23;1069 uint32_t fPadding : 19; 1062 1070 1063 1071 /** SVM: Supports Nested-paging. */ -
trunk/include/VBox/vmm/cpum.mac
r69764 r70606 147 147 %define XSTATE_SIZE 8192 148 148 149 ;; Note! Updates here must be reflected in CPUMInternal.mac too! 149 150 struc CPUMCTX 150 151 .eax resq 1 … … 250 251 .fXStateMask resq 1 251 252 .pXStateR0 RTR0PTR_RES 1 253 alignb 8 252 254 .pXStateR3 RTR3PTR_RES 1 255 alignb 8 253 256 .pXStateRC RTRCPTR_RES 1 254 257 .aoffXState resw 64 255 %if HC_ARCH_BITS == 64 256 .abPadding resb 4 257 %else 258 .abPadding resb 12 259 %endif 258 .fWorldSwitcher resd 1 259 alignb 8 260 260 .hwvirt.svm.uMsrHSavePa resq 1 261 261 .hwvirt.svm.GCPhysVmcb resq 1 … … 284 284 endstruc 285 285 286 %define CPUMCTX_WSF_IBPB_EXIT RT_BIT_32(0) 287 %define CPUMCTX_WSF_IBPB_ENTRY RT_BIT_32(1) 286 288 287 289 %define CPUMSELREG_FLAGS_VALID 0x0001 -
trunk/include/VBox/vmm/cpumctx.h
r69764 r70606 458 458 /** Pointer to the FPU/SSE/AVX/XXXX state ring-0 mapping. */ 459 459 R0PTRTYPE(PX86XSAVEAREA) pXStateR0; 460 #if HC_ARCH_BITS == 32 461 uint32_t uXStateR0Padding; 462 #endif 460 463 /** Pointer to the FPU/SSE/AVX/XXXX state ring-3 mapping. */ 461 464 R3PTRTYPE(PX86XSAVEAREA) pXStateR3; 465 #if HC_ARCH_BITS == 32 466 uint32_t uXStateR3Padding; 467 #endif 462 468 /** Pointer to the FPU/SSE/AVX/XXXX state raw-mode mapping. */ 463 469 RCPTRTYPE(PX86XSAVEAREA) pXStateRC; … … 465 471 uint16_t aoffXState[64]; 466 472 467 /** 724 - Size padding. */468 uint 8_t abPadding[HC_ARCH_BITS == 64 ? 4 : 12];473 /** 0x2d4 - World switcher flags, CPUMCTX_WSF_XXX. */ 474 uint32_t fWorldSwitcher; 469 475 470 476 /** 728 - Hardware virtualization state. */ … … 579 585 AssertCompileMemberOffset(CPUMCTX, fXStateMask, 568); 580 586 AssertCompileMemberOffset(CPUMCTX, pXStateR0, 576); 581 AssertCompileMemberOffset(CPUMCTX, pXStateR3, HC_ARCH_BITS == 64 ? 584 : 580);582 AssertCompileMemberOffset(CPUMCTX, pXStateRC, HC_ARCH_BITS == 64 ? 592 : 584);583 AssertCompileMemberOffset(CPUMCTX, aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);587 AssertCompileMemberOffset(CPUMCTX, pXStateR3, 584); 588 AssertCompileMemberOffset(CPUMCTX, pXStateRC, 592); 589 AssertCompileMemberOffset(CPUMCTX, aoffXState, 596); 584 590 AssertCompileMemberOffset(CPUMCTX, hwvirt, 728); 585 591 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa, 728); … … 737 743 #endif /* !VBOX_FOR_DTRACE_LIB */ 738 744 745 746 /** @name CPUMCTX_WSF_XXX 747 * @{ */ 748 /** Touch IA32_PRED_CMD.IBPB on VM exit. */ 749 #define CPUMCTX_WSF_IBPB_EXIT RT_BIT_32(0) 750 /** Touch IA32_PRED_CMD.IBPB on VM entry. */ 751 #define CPUMCTX_WSF_IBPB_ENTRY RT_BIT_32(1) 752 /** @} */ 753 754 739 755 /** 740 756 * Additional guest MSRs (i.e. not part of the CPU context structure). -
trunk/include/iprt/x86.h
r70265 r70606 598 598 /** ECX Bit 0 - PREFETCHWT1 - Supports the PREFETCHWT1 instruction. */ 599 599 #define X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 RT_BIT_32(0) 600 /** ECX Bit 2 - UIMP - Supports user mode instruction prevention. */ 601 #define X86_CPUID_STEXT_FEATURE_ECX_UMIP RT_BIT_32(2) 602 /** ECX Bit 3 - PKU - Supports protection keys for user-mode pages. */ 603 #define X86_CPUID_STEXT_FEATURE_ECX_PKU RT_BIT_32(3) 604 /** ECX Bit 4 - OSPKE - Protection keys for user mode pages enabled. */ 605 #define X86_CPUID_STEXT_FEATURE_ECX_OSPKE RT_BIT_32(4) 606 /** ECX Bits 17-21 - MAWAU - Value used by BNDLDX and BNDSTX. */ 607 #define X86_CPUID_STEXT_FEATURE_ECX_MAWAU UINT32_C(0x003e0000) 608 /** ECX Bit 22 - RDPID - Support pread process ID. */ 609 #define X86_CPUID_STEXT_FEATURE_ECX_RDPID RT_BIT_32(2) 610 /** ECX Bit 30 - SGX_LC - Supports SGX launch configuration. */ 611 #define X86_CPUID_STEXT_FEATURE_ECX_SGX_LC RT_BIT_32(30) 612 613 /** EDX Bit 26 - IBRS & IBPB - Supports the IBRS flag in IA32_SPEC_CTRL and 614 * IBPB command in IA32_PRED_CMD. */ 615 #define X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT_32(26) 616 /** EDX Bit 27 - IBRS & IBPB - Supports the STIBP flag in IA32_SPEC_CTRL. */ 617 #define X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT_32(27) 618 619 /** EDX Bit 29 - ARCHCAP - Supports the IA32_ARCH_CAP MSR. */ 620 #define X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP RT_BIT_32(29) 621 600 622 /** @} */ 601 623 … … 742 764 743 765 766 /** @name CPUID AMD extended feature extensions ID (EBX). 767 * CPUID query with EAX=0x80000008. 768 * @{ 769 */ 770 /** Bit 0 - CLZERO - Clear zero instruction. */ 771 #define X86_CPUID_AMD_EFEID_EBX_CLZERO RT_BIT_32(0) 772 /** Bit 1 - IRPerf - Instructions retired count support. */ 773 #define X86_CPUID_AMD_EFEID_EBX_IRPERF RT_BIT_32(1) 774 /** Bit 2 - XSaveErPtr - Always XSAVE* and XRSTR* error pointers. */ 775 #define X86_CPUID_AMD_EFEID_EBX_XSAVE_ER_PTR RT_BIT_32(2) 776 /* AMD pipeline length: 9 feature bits ;-) */ 777 /** Bit 12 - IBPB - Supports the IBPB command in IA32_PRED_CMD. */ 778 #define X86_CPUID_AMD_EFEID_EBX_IBPB RT_BIT_32(12) 779 /** @} */ 780 781 744 782 /** @name CPUID AMD SVM Feature information. 745 783 * CPUID query with EAX=0x8000000a. … … 1114 1152 #define MSR_IA32_TSC_ADJUST 0x3B 1115 1153 1154 /** Spectre control register. 1155 * Logical processor scope. Reset value 0, unaffected by SIPI & INIT. */ 1156 #define MSR_IA32_SPEC_CTRL 0x48 1157 /** IBRS - Indirect branch restricted speculation. */ 1158 #define MSR_IA32_SPEC_CTRL_F_IBRS RT_BIT_32(0) 1159 /** STIBP - Single thread indirect branch predictors. */ 1160 #define MSR_IA32_SPEC_CTRL_F_STIBP RT_BIT_32(1) 1161 1162 /** Prediction command register. 1163 * Write only, logical processor scope, no state since write only. */ 1164 #define MSR_IA32_PRED_CMD 0x49 1165 /** IBPB - Indirect branch prediction barrie when written as 1. */ 1166 #define MSR_IA32_PRED_CMD_F_IBPB RT_BIT_32(0) 1167 1116 1168 /** BIOS update trigger (microcode update). */ 1117 1169 #define MSR_IA32_BIOS_UPDT_TRIG 0x79 … … 1148 1200 /** MTRR Capabilities. */ 1149 1201 #define MSR_IA32_MTRR_CAP 0xFE 1202 1203 /** Architecture capabilities (bugfixes). 1204 * @note May move */ 1205 #define MSR_IA32_ARCH_CAP UINT32_C(0x10a) 1206 /** CPU is no subject to spectre problems. */ 1207 #define MSR_IA32_ARCH_CAP_F_SPECTRE_FIX RT_BIT_32(0) 1208 /** CPU has better IBRS and you can leave it on all the time. */ 1209 #define MSR_IA32_ARCH_CAP_F_BETTER_IBRS RT_BIT_32(1) 1150 1210 1151 1211 /** Cache control/info. */ -
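The CPUID and MSR constants added above are enough to detect and trigger an indirect branch prediction barrier from C code as well. A minimal sketch, assuming a ring-0 context where WRMSR is permitted and using the IPRT ASMCpuIdExSlow/ASMWrMsr helpers; the helper name and the omitted check of the maximum extended CPUID leaf are illustrative simplifications, not part of this changeset:

#include <iprt/types.h>
#include <iprt/asm-amd64-x86.h>   /* ASMCpuIdExSlow, ASMWrMsr */
#include <iprt/x86.h>             /* constants introduced above */

/* Issue an IBPB when the CPU advertises it, either via the Intel
   CPUID.(EAX=7,ECX=0).EDX bit or the AMD CPUID.0x80000008.EBX bit. */
static void issueIbpbIfSupported(void)
{
    uint32_t uEax, uEbx, uEcx, uEdx;

    ASMCpuIdExSlow(7, 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
    bool fIbpb = RT_BOOL(uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB);

    ASMCpuIdExSlow(UINT32_C(0x80000008), 0, 0, 0, &uEax, &uEbx, &uEcx, &uEdx);
    fIbpb = fIbpb || RT_BOOL(uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);

    if (fIbpb)
        ASMWrMsr(MSR_IA32_PRED_CMD, MSR_IA32_PRED_CMD_F_IBPB);
}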
trunk/include/iprt/x86.mac
r70459 r70606 177 177 %define X86_CPUID_STEXT_FEATURE_EBX_SHA RT_BIT_32(29) 178 178 %define X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 RT_BIT_32(0) 179 %define X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB RT_BIT_32(26) 180 %define X86_CPUID_STEXT_FEATURE_EDX_STIBP RT_BIT_32(27) 181 %define X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP RT_BIT_32(29) 179 182 %define X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF RT_BIT_32(0) 180 183 %define X86_CPUID_EXT_FEATURE_EDX_SYSCALL RT_BIT_32(11) … … 380 383 %define MSR_IA32_FEATURE_CONTROL_VMXON RT_BIT_32(2) 381 384 %define MSR_IA32_TSC_ADJUST 0x3B 385 %define MSR_IA32_SPEC_CTRL 0x48 386 %define MSR_IA32_SPEC_CTRL_F_IBRS RT_BIT_32(0) 387 %define MSR_IA32_SPEC_CTRL_F_STIBP RT_BIT_32(1) 388 %define MSR_IA32_PRED_CMD 0x49 389 %define MSR_IA32_PRED_CMD_F_IBPB RT_BIT_32(0) 382 390 %define MSR_IA32_BIOS_UPDT_TRIG 0x79 383 391 %define MSR_IA32_BIOS_SIGN_ID 0x8B … … 393 401 %define MSR_IA32_APERF 0xE8 394 402 %define MSR_IA32_MTRR_CAP 0xFE 403 %define MSR_IA32_ARCH_CAP 0x10a 404 %define MSR_IA32_ARCH_CAP_F_SPECTRE_FIX RT_BIT_32(0) 405 %define MSR_IA32_ARCH_CAP_F_BETTER_IBRS RT_BIT_32(1) 395 406 %define MSR_BBL_CR_CTL3 0x11e 396 407 %ifndef MSR_IA32_SYSENTER_CS -
trunk/src/VBox
- Property svn:mergeinfo changed: added /branches/VBox-5.2/src/VBox:120083,120099,120213,120221,120239
trunk/src/VBox/Frontends
- Property svn:mergeinfo changed: added /branches/VBox-5.2/src/VBox/Frontends:120213
trunk/src/VBox/Frontends/VBoxManage/VBoxManageHelp.cpp
r70588 r70606 512 512 " [--pae on|off]\n" 513 513 " [--longmode on|off]\n" 514 " [--ibpb-on-vm-exit on|off]\n" 515 " [--ibpb-on-vm-entry on|off]\n" 514 516 " [--cpu-profile \"host|Intel 80[86|286|386]\"]\n" 515 517 " [--cpuid-portability-level <0..3>\n" -
trunk/src/VBox/Frontends/VBoxManage/VBoxManageModifyVM.cpp
r70587 r70606 75 75 MODIFYVM_VTXVPID, 76 76 MODIFYVM_VTXUX, 77 MODIFYVM_IBPB_ON_VM_EXIT, 78 MODIFYVM_IBPB_ON_VM_ENTRY, 77 79 MODIFYVM_CPUS, 78 80 MODIFYVM_CPUHOTPLUG, … … 255 257 { "--vtxvpid", MODIFYVM_VTXVPID, RTGETOPT_REQ_BOOL_ONOFF }, 256 258 { "--vtxux", MODIFYVM_VTXUX, RTGETOPT_REQ_BOOL_ONOFF }, 259 { "--ibpb-on-vm-exit", MODIFYVM_IBPB_ON_VM_EXIT, RTGETOPT_REQ_BOOL_ONOFF }, 260 { "--ibpb-on-vm-entry", MODIFYVM_IBPB_ON_VM_ENTRY, RTGETOPT_REQ_BOOL_ONOFF }, 257 261 { "--cpuid-set", MODIFYVM_SETCPUID, RTGETOPT_REQ_UINT32_OPTIONAL_PAIR | RTGETOPT_FLAG_HEX }, 258 262 { "--cpuid-remove", MODIFYVM_DELCPUID, RTGETOPT_REQ_UINT32_OPTIONAL_PAIR | RTGETOPT_FLAG_HEX }, … … 794 798 } 795 799 800 case MODIFYVM_IBPB_ON_VM_EXIT: 801 CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_IBPBOnVMExit, ValueUnion.f)); 802 break; 803 804 case MODIFYVM_IBPB_ON_VM_ENTRY: 805 CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_IBPBOnVMEntry, ValueUnion.f)); 806 break; 807 796 808 case MODIFYVM_CPUS: 797 809 { -
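With the option table entries and property setters above in place, the new mitigations can be toggled per VM from the command line. A usage sketch, where "MyVM" is a placeholder name and both switches take on|off; they only take effect when the host CPU reports IBPB support (see the HM.cpp change further down):

VBoxManage modifyvm "MyVM" --ibpb-on-vm-exit on
VBoxManage modifyvm "MyVM" --ibpb-on-vm-entry on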
trunk/src/VBox/Main/idl/VirtualBox.xidl
r70593 r70606 1002 1002 Since this feature implies that the APIC feature is present, it 1003 1003 automatically enables the APIC feature when set. 1004 </desc> 1005 </const> 1006 <const name="IBPBOnVMExit" value="6"> 1007 <desc> 1008 If set, force an indirect branch prediction barrier on VM exits if the 1009 host CPU supports it. This setting will significantly slow down workloads 1010 causing many VM exits, so it is only recommended for situation where there 1011 real need to be paranoid. 1012 </desc> 1013 </const> 1014 <const name="IBPBOnVMEntry" value="7"> 1015 <desc> 1016 If set, force an indirect branch prediction barrier on VM entry if the 1017 host CPU supports it. This setting will significantly slow down workloads 1018 causing many VM exits, so it is only recommended for situation where there 1019 real need to be paranoid. 1004 1020 </desc> 1005 1021 </const> -
trunk/src/VBox/Main/include/MachineImpl.h
r70582 r70606 288 288 BOOL mAPIC; 289 289 BOOL mX2APIC; 290 BOOL mIBPBOnVMExit; 291 BOOL mIBPBOnVMEntry; 290 292 ULONG mCPUCount; 291 293 BOOL mCPUHotPlugEnabled; -
trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
r70579 r70606 1158 1158 hrc = pMachine->GetHWVirtExProperty(HWVirtExPropertyType_UnrestrictedExecution, &fEnableUX); H(); 1159 1159 InsertConfigInteger(pHM, "EnableUX", fEnableUX); 1160 1161 /* Indirect branch prediction boundraries. */ 1162 BOOL fIBPBOnVMExit = false; 1163 hrc = pMachine->GetCPUProperty(CPUPropertyType_IBPBOnVMExit, &fIBPBOnVMExit); H(); 1164 InsertConfigInteger(pHM, "IBPBOnVMExit", fIBPBOnVMExit); 1165 1166 BOOL fIBPBOnVMEntry = false; 1167 hrc = pMachine->GetCPUProperty(CPUPropertyType_IBPBOnVMEntry, &fIBPBOnVMEntry); H(); 1168 InsertConfigInteger(pHM, "IBPBOnVMEntry", fIBPBOnVMEntry); 1160 1169 1161 1170 /* Reset overwrite. */ -
trunk/src/VBox/Main/src-server/MachineImpl.cpp
r70582 r70606 196 196 mAPIC = true; 197 197 mX2APIC = false; 198 mIBPBOnVMExit = false; 199 mIBPBOnVMEntry = false; 198 200 mHPETEnabled = false; 199 201 mCpuExecutionCap = 100; /* Maximum CPU execution cap by default. */ … … 2256 2258 break; 2257 2259 2260 case CPUPropertyType_IBPBOnVMExit: 2261 *aValue = mHWData->mIBPBOnVMExit; 2262 break; 2263 2264 case CPUPropertyType_IBPBOnVMEntry: 2265 *aValue = mHWData->mIBPBOnVMEntry; 2266 break; 2267 2258 2268 default: 2259 2269 return E_INVALIDARG; … … 2303 2313 if (aValue) 2304 2314 mHWData->mAPIC = !!aValue; 2315 break; 2316 2317 case CPUPropertyType_IBPBOnVMExit: 2318 i_setModified(IsModified_MachineData); 2319 mHWData.backup(); 2320 mHWData->mIBPBOnVMExit = !!aValue; 2321 break; 2322 2323 case CPUPropertyType_IBPBOnVMEntry: 2324 i_setModified(IsModified_MachineData); 2325 mHWData.backup(); 2326 mHWData->mIBPBOnVMEntry = !!aValue; 2305 2327 break; 2306 2328 … … 8988 9010 mHWData->mAPIC = data.fAPIC; 8989 9011 mHWData->mX2APIC = data.fX2APIC; 9012 mHWData->mIBPBOnVMExit = data.fIBPBOnVMExit; 9013 mHWData->mIBPBOnVMEntry = data.fIBPBOnVMEntry; 8990 9014 mHWData->mCPUCount = data.cCPUs; 8991 9015 mHWData->mCPUHotPlugEnabled = data.fCpuHotPlug; … … 10311 10335 data.fAPIC = !!mHWData->mAPIC; 10312 10336 data.fX2APIC = !!mHWData->mX2APIC; 10337 data.fIBPBOnVMExit = !!mHWData->mIBPBOnVMExit; 10338 data.fIBPBOnVMEntry = !!mHWData->mIBPBOnVMEntry; 10313 10339 data.cCPUs = mHWData->mCPUCount; 10314 10340 data.fCpuHotPlug = !!mHWData->mCPUHotPlugEnabled; -
trunk/src/VBox/Main/xml/Settings.cpp
r69070 r70606 2778 2778 fAPIC(true), 2779 2779 fX2APIC(false), 2780 fIBPBOnVMExit(false), 2781 fIBPBOnVMEntry(false), 2780 2782 enmLongMode(HC_ARCH_BITS == 64 ? Hardware::LongMode_Enabled : Hardware::LongMode_Disabled), 2781 2783 cCPUs(1), … … 2931 2933 && fAPIC == h.fAPIC 2932 2934 && fX2APIC == h.fX2APIC 2935 && fIBPBOnVMExit == h.fIBPBOnVMExit 2936 && fIBPBOnVMEntry == h.fIBPBOnVMEntry 2933 2937 && cCPUs == h.cCPUs 2934 2938 && fCpuHotPlug == h.fCpuHotPlug … … 3933 3937 if (hw.fX2APIC) 3934 3938 hw.fAPIC = true; 3939 pelmCPUChild = pelmHwChild->findChildElement("IBPBOn"); 3940 if (pelmCPUChild) 3941 { 3942 pelmCPUChild->getAttributeValue("vmexit", hw.fIBPBOnVMExit); 3943 pelmCPUChild->getAttributeValue("vmentry", hw.fIBPBOnVMEntry); 3944 } 3935 3945 3936 3946 if ((pelmCPUChild = pelmHwChild->findChildElement("CpuIdTree"))) … … 5259 5269 if (m->sv >= SettingsVersion_v1_16) 5260 5270 { 5271 if (hw.fIBPBOnVMEntry || hw.fIBPBOnVMExit) 5272 { 5273 xml::ElementNode *pelmChild = pelmCPU->createChild("IBPBOn"); 5274 if (hw.fIBPBOnVMExit) 5275 pelmChild->setAttribute("vmexit", hw.fIBPBOnVMExit); 5276 if (hw.fIBPBOnVMEntry) 5277 pelmChild->setAttribute("vmentry", hw.fIBPBOnVMEntry); 5278 } 5261 5279 } 5262 5280 if (m->sv >= SettingsVersion_v1_14 && hw.enmLongMode != Hardware::LongMode_Legacy) … … 6930 6948 || hardwareMachine.biosSettings.apicMode != APICMode_APIC 6931 6949 || !hardwareMachine.fAPIC 6932 || hardwareMachine.fX2APIC) 6950 || hardwareMachine.fX2APIC 6951 || hardwareMachine.fIBPBOnVMExit 6952 || hardwareMachine.fIBPBOnVMEntry) 6933 6953 { 6934 6954 m->sv = SettingsVersion_v1_16; -
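For reference, a machine with either flag set would be serialized roughly as follows. This is a sketch derived from the hunk above: placement under the CPU element follows pelmCPU->createChild("IBPBOn"), while the true/false attribute rendering is an assumption rather than verbatim output:

<CPU>
  <IBPBOn vmexit="true" vmentry="true"/>
</CPU>

As the version-bump logic at the end of the hunk shows, enabling either flag also forces the settings file up to version 1.16.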
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r69221 r70606 49 49 ; Use define because I'm too lazy to convert the struct. 50 50 %define XMM_OFF_IN_X86FXSTATE 160 51 52 ;; Spectre filler for 32-bit mode. 53 ; Some user space address that points to a 4MB page boundrary in hope that it 54 ; will somehow make it less useful. 55 %define SPECTRE_FILLER32 0x227fffff 56 ;; Spectre filler for 64-bit mode. 57 ; Choosen to be an invalid address (also with 5 level paging). 58 %define SPECTRE_FILLER64 0x02204204207fffff 59 ;; Spectre filler for the current CPU mode. 60 %ifdef RT_ARCH_AMD64 61 %define SPECTRE_FILLER SPECTRE_FILLER64 62 %else 63 %define SPECTRE_FILLER SPECTRE_FILLER32 64 %endif 51 65 52 66 ;; … … 224 238 %define MYPOPSEGS MYPOPSEGS32 225 239 %endif 240 241 ;; 242 ; Creates an indirect branch prediction barrier on CPUs that need and supports that. 243 ; @clobbers eax, edx, ecx 244 ; @param 1 How to address CPUMCTX. 245 ; @param 2 Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT) 246 %macro INDIRECT_BRANCH_PREDICTION_BARRIER 2 247 test byte [%1 + CPUMCTX.fWorldSwitcher], %2 248 jz %%no_indirect_branch_barrier 249 mov ecx, MSR_IA32_PRED_CMD 250 mov eax, MSR_IA32_PRED_CMD_F_IBPB 251 xor edx, edx 252 wrmsr 253 %%no_indirect_branch_barrier: 254 %endmacro 226 255 227 256 … … 1185 1214 1186 1215 mov [ss:xDI + CPUMCTX.eax], eax 1216 mov xAX, SPECTRE_FILLER 1187 1217 mov [ss:xDI + CPUMCTX.ebx], ebx 1218 mov xBX, xAX 1188 1219 mov [ss:xDI + CPUMCTX.ecx], ecx 1220 mov xCX, xAX 1189 1221 mov [ss:xDI + CPUMCTX.edx], edx 1222 mov xDX, xAX 1190 1223 mov [ss:xDI + CPUMCTX.esi], esi 1224 mov xSI, xAX 1191 1225 mov [ss:xDI + CPUMCTX.ebp], ebp 1226 mov xBP, xAX 1192 1227 mov xAX, cr2 1193 1228 mov [ss:xDI + CPUMCTX.cr2], xAX … … 1199 1234 pop dword [ss:xDI + CPUMCTX.edi] ; The guest edi we pushed above. 1200 1235 %endif 1236 1237 ; Fight spectre. 1238 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT 1201 1239 1202 1240 %ifndef VMX_SKIP_TR … … 1416 1454 ; Don't mess with ESP anymore!!! 1417 1455 1456 ; Fight spectre. 1457 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY 1458 1418 1459 ; Load guest general purpose registers. 1419 1460 mov eax, [xSI + CPUMCTX.eax] … … 1490 1531 1491 1532 mov qword [xDI + CPUMCTX.eax], rax 1533 mov rax, SPECTRE_FILLER64 1492 1534 mov qword [xDI + CPUMCTX.ebx], rbx 1535 mov rbx, rax 1493 1536 mov qword [xDI + CPUMCTX.ecx], rcx 1537 mov rcx, rax 1494 1538 mov qword [xDI + CPUMCTX.edx], rdx 1539 mov rdx, rax 1495 1540 mov qword [xDI + CPUMCTX.esi], rsi 1541 mov rsi, rax 1496 1542 mov qword [xDI + CPUMCTX.ebp], rbp 1543 mov rbp, rax 1497 1544 mov qword [xDI + CPUMCTX.r8], r8 1545 mov r8, rax 1498 1546 mov qword [xDI + CPUMCTX.r9], r9 1547 mov r9, rax 1499 1548 mov qword [xDI + CPUMCTX.r10], r10 1549 mov r10, rax 1500 1550 mov qword [xDI + CPUMCTX.r11], r11 1551 mov r11, rax 1501 1552 mov qword [xDI + CPUMCTX.r12], r12 1553 mov r12, rax 1502 1554 mov qword [xDI + CPUMCTX.r13], r13 1555 mov r13, rax 1503 1556 mov qword [xDI + CPUMCTX.r14], r14 1557 mov r14, rax 1504 1558 mov qword [xDI + CPUMCTX.r15], r15 1559 mov r15, rax 1505 1560 mov rax, cr2 1506 1561 mov qword [xDI + CPUMCTX.cr2], rax … … 1508 1563 pop xAX ; The guest rdi we pushed above 1509 1564 mov qword [xDI + CPUMCTX.edi], rax 1565 1566 ; Fight spectre. 1567 INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT 1510 1568 1511 1569 %ifndef VMX_SKIP_TR … … 1704 1762 ; Note: assumes success! 1705 1763 ; Don't mess with ESP anymore!!! 1764 1765 ; Fight spectre. 
1766 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY 1706 1767 1707 1768 ; Load guest general purpose registers. … … 1833 1894 vmsave 1834 1895 1896 ; Fight spectre. 1897 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY 1898 1835 1899 ; Setup xAX for VMLOAD. 1836 1900 mov xAX, [xBP + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; x86: take low dword only) … … 1870 1934 1871 1935 mov [ss:xAX + CPUMCTX.ebx], ebx 1936 mov xBX, SPECTRE_FILLER 1872 1937 mov [ss:xAX + CPUMCTX.ecx], ecx 1938 mov xCX, xBX 1873 1939 mov [ss:xAX + CPUMCTX.edx], edx 1940 mov xDX, xBX 1874 1941 mov [ss:xAX + CPUMCTX.esi], esi 1942 mov xSI, xBX 1875 1943 mov [ss:xAX + CPUMCTX.edi], edi 1944 mov xDI, xBX 1876 1945 mov [ss:xAX + CPUMCTX.ebp], ebp 1946 mov xBP, xBX 1947 1948 ; Fight spectre. Note! Trashes xAX! 1949 INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT 1877 1950 1878 1951 ; Restore the host xcr0 if necessary. … … 1978 2051 vmsave 1979 2052 2053 ; Fight spectre. 2054 INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY 2055 1980 2056 ; Setup rax for VMLOAD. 1981 2057 mov rax, [rbp + xCB * 2 + RTHCPHYS_CB] ; HCPhysVmcb (64 bits physical address; take low dword only) … … 2022 2098 2023 2099 mov qword [rax + CPUMCTX.ebx], rbx 2100 mov rbx, SPECTRE_FILLER64 2024 2101 mov qword [rax + CPUMCTX.ecx], rcx 2102 mov rcx, rbx 2025 2103 mov qword [rax + CPUMCTX.edx], rdx 2104 mov rdx, rbx 2026 2105 mov qword [rax + CPUMCTX.esi], rsi 2106 mov rsi, rbx 2027 2107 mov qword [rax + CPUMCTX.edi], rdi 2108 mov rdi, rbx 2028 2109 mov qword [rax + CPUMCTX.ebp], rbp 2110 mov rbp, rbx 2029 2111 mov qword [rax + CPUMCTX.r8], r8 2112 mov r8, rbx 2030 2113 mov qword [rax + CPUMCTX.r9], r9 2114 mov r9, rbx 2031 2115 mov qword [rax + CPUMCTX.r10], r10 2116 mov r10, rbx 2032 2117 mov qword [rax + CPUMCTX.r11], r11 2118 mov r11, rbx 2033 2119 mov qword [rax + CPUMCTX.r12], r12 2120 mov r12, rbx 2034 2121 mov qword [rax + CPUMCTX.r13], r13 2122 mov r13, rbx 2035 2123 mov qword [rax + CPUMCTX.r14], r14 2124 mov r14, rbx 2036 2125 mov qword [rax + CPUMCTX.r15], r15 2126 mov r15, rbx 2127 2128 ; Fight spectre. Note! Trashes rax! 2129 INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT 2037 2130 2038 2131 ; Restore the host xcr0 if necessary. -
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r70555 r70606 1702 1702 pFeatures->uStepping); 1703 1703 1704 PCCPUMCPUIDLEAF pLeaf= cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);1705 if (p Leaf)1706 pFeatures->cMaxPhysAddrWidth = p Leaf->uEax & 0xff;1704 PCCPUMCPUIDLEAF const pExtLeaf8 = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008); 1705 if (pExtLeaf8) 1706 pFeatures->cMaxPhysAddrWidth = pExtLeaf8->uEax & 0xff; 1707 1707 else if (pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PSE36) 1708 1708 pFeatures->cMaxPhysAddrWidth = 36; … … 1743 1743 pFeatures->fAvx512Foundation = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F); 1744 1744 pFeatures->fClFlushOpt = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT); 1745 1746 pFeatures->fIbpb = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB); 1747 pFeatures->fIbrs = pFeatures->fIbpb; 1748 pFeatures->fStibp = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_STIBP); 1749 pFeatures->fArchCap = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP); 1745 1750 } 1746 1751 … … 1782 1787 pFeatures->fMmx |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX); 1783 1788 pFeatures->fTsc |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC); 1789 pFeatures->fIbpb |= pExtLeaf8 && (pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB); 1784 1790 pFeatures->fAmdMmxExts = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX); 1785 1791 pFeatures->fXop = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP); … … 2255 2261 2256 2262 /* 2257 * Configure XSAVE offsets according to the CPUID info .2263 * Configure XSAVE offsets according to the CPUID info and set the feature flags. 2258 2264 */ 2259 2265 memset(&pVM->aCpus[0].cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVM->aCpus[0].cpum.s.Guest.aoffXState)); … … 3125 3131 //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet. 3126 3132 ; 3127 pCurLeaf->uEdx &= 0; 3133 pCurLeaf->uEdx &= 0; /** @todo X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB, X86_CPUID_STEXT_FEATURE_EDX_STIBP and X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP */ 3128 3134 3129 3135 if (pCpum->u8PortableCpuIdLevel > 0) … … 3508 3514 { 3509 3515 pCurLeaf->uEax &= UINT32_C(0x0000ffff); /* Virtual & physical address sizes only. */ 3510 pCurLeaf->uEbx = 0; /* reserved */3516 pCurLeaf->uEbx = 0; /* reserved - [12] == IBPB */ 3511 3517 pCurLeaf->uEdx = 0; /* reserved */ 3512 3518 … … 5983 5989 { 5984 5990 DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction", 0, 1, 0), 5991 DBGFREGSUBFIELD_RO("UMIP\0" "User mode insturction prevention", 2, 1, 0), 5985 5992 DBGFREGSUBFIELD_RO("PKU\0" "Protection Key for Usermode pages", 3, 1, 0), 5986 DBGFREGSUBFIELD_RO("OSPKU\0" "CR4.PKU mirror", 4, 1, 0), 5993 DBGFREGSUBFIELD_RO("OSPKE\0" "CR4.PKU mirror", 4, 1, 0), 5994 DBGFREGSUBFIELD_RO("MAWAU\0" "Value used by BNDLDX & BNDSTX", 17, 5, 0), 5995 DBGFREGSUBFIELD_RO("RDPID\0" "Read processor ID support", 22, 1, 0), 5996 DBGFREGSUBFIELD_RO("SGX_LC\0" "Supports SGX Launch Configuration", 30, 1, 0), 5997 DBGFREGSUBFIELD_TERMINATOR() 5998 }; 5999 6000 /** CPUID(7,0).EDX field descriptions. */ 6001 static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] = 6002 { 6003 DBGFREGSUBFIELD_RO("IBRS_IBPB\0" "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB", 26, 1, 0), 6004 DBGFREGSUBFIELD_RO("STIBP\0" "Supports IA32_SPEC_CTRL.STIBP", 27, 1, 0), 6005 DBGFREGSUBFIELD_RO("ARCHCAP\0" "Supports IA32_ARCH_CAP", 29, 1, 0), 5987 6006 DBGFREGSUBFIELD_TERMINATOR() 5988 6007 }; … … 6073 6092 }; 6074 6093 6094 /** CPUID(0x80000008,0).EBX field descriptions. 
*/ 6095 static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] = 6096 { 6097 DBGFREGSUBFIELD_RO("CLZERO\0" "Clear zero instruction (cacheline)", 0, 1, 0), 6098 DBGFREGSUBFIELD_RO("IRPerf\0" "Instructions retired count support", 1, 1, 0), 6099 DBGFREGSUBFIELD_RO("XSaveErPtr\0" "Save/restore error pointers (FXSAVE/RSTOR*)", 2, 1, 0), 6100 DBGFREGSUBFIELD_RO("IBPB\0" "Supports the IBPB command in IA32_PRED_CMD", 12, 1, 0), 6101 DBGFREGSUBFIELD_TERMINATOR() 6102 }; 6103 6075 6104 6076 6105 static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc, … … 6275 6304 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56); 6276 6305 if (pCurLeaf->uEdx || Host.uEdx) 6277 pHlp->pfnPrintf(pHlp, "%36s %#x (%#x)\n", "Ext Features EDX:", pCurLeaf->uEdx, Host.uEdx);6306 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56); 6278 6307 } 6279 6308 else … … 6282 6311 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features ECX:", 36); 6283 6312 if (pCurLeaf->uEdx) 6284 pHlp->pfnPrintf(pHlp, "%36s %#x\n", "Ext Features EDX:", pCurLeaf->uEdx);6313 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features EDX:", 36); 6285 6314 } 6286 6315 break; … … 6770 6799 } 6771 6800 6772 if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0)) != NULL) 6773 { 6774 uint32_t uEAX = pCurLeaf->uEax; 6775 uint32_t uECX = pCurLeaf->uEcx; 6776 6777 pHlp->pfnPrintf(pHlp, 6778 "Physical Address Width: %d bits\n" 6779 "Virtual Address Width: %d bits\n" 6780 "Guest Physical Address Width: %d bits\n", 6781 (uEAX >> 0) & 0xff, 6782 (uEAX >> 8) & 0xff, 6783 (uEAX >> 16) & 0xff); 6784 pHlp->pfnPrintf(pHlp, 6785 "Physical Core Count: %d\n", 6786 ((uECX >> 0) & 0xff) + 1); 6801 pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0); 6802 if (pCurLeaf != NULL) 6803 { 6804 if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity)) 6805 { 6806 if (iVerbosity < 1) 6807 cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34); 6808 else 6809 cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56); 6810 } 6811 6812 if (iVerbosity) 6813 { 6814 uint32_t uEAX = pCurLeaf->uEax; 6815 uint32_t uECX = pCurLeaf->uEcx; 6816 6817 pHlp->pfnPrintf(pHlp, 6818 "Physical Address Width: %d bits\n" 6819 "Virtual Address Width: %d bits\n" 6820 "Guest Physical Address Width: %d bits\n", 6821 (uEAX >> 0) & 0xff, 6822 (uEAX >> 8) & 0xff, 6823 (uEAX >> 16) & 0xff); 6824 pHlp->pfnPrintf(pHlp, 6825 "Physical Core Count: %d\n", 6826 ((uECX >> 0) & 0xff) + 1); 6827 } 6787 6828 } 6788 6829 -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r70557 r70606 453 453 "|EnableLargePages" 454 454 "|EnableVPID" 455 "|IBPBOnVMExit" 456 "|IBPBOnVMEntry" 455 457 "|TPRPatchingEnabled" 456 458 "|64bitEnabled" … … 611 613 * available. */ 612 614 rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true); 615 AssertLogRelRCReturn(rc, rc); 616 617 /** @cfgm{/HM/IBPBOnVMExit, bool} 618 * Costly paranoia setting. */ 619 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false); 620 AssertLogRelRCReturn(rc, rc); 621 622 /** @cfgm{/HM/IBPBOnVMEntry, bool} 623 * Costly paranoia setting. */ 624 rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false); 613 625 AssertLogRelRCReturn(rc, rc); 614 626 … … 1163 1175 Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */ 1164 1176 pVM->hm.s.fTprPatchingAllowed = false; 1177 } 1178 1179 /* 1180 * Sync options. 1181 */ 1182 /** @todo Move this out of of CPUMCTX and into some ring-0 only HM structure. 1183 * That will require a little bit of work, of course. */ 1184 for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++) 1185 { 1186 PVMCPU pVCpu = &pVM->aCpus[iCpu]; 1187 PCPUMCTX pCpuCtx = CPUMQueryGuestCtxPtr(pVCpu); 1188 pCpuCtx->fWorldSwitcher &= ~(CPUMCTX_WSF_IBPB_EXIT | CPUMCTX_WSF_IBPB_ENTRY); 1189 if (pVM->cpum.ro.HostFeatures.fIbpb) 1190 { 1191 if (pVM->hm.s.fIbpbOnVmExit) 1192 pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_EXIT; 1193 if (pVM->hm.s.fIbpbOnVmEntry) 1194 pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_ENTRY; 1195 } 1196 if (iCpu == 0) 1197 LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%d fIbpbOnVmEntry=%d)\n", 1198 pCpuCtx->fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry)); 1165 1199 } 1166 1200 -
trunk/src/VBox/VMM/include/CPUMInternal.mac
r69764 r70606 119 119 ; 120 120 ; Guest context state 121 ; (Identical to the .Hyper chunk below .)121 ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.) 122 122 ; 123 123 .Guest resq 0 … … 220 220 .Guest.msrKERNELGSBASE resb 8 221 221 .Guest.uMsrPadding0 resb 8 222 alignb 8 222 223 .Guest.aXcr resq 2 223 224 .Guest.fXStateMask resq 1 224 225 .Guest.pXStateR0 RTR0PTR_RES 1 226 alignb 8 225 227 .Guest.pXStateR3 RTR3PTR_RES 1 228 alignb 8 226 229 .Guest.pXStateRC RTRCPTR_RES 1 227 230 .Guest.aoffXState resw 64 228 %if HC_ARCH_BITS == 64 229 .Guest.abPadding resb 4 230 %else 231 .Guest.abPadding resb 12 232 %endif 231 .Guest.fWorldSwitcher resd 1 232 alignb 8 233 233 .Guest.hwvirt.svm.uMsrHSavePa resq 1 234 234 .Guest.hwvirt.svm.GCPhysVmcb resq 1 … … 506 506 .Hyper.msrKERNELGSBASE resb 8 507 507 .Hyper.uMsrPadding0 resb 8 508 alignb 8 508 509 .Hyper.aXcr resq 2 509 510 .Hyper.fXStateMask resq 1 510 511 .Hyper.pXStateR0 RTR0PTR_RES 1 512 alignb 8 511 513 .Hyper.pXStateR3 RTR3PTR_RES 1 514 alignb 8 512 515 .Hyper.pXStateRC RTRCPTR_RES 1 513 516 .Hyper.aoffXState resw 64 514 %if HC_ARCH_BITS == 64 515 .Hyper.abPadding resb 4 516 %else 517 .Hyper.abPadding resb 12 518 %endif 517 .Hyper.fWorldSwitcher resd 1 518 alignb 8 519 519 .Hyper.hwvirt.svm.uMsrHSavePa resq 1 520 520 .Hyper.hwvirt.svm.GCPhysVmcb resq 1 -
trunk/src/VBox/VMM/include/HMInternal.h
r70415 r70606 417 417 /** Set if posted interrupt processing is enabled. */ 418 418 bool fPostedIntrs; 419 /** Alignment. */ 420 bool fAlignment0; 421 422 /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */ 423 uint32_t fHostKernelFeatures; 419 /** Set if indirect branch prediction barrier on VM exit. */ 420 bool fIbpbOnVmExit; 421 /** Set if indirect branch prediction barrier on VM entry. */ 422 bool fIbpbOnVmEntry; 423 /** Explicit padding. */ 424 bool afPadding[3]; 424 425 425 426 /** Maximum ASID allowed. */ … … 429 430 uint32_t cMaxResumeLoops; 430 431 432 /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */ 433 uint32_t fHostKernelFeatures; 434 435 /** Size of the guest patch memory block. */ 436 uint32_t cbGuestPatchMem; 431 437 /** Guest allocated memory for patching purposes. */ 432 438 RTGCPTR pGuestPatchMem; 433 439 /** Current free pointer inside the patch block. */ 434 440 RTGCPTR pFreeGuestPatchMem; 435 /** Size of the guest patch memory block. */436 uint32_t cbGuestPatchMem;437 uint32_t u32Alignment0;438 441 439 442 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)