Timestamp: May 21, 2019, 1:56:11 PM (6 years ago)
Location:  trunk
Files:     17 edited
Legend: unchanged context lines are unmarked (leading space), added lines are prefixed with +, removed lines with -.

trunk
    Property svn:mergeinfo modified:
        /branches/VBox-5.1:112367,115992,116543,116550,116568,116573
        /branches/VBox-5.2:119536,120083,120099,120213,120221,120239,123597-123598,123600-123601,123755,124260,124263,124271,124273,124277-124279,124284-124286,124288-124290,125768,125779-125780,125812
   +    /branches/VBox-6.0:130474-130475,130477,130479
        /branches/aeichner/vbox-chromium-cleanup:129816,129818-129851,129853-129861,129871-129872,129876,129880,129882,130013-130015,130036,130094-130095
        /branches/andy/draganddrop:90781-91268
trunk/src/VBox
    Property svn:mergeinfo modified:
        /branches/VBox-5.1/src/VBox:112367,116543,116550,116568,116573
        /branches/VBox-5.2/src/VBox:119536,120083,120099,120213,120221,120239,123597-123598,123600-123601,123755,124263,124273,124277-124279,124284-124286,124288-124290,125768,125779-125780,125812,127158-127159,127162-127167,127180
   +    /branches/VBox-6.0/src/VBox:130474-130475,130477,130479
        /branches/aeichner/vbox-chromium-cleanup/src/VBox:129818-129851,129853-129861,129871-129872,129876,129880,129882,130013-130015,130094-130095
        /branches/andy/draganddrop/src/VBox:90781-91268
trunk/src/VBox/Frontends
    Property svn:mergeinfo modified:
        /branches/VBox-4.3/trunk/src/VBox/Frontends:91223
        /branches/VBox-5.2/src/VBox/Frontends:120213,124288
   +    /branches/VBox-6.0/src/VBox/Frontends:130474-130475,130477,130479
        /branches/andy/draganddrop/src/VBox/Frontends:90781-91268
        /branches/andy/guestctrl20/src/VBox/Frontends:78916,78930
trunk/src/VBox/Frontends/VBoxManage/VBoxManageHelp.cpp
r78512 → r78632

             " [--l1d-flush-on-sched on|off]\n"
             " [--l1d-flush-on-vm-entry on|off]\n"
+            " [--mds-clear-on-sched on|off]\n"
+            " [--mds-clear-on-vm-entry on|off]\n"
             " [--nested-hw-virt on|off]\n"
             " [--cpu-profile \"host|Intel 80[86|286|386]\"]\n"
trunk/src/VBox/Frontends/VBoxManage/VBoxManageModifyVM.cpp
r78064 → r78632

     MODIFYVM_L1D_FLUSH_ON_SCHED,
     MODIFYVM_L1D_FLUSH_ON_VM_ENTRY,
+    MODIFYVM_MDS_CLEAR_ON_SCHED,
+    MODIFYVM_MDS_CLEAR_ON_VM_ENTRY,
     MODIFYVM_NESTED_HW_VIRT,
     MODIFYVM_CPUS,
…
     { "--l1d-flush-on-sched",     MODIFYVM_L1D_FLUSH_ON_SCHED,     RTGETOPT_REQ_BOOL_ONOFF },
     { "--l1d-flush-on-vm-entry",  MODIFYVM_L1D_FLUSH_ON_VM_ENTRY,  RTGETOPT_REQ_BOOL_ONOFF },
+    { "--mds-clear-on-sched",     MODIFYVM_MDS_CLEAR_ON_SCHED,     RTGETOPT_REQ_BOOL_ONOFF },
+    { "--mds-clear-on-vm-entry",  MODIFYVM_MDS_CLEAR_ON_VM_ENTRY,  RTGETOPT_REQ_BOOL_ONOFF },
     { "--nested-hw-virt",         MODIFYVM_NESTED_HW_VIRT,         RTGETOPT_REQ_BOOL_ONOFF },
     { "--cpuid-set",              MODIFYVM_SETCPUID,               RTGETOPT_REQ_UINT32_OPTIONAL_PAIR | RTGETOPT_FLAG_HEX },
…
             break;

+        case MODIFYVM_MDS_CLEAR_ON_SCHED:
+            CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_MDSClearOnEMTScheduling, ValueUnion.f));
+            break;
+
+        case MODIFYVM_MDS_CLEAR_ON_VM_ENTRY:
+            CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_MDSClearOnVMEntry, ValueUnion.f));
+            break;
+
         case MODIFYVM_NESTED_HW_VIRT:
             CHECK_ERROR(sessionMachine, SetCPUProperty(CPUPropertyType_HWVirt, ValueUnion.f));
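Both switches feed the same IMachine::SetCPUProperty path that the existing --l1d-flush-* options use, so on a build containing this change they can be toggled per VM from the command line, e.g. "VBoxManage modifyvm testvm --mds-clear-on-vm-entry on" for the paranoid per-entry variant, or "VBoxManage modifyvm testvm --mds-clear-on-sched off" to opt out of the default scheduling-time clearing ("testvm" is a placeholder VM name, not part of this changeset).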
trunk/src/VBox/Main/idl/VirtualBox.xidl
r78534 → r78632

         causing many VM exits, so it is only recommended for situation where there
         is a real need to be paranoid.
+      </desc>
+    </const>
+    <const name="MDSClearOnEMTScheduling" value="13">
+      <desc>
+        If set and the host is affected by CVE-2018-12126, CVE-2018-12127, or
+        CVE-2018-12130, clears the relevant MDS buffers when the EMT is scheduled
+        to do ring-0 guest execution. There could be a small performance penalty
+        for certain typs of workloads. For security reasons this setting will be
+        enabled by default.
+      </desc>
+    </const>
+    <const name="MDSClearOnVMEntry" value="14">
+      <desc>
+        If set and the host is affected by CVE-2018-12126, CVE-2018-12127, or
+        CVE-2018-12130, clears the relevant MDS buffers on every VM entry. This
+        setting may slow down workloads causing many VM exits, so it is only
+        recommended for situation where there is a real need to be paranoid.
       </desc>
     </const>
trunk/src/VBox/Main/include/MachineImpl.h
r78296 → r78632

         BOOL           mL1DFlushOnSched;
         BOOL           mL1DFlushOnVMEntry;
+        BOOL           mMDSClearOnSched;
+        BOOL           mMDSClearOnVMEntry;
         BOOL           mNestedHWVirt;
         ULONG          mCPUCount;
trunk/src/VBox/Main/src-client/ConsoleImpl2.cpp
r78509 → r78632

             hrc = pMachine->GetCPUProperty(CPUPropertyType_L1DFlushOnVMEntry, &fL1DFlushOnVMEntry); H();
             InsertConfigInteger(pHM, "L1DFlushOnVMEntry", fL1DFlushOnVMEntry);
+
+            BOOL fMDSClearOnSched = true;
+            hrc = pMachine->GetCPUProperty(CPUPropertyType_MDSClearOnEMTScheduling, &fMDSClearOnSched); H();
+            InsertConfigInteger(pHM, "MDSClearOnSched", fMDSClearOnSched);
+
+            BOOL fMDSClearOnVMEntry = false;
+            hrc = pMachine->GetCPUProperty(CPUPropertyType_MDSClearOnVMEntry, &fMDSClearOnVMEntry); H();
+            InsertConfigInteger(pHM, "MDSClearOnVMEntry", fMDSClearOnVMEntry);

             /* Reset overwrite. */
trunk/src/VBox/Main/src-server/MachineImpl.cpp
r78565 → r78632

     mL1DFlushOnSched = true;
     mL1DFlushOnVMEntry = false;
+    mMDSClearOnSched = true;
+    mMDSClearOnVMEntry = false;
     mNestedHWVirt = false;
     mHPETEnabled = false;
…
             break;

+        case CPUPropertyType_MDSClearOnEMTScheduling:
+            *aValue = mHWData->mMDSClearOnSched;
+            break;
+
+        case CPUPropertyType_MDSClearOnVMEntry:
+            *aValue = mHWData->mMDSClearOnVMEntry;
+            break;
+
         default:
             return E_INVALIDARG;
…
             mHWData.backup();
             mHWData->mL1DFlushOnVMEntry = !!aValue;
+            break;
+
+        case CPUPropertyType_MDSClearOnEMTScheduling:
+            i_setModified(IsModified_MachineData);
+            mHWData.backup();
+            mHWData->mMDSClearOnSched = !!aValue;
+            break;
+
+        case CPUPropertyType_MDSClearOnVMEntry:
+            i_setModified(IsModified_MachineData);
+            mHWData.backup();
+            mHWData->mMDSClearOnVMEntry = !!aValue;
             break;

…
         mHWData->mL1DFlushOnSched = data.fL1DFlushOnSched;
         mHWData->mL1DFlushOnVMEntry = data.fL1DFlushOnVMEntry;
+        mHWData->mMDSClearOnSched = data.fMDSClearOnSched;
+        mHWData->mMDSClearOnVMEntry = data.fMDSClearOnVMEntry;
         mHWData->mNestedHWVirt = data.fNestedHWVirt;
         mHWData->mCPUCount = data.cCPUs;
…
         data.fL1DFlushOnSched = !!mHWData->mL1DFlushOnSched;
         data.fL1DFlushOnVMEntry = !!mHWData->mL1DFlushOnVMEntry;
+        data.fMDSClearOnSched = !!mHWData->mMDSClearOnSched;
+        data.fMDSClearOnVMEntry = !!mHWData->mMDSClearOnVMEntry;
         data.fNestedHWVirt = !!mHWData->mNestedHWVirt;
         data.cCPUs = mHWData->mCPUCount;
trunk/src/VBox/Main/xml/Settings.cpp
r78509 → r78632

           fL1DFlushOnSched(true),
           fL1DFlushOnVMEntry(false),
+          fMDSClearOnSched(true),
+          fMDSClearOnVMEntry(false),
           fNestedHWVirt(false),
           enmLongMode(HC_ARCH_BITS == 64 ? Hardware::LongMode_Enabled : Hardware::LongMode_Disabled),
…
            && fL1DFlushOnSched == h.fL1DFlushOnSched
            && fL1DFlushOnVMEntry == h.fL1DFlushOnVMEntry
+           && fMDSClearOnSched == h.fMDSClearOnSched
+           && fMDSClearOnVMEntry == h.fMDSClearOnVMEntry
            && fNestedHWVirt == h.fNestedHWVirt
            && cCPUs == h.cCPUs
…
             pelmCPUChild->getAttributeValue("vmentry", hw.fL1DFlushOnVMEntry);
         }
+        pelmCPUChild = pelmHwChild->findChildElement("MDSClearOn");
+        if (pelmCPUChild)
+        {
+            pelmCPUChild->getAttributeValue("scheduling", hw.fMDSClearOnSched);
+            pelmCPUChild->getAttributeValue("vmentry", hw.fMDSClearOnVMEntry);
+        }
         pelmCPUChild = pelmHwChild->findChildElement("NestedHWVirt");
         if (pelmCPUChild)
…
         if (hw.fL1DFlushOnVMEntry)
             pelmChild->setAttribute("vmentry", hw.fL1DFlushOnVMEntry);
+    }
+    if (!hw.fMDSClearOnSched || hw.fMDSClearOnVMEntry)
+    {
+        xml::ElementNode *pelmChild = pelmCPU->createChild("MDSClearOn");
+        if (!hw.fMDSClearOnSched)
+            pelmChild->setAttribute("scheduling", hw.fMDSClearOnSched);
+        if (hw.fMDSClearOnVMEntry)
+            pelmChild->setAttribute("vmentry", hw.fMDSClearOnVMEntry);
     }
 }
…
         || hardwareMachine.fSpecCtrlByHost
         || !hardwareMachine.fL1DFlushOnSched
-        || hardwareMachine.fL1DFlushOnVMEntry)
+        || hardwareMachine.fL1DFlushOnVMEntry
+        || !hardwareMachine.fMDSClearOnSched
+        || hardwareMachine.fMDSClearOnVMEntry)
     {
         m->sv = SettingsVersion_v1_16;
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r78431 → r78632


     /*
-     * Copy MSR_IA32_ARCH_CAPABILITIES bits over into the host feature structure.
+     * Copy MSR_IA32_ARCH_CAPABILITIES bits over into the host and guest feature
+     * structure and as well as the guest MSR.
      */
     pVM->cpum.s.HostFeatures.fArchRdclNo             = 0;
…
     pVM->cpum.s.HostFeatures.fArchRsbOverride        = 0;
     pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = 0;
+    pVM->cpum.s.HostFeatures.fArchMdsNo              = 0;
     uint32_t const cStdRange = ASMCpuId_EAX(0);
     if (   ASMIsValidStdRange(cStdRange)
…
     {
         uint64_t const fArchVal = ASMRdMsr(MSR_IA32_ARCH_CAPABILITIES);
-        pVM->cpum.s.HostFeatures.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
-        pVM->cpum.s.HostFeatures.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
-        pVM->cpum.s.HostFeatures.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
-        pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
+        pVM->cpum.s.GuestFeatures.fArchRdclNo
+            = pVM->cpum.s.HostFeatures.fArchRdclNo             = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RDCL_NO);
+        pVM->cpum.s.GuestFeatures.fArchIbrsAll
+            = pVM->cpum.s.HostFeatures.fArchIbrsAll            = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_IBRS_ALL);
+        pVM->cpum.s.GuestFeatures.fArchRsbOverride
+            = pVM->cpum.s.HostFeatures.fArchRsbOverride        = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_RSBO);
+        pVM->cpum.s.GuestFeatures.fArchVmmNeedNotFlushL1d
+            = pVM->cpum.s.HostFeatures.fArchVmmNeedNotFlushL1d = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_VMM_NEED_NOT_FLUSH_L1D);
+        pVM->cpum.s.GuestFeatures.fArchMdsNo
+            = pVM->cpum.s.HostFeatures.fArchMdsNo              = RT_BOOL(fArchVal & MSR_IA32_ARCH_CAP_F_MDS_NO);
+
+        if (pVM->cpum.s.GuestFeatures.fArchCap)
+            for (VMCPUID i = 0; i < pVM->cCpus; i++)
+                pVM->aCpus[i].cpum.s.GuestMsrs.msr.ArchCaps = fArchVal;
     }
     else
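The new fArchMdsNo flag mirrors the MDS_NO bit of IA32_ARCH_CAPABILITIES, which a CPU sets to declare itself not vulnerable to MDS. A minimal, stand-alone sketch of that test outside of VirtualBox (MSR index and bit position taken from the Intel SDM, not from this changeset; the function name is made up):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_IA32_ARCH_CAPABILITIES  UINT32_C(0x0000010a)
    #define ARCH_CAP_F_MDS_NO           (UINT64_C(1) << 5)   /* CPU reports it is not affected by MDS */

    /* uArchCapVal is assumed to have been read with RDMSR from IA32_ARCH_CAPABILITIES,
       after CPUID leaf 7 EDX bit 29 (ARCH_CAP) confirmed that the MSR exists. */
    static bool hostReportsMdsNo(uint64_t uArchCapVal)
    {
        return (uArchCapVal & ARCH_CAP_F_MDS_NO) != 0;
    }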
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r78220 → r78632

 ; @param    2   Which IBPB flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
 ; @param    3   Which FLUSH flag to test for (CPUMCTX_WSF_L1D_ENTRY)
+; @param    4   Which MDS flag to test for (CPUMCTX_WSF_MDS_ENTRY)
-%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 3
+%macro INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER 4
         ; Only one test+jmp when disabled CPUs.
-        test    byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3)
+        test    byte [%1 + CPUMCTX.fWorldSwitcher], (%2 | %3 | %4)
         jz      %%no_barrier_needed
…
         mov     ecx, MSR_IA32_FLUSH_CMD
         wrmsr
+        jmp     %%no_mds_buffer_flushing    ; MDS flushing is included in L1D_FLUSH.
 %%no_cache_flush_barrier:
+
+        ; MDS buffer flushing.
+        test    byte [%1 + CPUMCTX.fWorldSwitcher], %4
+        jz      %%no_mds_buffer_flushing
+        sub     xSP, xSP
+        mov     [xSP], ds
+        verw    [xSP]
+        add     xSP, xSP
+%%no_mds_buffer_flushing:

 %%no_barrier_needed:
…

         ; Fight spectre and similar.
-        INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY
+        INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
         ; Load guest general purpose registers.
…

         ; Fight spectre and similar.
-        INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY
+        INDIRECT_BRANCH_PREDICTION_AND_L1_CACHE_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY, CPUMCTX_WSF_L1D_ENTRY, CPUMCTX_WSF_MDS_ENTRY
         ; Load guest general purpose registers.
…
 ENDPROC VMXR0StartVM64
 %endif ; RT_ARCH_AMD64
+
+
+;;
+; Clears the MDS buffers using VERW.
+ALIGNCODE(16)
+BEGINPROC hmR0MdsClear
+        sub     xSP, xCB
+        mov     [xSP], ds
+        verw    [xSP]
+        add     xSP, xCB
+        ret
+ENDPROC hmR0MdsClear
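The VERW trick used by the macro and by the new hmR0MdsClear helper relies on the MD_CLEAR microcode behaviour: executing VERW with a memory operand that holds a valid, writable selector overwrites the store buffers, fill buffers and load ports that MDS leaks from. A hedged, stand-alone sketch of the same sequence in GCC-style inline assembly, for illustration only (this is not VirtualBox code, which keeps the routine in assembly as shown above):

    #include <stdint.h>

    /* Spill the current DS selector to memory and feed it to VERW.  With the
       MD_CLEAR microcode update this overwrites the MDS-affected buffers;
       without it, VERW is just a harmless legacy segment access check. */
    static inline void mds_clear_buffers(void)
    {
        uint16_t uDs;
        __asm__ __volatile__("mov %%ds, %0" : "=m" (uDs));
        __asm__ __volatile__("verw %0" : : "m" (uDs) : "cc", "memory");
    }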
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r78622 → r78632

         if (pVCpu->CTX_SUFF(pVM)->hm.s.fL1dFlushOnSched)
             ASMWrMsr(MSR_IA32_FLUSH_CMD, MSR_IA32_FLUSH_CMD_F_L1D);
+        else if (pVCpu->CTX_SUFF(pVM)->hm.s.fMdsClearOnSched)
+            hmR0MdsClear();
     }
     return rc;
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r77032 → r78632

     /* [24(0x18)] = */ kCpumMicroarch_Intel_Unknown,
     /* [25(0x19)] = */ kCpumMicroarch_Intel_Unknown,
-    /* [26(0x1a)] = */ kCpumMicroarch_Intel_Core7_Nehalem,
+    /* [26(0x1a)] = */ kCpumMicroarch_Intel_Core7_Nehalem,       /* Nehalem-EP */
     /* [27(0x1b)] = */ kCpumMicroarch_Intel_Unknown,
     /* [28(0x1c)] = */ kCpumMicroarch_Intel_Atom_Bonnell,        /* Diamonville, Pineview, */
…
     /* [76(0x4c)] = */ kCpumMicroarch_Intel_Atom_Airmount,
     /* [77(0x4d)] = */ kCpumMicroarch_Intel_Atom_Silvermont,
-    /* [78(0x4e)] = */ kCpumMicroarch_Intel_Core7_Skylake,       /* unconfirmed */
-    /* [79(0x4f)] = */ kCpumMicroarch_Intel_Core7_Broadwell,     /* unconfirmed, Broadwell-E */
+    /* [78(0x4e)] = */ kCpumMicroarch_Intel_Core7_Skylake,
+    /* [79(0x4f)] = */ kCpumMicroarch_Intel_Core7_Broadwell,     /* Broadwell-E */
     /* [80(0x50)] = */ kCpumMicroarch_Intel_Unknown,
     /* [81(0x51)] = */ kCpumMicroarch_Intel_Unknown,
…
     /* [83(0x53)] = */ kCpumMicroarch_Intel_Unknown,
     /* [84(0x54)] = */ kCpumMicroarch_Intel_Unknown,
-    /* [85(0x55)] = */ kCpumMicroarch_Intel_Core7_Skylake,       /* server cpu */
+    /* [85(0x55)] = */ kCpumMicroarch_Intel_Core7_Skylake,       /* server cpu; skylake <= 4, cascade lake > 5 */
     /* [86(0x56)] = */ kCpumMicroarch_Intel_Core7_Broadwell,     /* Xeon D-1540, Broadwell-DE */
     /* [87(0x57)] = */ kCpumMicroarch_Intel_Phi_KnightsLanding,
…
     /*[108(0x6c)] = */ kCpumMicroarch_Intel_Unknown,
     /*[109(0x6d)] = */ kCpumMicroarch_Intel_Unknown,
-    /*[110(0x6e)] = */ kCpumMicroarch_Intel_Unknown,
+    /*[110(0x6e)] = */ kCpumMicroarch_Intel_Atom_Airmount,       /* or silvermount? */
     /*[111(0x6f)] = */ kCpumMicroarch_Intel_Unknown,
     /*[112(0x70)] = */ kCpumMicroarch_Intel_Unknown,
…
     /*[115(0x73)] = */ kCpumMicroarch_Intel_Unknown,
     /*[116(0x74)] = */ kCpumMicroarch_Intel_Unknown,
-    /*[117(0x75)] = */ kCpumMicroarch_Intel_Unknown,
+    /*[117(0x75)] = */ kCpumMicroarch_Intel_Atom_Airmount,       /* or silvermount? */
     /*[118(0x76)] = */ kCpumMicroarch_Intel_Unknown,
     /*[119(0x77)] = */ kCpumMicroarch_Intel_Unknown,
…
     /*[140(0x8c)] = */ kCpumMicroarch_Intel_Unknown,
     /*[141(0x8d)] = */ kCpumMicroarch_Intel_Unknown,
-    /*[142(0x8e)] = */ kCpumMicroarch_Intel_Core7_KabyLake,      /* Stepping 0xA is CoffeeLake, 9 is KabyLake. */
+    /*[142(0x8e)] = */ kCpumMicroarch_Intel_Core7_KabyLake,      /* Stepping >= 0xB is Whiskey Lake, 0xA is CoffeeLake. */
     /*[143(0x8f)] = */ kCpumMicroarch_Intel_Unknown,
     /*[144(0x90)] = */ kCpumMicroarch_Intel_Unknown,
…
     /*[156(0x9c)] = */ kCpumMicroarch_Intel_Unknown,
     /*[157(0x9d)] = */ kCpumMicroarch_Intel_Unknown,
-    /*[158(0x9e)] = */ kCpumMicroarch_Intel_Core7_KabyLake,      /* Stepping 0xA is CoffeeLake, 9 is KabyLake. */
+    /*[158(0x9e)] = */ kCpumMicroarch_Intel_Core7_KabyLake,      /* Stepping >= 0xB is Whiskey Lake, 0xA is CoffeeLake. */
     /*[159(0x9f)] = */ kCpumMicroarch_Intel_Unknown,
 };
…
     {
         CPUMMICROARCH enmMicroArch = g_aenmIntelFamily06[bModel];
-        if (   enmMicroArch == kCpumMicroarch_Intel_Core7_KabyLake
-            && bStepping >= 0xa)
-            enmMicroArch = kCpumMicroarch_Intel_Core7_CoffeeLake;
+        if (enmMicroArch == kCpumMicroarch_Intel_Core7_KabyLake)
+        {
+            if (bStepping >= 0xa && bStepping <= 0xc)
+                enmMicroArch = kCpumMicroarch_Intel_Core7_CoffeeLake;
+            else if (bStepping >= 0xc)
+                enmMicroArch = kCpumMicroarch_Intel_Core7_WhiskeyLake;
+        }
+        else if (   enmMicroArch == kCpumMicroarch_Intel_Core7_Skylake
+                 && bModel == 0x55
+                 && bStepping >= 5)
+            enmMicroArch = kCpumMicroarch_Intel_Core7_CascadeLake;
         return enmMicroArch;
     }
…
             CASE_RET_STR(kCpumMicroarch_Intel_Core7_KabyLake);
             CASE_RET_STR(kCpumMicroarch_Intel_Core7_CoffeeLake);
+            CASE_RET_STR(kCpumMicroarch_Intel_Core7_WhiskeyLake);
+            CASE_RET_STR(kCpumMicroarch_Intel_Core7_CascadeLake);
             CASE_RET_STR(kCpumMicroarch_Intel_Core7_CannonLake);
             CASE_RET_STR(kCpumMicroarch_Intel_Core7_IceLake);
…
         pFeatures->fFlushCmd   = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD);
         pFeatures->fArchCap    = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP);
+        pFeatures->fMdsClear   = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR);
     }
…
     CPUMISAEXTCFG   enmInvpcid;
     CPUMISAEXTCFG   enmFlushCmdMsr;
+    CPUMISAEXTCFG   enmMdsClear;
+    CPUMISAEXTCFG   enmArchCapMsr;

     CPUMISAEXTCFG   enmAbm;
…
                            ;
             pCurLeaf->uEdx &= 0
+                           | (pConfig->enmMdsClear ? X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR : 0)
                            //| X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB       RT_BIT(26)
                            //| X86_CPUID_STEXT_FEATURE_EDX_STIBP           RT_BIT(27)
                            | (pConfig->enmFlushCmdMsr ? X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD : 0)
-                           //| X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP         RT_BIT(29)
+                           | (pConfig->enmArchCapMsr ? X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP : 0)
                            ;
…
                 PORTABLE_DISABLE_FEATURE_BIT(    1, pCurLeaf->uEcx, PREFETCHWT1, X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1);
                 PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, FLUSH_CMD, X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD, pConfig->enmFlushCmdMsr);
+                PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, MD_CLEAR, X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR, pConfig->enmMdsClear);
+                PORTABLE_DISABLE_FEATURE_BIT_CFG(3, pCurLeaf->uEdx, ARCHCAP, X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP, pConfig->enmArchCapMsr);
             }
+
+            /* Dependencies. */
+            if (!(pCurLeaf->uEdx & X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD))
+                pCurLeaf->uEdx &= ~X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;

             /* Force standard feature bits. */
…
             if (pConfig->enmFlushCmdMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
                 pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_FLUSH_CMD;
+            if (pConfig->enmMdsClear == CPUMISAEXTCFG_ENABLED_ALWAYS)
+                pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_MD_CLEAR;
+            if (pConfig->enmArchCapMsr == CPUMISAEXTCFG_ENABLED_ALWAYS)
+                pCurLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
             break;
         }
…
     AssertLogRelRCReturn(rc, rc);

+    /** @cfgm{/CPUM/IsaExts/MdsClear, isaextcfg, true}
+     * Whether to advertise the VERW and MDS related IA32_FLUSH_CMD MSR bits to
+     * the guest.  Requires FlushCmdMsr to be present too.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "MdsClear", &pConfig->enmMdsClear, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/CPUM/IsaExts/ArchCapMSr, isaextcfg, true}
+     * Whether to expose the MSR_IA32_ARCH_CAPABILITIES MSR to the guest.
+     */
+    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "ArchCapMsr", &pConfig->enmArchCapMsr, CPUMISAEXTCFG_ENABLED_SUPPORTED);
+    AssertLogRelRCReturn(rc, rc);
+

     /* AMD: */
…
     if (pVM->cpum.s.HostFeatures.fArchCap)
     {
-        pLeaf->uEdx |= X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP;
-
         /* Install the architectural capabilities MSR. */
         pMsrRange = cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES);
…
     pLeaf = cpumR3CpuIdGetExactLeaf(&pVM->cpum.s, UINT32_C(0x00000007), 0);
     if (pLeaf)
-        pLeaf->uEdx &= ~(  X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB | X86_CPUID_STEXT_FEATURE_EDX_STIBP
-                         | X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP);
+        pLeaf->uEdx &= ~(X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB | X86_CPUID_STEXT_FEATURE_EDX_STIBP);
     pVM->cpum.s.GuestFeatures.fSpeculationControl = 0;
     Log(("CPUM: ClearGuestCpuIdFeature: Disabled speculation control!\n"));
…
 static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
 {
+    DBGFREGSUBFIELD_RO("MD_CLEAR\0"         "Supports MDS related buffer clearing",        10, 1, 0),
     DBGFREGSUBFIELD_RO("IBRS_IBPB\0"        "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB",  26, 1, 0),
     DBGFREGSUBFIELD_RO("STIBP\0"            "Supports IA32_SPEC_CTRL.STIBP",               27, 1, 0),
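The MD_CLEAR capability that all of this keys off is CPUID leaf 7, sub-leaf 0, EDX bit 10, the bit the new DBGF sub-field above describes. A small stand-alone sketch of the detection, using GCC's cpuid.h helper rather than VirtualBox's CPUM wrappers (illustrative only):

    #include <cpuid.h>
    #include <stdbool.h>

    /* Returns true when the CPU advertises VERW-based MDS buffer clearing,
       i.e. CPUID.(EAX=7,ECX=0):EDX[10] (the MD_CLEAR bit). */
    static bool cpu_has_md_clear(void)
    {
        unsigned int eax, ebx, ecx, edx;
        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
            return false;               /* leaf 7 not supported */
        return (edx & (1u << 10)) != 0;
    }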
trunk/src/VBox/VMM/VMMR3/CPUMR3Db.cpp
r76886 → r78632

     {
         PCCPUMMSRRANGE papToAdd[10];
-        uint32_t cToAdd = 0;
+        uint32_t       cToAdd = 0;

         /*
…
             };
             papToAdd[cToAdd++] = &s_FlushCmd;
+        }
+
+        /*
+         * The MSR_IA32_ARCH_CAPABILITIES was introduced in various spectre MCUs, or at least
+         * documented in relation to such.
+         */
+        if (pVM->cpum.s.GuestFeatures.fArchCap && !cpumLookupMsrRange(pVM, MSR_IA32_ARCH_CAPABILITIES))
+        {
+            static CPUMMSRRANGE const s_ArchCaps =
+            {
+                /*.uFirst =*/       MSR_IA32_ARCH_CAPABILITIES,
+                /*.uLast =*/        MSR_IA32_ARCH_CAPABILITIES,
+                /*.enmRdFn =*/      kCpumMsrRdFn_Ia32ArchCapabilities,
+                /*.enmWrFn =*/      kCpumMsrWrFn_ReadOnly,
+                /*.offCpumCpu =*/   UINT16_MAX,
+                /*.fReserved =*/    0,
+                /*.uValue =*/       0,
+                /*.fWrIgnMask =*/   0,
+                /*.fWrGpMask =*/    UINT64_MAX,
+                /*.szName = */      "IA32_ARCH_CAPABILITIES"
+            };
+            papToAdd[cToAdd++] = &s_ArchCaps;
         }
trunk/src/VBox/VMM/VMMR3/HM.cpp
r78254 → r78632

                               "|L1DFlushOnSched"
                               "|L1DFlushOnVMEntry"
+                              "|MDSClearOnSched"
+                              "|MDSClearOnVMEntry"
                               "|TPRPatchingEnabled"
                               "|64bitEnabled"
…

     /** @cfgm{/HM/L1DFlushOnSched, bool, true}
-     * CVS-2018-3646 workaround, ignored on CPUs that aren't affected. */
+     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
     rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnSched", &pVM->hm.s.fL1dFlushOnSched, true);
     AssertLogRelRCReturn(rc, rc);

     /** @cfgm{/HM/L1DFlushOnVMEntry, bool}
-     * CVS-2018-3646 workaround, ignored on CPUs that aren't affected. */
+     * CVE-2018-3646 workaround, ignored on CPUs that aren't affected. */
     rc = CFGMR3QueryBoolDef(pCfgHm, "L1DFlushOnVMEntry", &pVM->hm.s.fL1dFlushOnVmEntry, false);
     AssertLogRelRCReturn(rc, rc);
…
     rc = CFGMR3QueryBoolDef(pCfgHm, "SpecCtrlByHost", &pVM->hm.s.fSpecCtrlByHost, false);
     AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/HM/MDSClearOnSched, bool, true}
+     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
+     * ignored on CPUs that aren't affected. */
+    rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnSched", &pVM->hm.s.fMdsClearOnSched, true);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/HM/MDSClearOnVmEntry, bool, false}
+     * CVE-2018-12126, CVE-2018-12130, CVE-2018-12127, CVE-2019-11091 workaround,
+     * ignored on CPUs that aren't affected. */
+    rc = CFGMR3QueryBoolDef(pCfgHm, "MDSClearOnVmEntry", &pVM->hm.s.fMdsClearOnVmEntry, false);
+    AssertLogRelRCReturn(rc, rc);
+
+    /* Disable MDSClearOnSched if MDSClearOnVmEntry is enabled. */
+    if (pVM->hm.s.fMdsClearOnVmEntry)
+        pVM->hm.s.fMdsClearOnSched = false;

     /** @cfgm{/HM/LovelyMesaDrvWorkaround,bool}
…

     /*
+     * Check if MDS flush is needed/possible.
+     * On atoms and knight family CPUs, we will only allow clearing on scheduling.
+     */
+    if (   !pVM->cpum.ro.HostFeatures.fMdsClear
+        || pVM->cpum.ro.HostFeatures.fArchMdsNo)
+        pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
+    else if (   (   pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Atom_Airmount
+                 && pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Atom_End)
+             || (   pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Phi_KnightsLanding
+                 && pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Phi_End))
+    {
+        if (!pVM->hm.s.fMdsClearOnSched)
+            pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry;
+        pVM->hm.s.fMdsClearOnVmEntry = false;
+    }
+    else if (   pVM->cpum.ro.HostFeatures.enmMicroarch <  kCpumMicroarch_Intel_Core7_Nehalem
+             || pVM->cpum.ro.HostFeatures.enmMicroarch >= kCpumMicroarch_Intel_Core7_End)
+        pVM->hm.s.fMdsClearOnSched = pVM->hm.s.fMdsClearOnVmEntry = false;
+
+    /*
      * Sync options.
      */
…
         if (pVM->cpum.ro.HostFeatures.fFlushCmd && pVM->hm.s.fL1dFlushOnVmEntry)
             pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_L1D_ENTRY;
+        if (pVM->cpum.ro.HostFeatures.fMdsClear && pVM->hm.s.fMdsClearOnVmEntry)
+            pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_MDS_ENTRY;
         if (iCpu == 0)
-            LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool\n",
+            LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
                     pCpuCtx->fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
-                    pVM->hm.s.fL1dFlushOnSched));
+                    pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
     }
trunk/src/VBox/VMM/include/HMInternal.h
r78222 → r78632

     /** Set if host manages speculation control settings. */
     bool                        fSpecCtrlByHost;
+    /** Set if MDS related buffers should be cleared on VM entry. */
+    bool                        fMdsClearOnVmEntry;
+    /** Set if MDS related buffers should be cleared on EMT scheduling. */
+    bool                        fMdsClearOnSched;
+    /** Alignment padding. */
+    bool                        afPaddingMinus1[6];

     /** Maximum ASID allowed. */
…
                                                  PFNHMSVMVMRUN pfnVMRun);
 # endif
+DECLASM(void)               hmR0MdsClear(void);
 #endif /* IN_RING0 */