Changeset 70606 in vbox for trunk/src/VBox/VMM
Timestamp: Jan 16, 2018, 7:05:36 PM
Location: trunk
Files: 7 edited
trunk: property svn:mergeinfo changed
    merged: /branches/VBox-5.2:120083,120099,120213,120221,120239
trunk/src/VBox: property svn:mergeinfo changed
    merged: /branches/VBox-5.2/src/VBox:120083,120099,120213,120221,120239
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
Diff r69221 → r70606 ("+" marks added lines, "-" removed lines, "..." elided unchanged code):

@@ Spectre filler defines, around new line 51 @@
 ; Use define because I'm too lazy to convert the struct.
 %define XMM_OFF_IN_X86FXSTATE   160
+
+;; Spectre filler for 32-bit mode.
+; Some user space address that points to a 4MB page boundrary in hope that it
+; will somehow make it less useful.
+%define SPECTRE_FILLER32    0x227fffff
+;; Spectre filler for 64-bit mode.
+; Choosen to be an invalid address (also with 5 level paging).
+%define SPECTRE_FILLER64    0x02204204207fffff
+;; Spectre filler for the current CPU mode.
+%ifdef RT_ARCH_AMD64
+ %define SPECTRE_FILLER     SPECTRE_FILLER64
+%else
+ %define SPECTRE_FILLER     SPECTRE_FILLER32
+%endif

@@ new IBPB macro, around new line 241 @@
 %define MYPOPSEGS   MYPOPSEGS32
 %endif
+
+;;
+; Creates an indirect branch prediction barrier on CPUs that need and supports that.
+; @clobbers eax, edx, ecx
+; @param 1  How to address CPUMCTX.
+; @param 2  Which flag to test for (CPUMCTX_WSF_IBPB_ENTRY or CPUMCTX_WSF_IBPB_EXIT)
+%macro INDIRECT_BRANCH_PREDICTION_BARRIER 2
+    test    byte [%1 + CPUMCTX.fWorldSwitcher], %2
+    jz      %%no_indirect_branch_barrier
+    mov     ecx, MSR_IA32_PRED_CMD
+    mov     eax, MSR_IA32_PRED_CMD_F_IBPB
+    xor     edx, edx
+    wrmsr
+%%no_indirect_branch_barrier:
+%endmacro

@@ 32-bit guest-register save on VM-exit, around new line 1216 @@
     mov     [ss:xDI + CPUMCTX.eax], eax
+    mov     xAX, SPECTRE_FILLER
     mov     [ss:xDI + CPUMCTX.ebx], ebx
+    mov     xBX, xAX
     mov     [ss:xDI + CPUMCTX.ecx], ecx
+    mov     xCX, xAX
     mov     [ss:xDI + CPUMCTX.edx], edx
+    mov     xDX, xAX
     mov     [ss:xDI + CPUMCTX.esi], esi
+    mov     xSI, xAX
     mov     [ss:xDI + CPUMCTX.ebp], ebp
+    mov     xBP, xAX
     mov     xAX, cr2
     mov     [ss:xDI + CPUMCTX.cr2], xAX
 ...
     pop     dword [ss:xDI + CPUMCTX.edi]    ; The guest edi we pushed above.
 %endif
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER ss:xDI, CPUMCTX_WSF_IBPB_EXIT

 %ifndef VMX_SKIP_TR

@@ guest-register load before VM-entry, around new line 1457 @@
     ; Don't mess with ESP anymore!!!
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY

     ; Load guest general purpose registers.
     mov     eax, [xSI + CPUMCTX.eax]

@@ 64-bit guest-register save on VM-exit, around new line 1533 @@
     mov     qword [xDI + CPUMCTX.eax], rax
+    mov     rax, SPECTRE_FILLER64
     mov     qword [xDI + CPUMCTX.ebx], rbx
+    mov     rbx, rax
     mov     qword [xDI + CPUMCTX.ecx], rcx
+    mov     rcx, rax
     mov     qword [xDI + CPUMCTX.edx], rdx
+    mov     rdx, rax
     mov     qword [xDI + CPUMCTX.esi], rsi
+    mov     rsi, rax
     mov     qword [xDI + CPUMCTX.ebp], rbp
+    mov     rbp, rax
     mov     qword [xDI + CPUMCTX.r8], r8
+    mov     r8, rax
     mov     qword [xDI + CPUMCTX.r9], r9
+    mov     r9, rax
     mov     qword [xDI + CPUMCTX.r10], r10
+    mov     r10, rax
     mov     qword [xDI + CPUMCTX.r11], r11
+    mov     r11, rax
     mov     qword [xDI + CPUMCTX.r12], r12
+    mov     r12, rax
     mov     qword [xDI + CPUMCTX.r13], r13
+    mov     r13, rax
     mov     qword [xDI + CPUMCTX.r14], r14
+    mov     r14, rax
     mov     qword [xDI + CPUMCTX.r15], r15
+    mov     r15, rax
     mov     rax, cr2
     mov     qword [xDI + CPUMCTX.cr2], rax
 ...
     pop     xAX    ; The guest rdi we pushed above
     mov     qword [xDI + CPUMCTX.edi], rax
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xDI, CPUMCTX_WSF_IBPB_EXIT

 %ifndef VMX_SKIP_TR

@@ guest-register load before VM-entry (second switcher), around new line 1766 @@
     ; Note: assumes success!
     ; Don't mess with ESP anymore!!!
+
+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY

     ; Load guest general purpose registers.

@@ SVM entry after host vmsave, around new line 1897 @@
     vmsave

+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+
     ; Setup xAX for VMLOAD.
     mov     xAX, [xBP + xCB * 2 + RTHCPHYS_CB]    ; HCPhysVmcb (64 bits physical address; x86: take low dword only)

@@ SVM guest-register save on VM-exit, around new line 1936 @@
     mov     [ss:xAX + CPUMCTX.ebx], ebx
+    mov     xBX, SPECTRE_FILLER
     mov     [ss:xAX + CPUMCTX.ecx], ecx
+    mov     xCX, xBX
     mov     [ss:xAX + CPUMCTX.edx], edx
+    mov     xDX, xBX
     mov     [ss:xAX + CPUMCTX.esi], esi
+    mov     xSI, xBX
     mov     [ss:xAX + CPUMCTX.edi], edi
+    mov     xDI, xBX
     mov     [ss:xAX + CPUMCTX.ebp], ebp
+    mov     xBP, xBX
+
+    ; Fight spectre. Note! Trashes xAX!
+    INDIRECT_BRANCH_PREDICTION_BARRIER ss:xAX, CPUMCTX_WSF_IBPB_EXIT

     ; Restore the host xcr0 if necessary.

@@ 64-bit SVM entry after host vmsave, around new line 2054 @@
     vmsave

+    ; Fight spectre.
+    INDIRECT_BRANCH_PREDICTION_BARRIER xSI, CPUMCTX_WSF_IBPB_ENTRY
+
     ; Setup rax for VMLOAD.
     mov     rax, [rbp + xCB * 2 + RTHCPHYS_CB]    ; HCPhysVmcb (64 bits physical address; take low dword only)

@@ 64-bit SVM guest-register save on VM-exit, around new line 2100 @@
     mov     qword [rax + CPUMCTX.ebx], rbx
+    mov     rbx, SPECTRE_FILLER64
     mov     qword [rax + CPUMCTX.ecx], rcx
+    mov     rcx, rbx
     mov     qword [rax + CPUMCTX.edx], rdx
+    mov     rdx, rbx
     mov     qword [rax + CPUMCTX.esi], rsi
+    mov     rsi, rbx
     mov     qword [rax + CPUMCTX.edi], rdi
+    mov     rdi, rbx
     mov     qword [rax + CPUMCTX.ebp], rbp
+    mov     rbp, rbx
     mov     qword [rax + CPUMCTX.r8], r8
+    mov     r8, rbx
     mov     qword [rax + CPUMCTX.r9], r9
+    mov     r9, rbx
     mov     qword [rax + CPUMCTX.r10], r10
+    mov     r10, rbx
     mov     qword [rax + CPUMCTX.r11], r11
+    mov     r11, rbx
     mov     qword [rax + CPUMCTX.r12], r12
+    mov     r12, rbx
     mov     qword [rax + CPUMCTX.r13], r13
+    mov     r13, rbx
     mov     qword [rax + CPUMCTX.r14], r14
+    mov     r14, rbx
     mov     qword [rax + CPUMCTX.r15], r15
+    mov     r15, rbx
+
+    ; Fight spectre. Note! Trashes rax!
+    INDIRECT_BRANCH_PREDICTION_BARRIER rax, CPUMCTX_WSF_IBPB_EXIT

     ; Restore the host xcr0 if necessary.
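Two techniques are at work in the assembly above: guest general-purpose registers are overwritten with a SPECTRE_FILLER constant immediately after being saved, so no stale guest values remain in registers to feed speculative-execution gadgets on the host side (per the source comments, the 64-bit filler is chosen to be an invalid, non-canonical address even with 5-level paging); and an optional IBPB (indirect branch prediction barrier) is issued around VM entry and exit. For readers who do not read NASM, here is a minimal C-style sketch of what INDIRECT_BRANCH_PREDICTION_BARRIER does; it assumes the IPRT ASMWrMsr() wrapper and the VBox MSR constants, and the helper name itself is made up for illustration:

    #include <iprt/asm-amd64-x86.h>     /* ASMWrMsr() */

    /* Hypothetical C equivalent of the NASM macro: issue the (costly) IBPB
       command only when the corresponding world-switcher flag was set. */
    static void sketchIbpbBarrier(PCPUMCTX pCtx, uint32_t fFlag)
    {
        if (pCtx->fWorldSwitcher & fFlag)   /* CPUMCTX_WSF_IBPB_ENTRY or _EXIT */
            ASMWrMsr(MSR_IA32_PRED_CMD, MSR_IA32_PRED_CMD_F_IBPB);
    }

The asm version tests the flag byte before touching the MSR for the same reason: WRMSR is serializing and expensive, so the barrier must cost essentially nothing when the option is disabled, which is the default (see the HM.cpp changes below).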
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
Diff r70555 → r70606:

@@ feature explosion: extended leaf 0x80000008, around line 1704 @@
             pFeatures->uStepping);

-    PCCPUMCPUIDLEAF pLeaf = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
-    if (pLeaf)
-        pFeatures->cMaxPhysAddrWidth = pLeaf->uEax & 0xff;
+    PCCPUMCPUIDLEAF const pExtLeaf8 = cpumR3CpuIdFindLeaf(paLeaves, cLeaves, 0x80000008);
+    if (pExtLeaf8)
+        pFeatures->cMaxPhysAddrWidth = pExtLeaf8->uEax & 0xff;
     else if (pStd1Leaf->uEdx & X86_CPUID_FEATURE_EDX_PSE36)
         pFeatures->cMaxPhysAddrWidth = 36;

@@ structured extended feature flags, around line 1745 @@
     pFeatures->fAvx512Foundation = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_AVX512F);
     pFeatures->fClFlushOpt       = RT_BOOL(pSxfLeaf0->uEbx & X86_CPUID_STEXT_FEATURE_EBX_CLFLUSHOPT);
+
+    pFeatures->fIbpb             = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB);
+    pFeatures->fIbrs             = pFeatures->fIbpb;
+    pFeatures->fStibp            = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_STIBP);
+    pFeatures->fArchCap          = RT_BOOL(pSxfLeaf0->uEdx & X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP);

@@ AMD extended features, around line 1789 @@
     pFeatures->fMmx        |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_MMX);
     pFeatures->fTsc        |= RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_TSC);
+    pFeatures->fIbpb       |= pExtLeaf8 && (pExtLeaf8->uEbx & X86_CPUID_AMD_EFEID_EBX_IBPB);
     pFeatures->fAmdMmxExts  = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
     pFeatures->fXop         = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);

@@ comment update, around line 2263 @@
     /*
-     * Configure XSAVE offsets according to the CPUID info.
+     * Configure XSAVE offsets according to the CPUID info and set the feature flags.
      */
     memset(&pVM->aCpus[0].cpum.s.Guest.aoffXState[0], 0xff, sizeof(pVM->aCpus[0].cpum.s.Guest.aoffXState));

@@ guest CPUID(7,0).EDX sanitizing, around line 3133 @@
                //| X86_CPUID_STEXT_FEATURE_ECX_PREFETCHWT1 - we do not do vector functions yet.
                ;
-    pCurLeaf->uEdx &= 0;
+    pCurLeaf->uEdx &= 0; /** @todo X86_CPUID_STEXT_FEATURE_EDX_IBRS_IBPB, X86_CPUID_STEXT_FEATURE_EDX_STIBP and X86_CPUID_STEXT_FEATURE_EDX_ARCHCAP */

     if (pCpum->u8PortableCpuIdLevel > 0)

@@ guest CPUID(0x80000008) sanitizing, around line 3516 @@
     {
         pCurLeaf->uEax &= UINT32_C(0x0000ffff);   /* Virtual & physical address sizes only. */
-        pCurLeaf->uEbx  = 0;  /* reserved */
+        pCurLeaf->uEbx  = 0;  /* reserved - [12] == IBPB */
         pCurLeaf->uEdx  = 0;  /* reserved */

@@ debug info field tables, around line 5989 @@
 {
     DBGFREGSUBFIELD_RO("PREFETCHWT1\0" "PREFETCHWT1 instruction",            0, 1, 0),
+    DBGFREGSUBFIELD_RO("UMIP\0"        "User mode insturction prevention",   2, 1, 0),
     DBGFREGSUBFIELD_RO("PKU\0"         "Protection Key for Usermode pages",  3, 1, 0),
-    DBGFREGSUBFIELD_RO("OSPKU\0"       "CR4.PKU mirror",                     4, 1, 0),
+    DBGFREGSUBFIELD_RO("OSPKE\0"       "CR4.PKU mirror",                     4, 1, 0),
+    DBGFREGSUBFIELD_RO("MAWAU\0"       "Value used by BNDLDX & BNDSTX",     17, 5, 0),
+    DBGFREGSUBFIELD_RO("RDPID\0"       "Read processor ID support",         22, 1, 0),
+    DBGFREGSUBFIELD_RO("SGX_LC\0"      "Supports SGX Launch Configuration", 30, 1, 0),
+    DBGFREGSUBFIELD_TERMINATOR()
+};
+
+/** CPUID(7,0).EDX field descriptions. */
+static DBGFREGSUBFIELD const g_aLeaf7Sub0EdxSubFields[] =
+{
+    DBGFREGSUBFIELD_RO("IBRS_IBPB\0"   "IA32_SPEC_CTRL.IBRS and IA32_PRED_CMD.IBPB", 26, 1, 0),
+    DBGFREGSUBFIELD_RO("STIBP\0"       "Supports IA32_SPEC_CTRL.STIBP",              27, 1, 0),
+    DBGFREGSUBFIELD_RO("ARCHCAP\0"     "Supports IA32_ARCH_CAP",                     29, 1, 0),
     DBGFREGSUBFIELD_TERMINATOR()
 };

@@ new table, around line 6095 @@
+/** CPUID(0x80000008,0).EBX field descriptions. */
+static DBGFREGSUBFIELD const g_aExtLeaf8EbxSubFields[] =
+{
+    DBGFREGSUBFIELD_RO("CLZERO\0"      "Clear zero instruction (cacheline)",          0, 1, 0),
+    DBGFREGSUBFIELD_RO("IRPerf\0"      "Instructions retired count support",          1, 1, 0),
+    DBGFREGSUBFIELD_RO("XSaveErPtr\0"  "Save/restore error pointers (FXSAVE/RSTOR*)", 2, 1, 0),
+    DBGFREGSUBFIELD_RO("IBPB\0"        "Supports the IBPB command in IA32_PRED_CMD", 12, 1, 0),
+    DBGFREGSUBFIELD_TERMINATOR()
+};

 static void cpumR3CpuIdInfoMnemonicListU32(PCDBGFINFOHLP pHlp, uint32_t uVal, PCDBGFREGSUBFIELD pDesc,

@@ CPUID(7) info dumping, around line 6305 @@
             cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEcx, Host.uEcx, g_aLeaf7Sub0EcxSubFields, 56);
             if (pCurLeaf->uEdx || Host.uEdx)
-                pHlp->pfnPrintf(pHlp, "%36s %#x (%#x)\n", "Ext Features EDX:", pCurLeaf->uEdx, Host.uEdx);
+                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEdx, Host.uEdx, g_aLeaf7Sub0EdxSubFields, 56);
         }
         else
 ...
             cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEcx, g_aLeaf7Sub0EcxSubFields, "Ext Features ECX:", 36);
             if (pCurLeaf->uEdx)
-                pHlp->pfnPrintf(pHlp, "%36s %#x\n", "Ext Features EDX:", pCurLeaf->uEdx);
+                cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEdx, g_aLeaf7Sub0EdxSubFields, "Ext Features EDX:", 36);
         }
         break;

@@ CPUID(0x80000008) info dumping, around line 6801 @@
-    if (iVerbosity && (pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0)) != NULL)
-    {
-        uint32_t uEAX = pCurLeaf->uEax;
-        uint32_t uECX = pCurLeaf->uEcx;
-
-        pHlp->pfnPrintf(pHlp,
-                        "Physical Address Width:          %d bits\n"
-                        "Virtual Address Width:           %d bits\n"
-                        "Guest Physical Address Width:    %d bits\n",
-                        (uEAX >> 0) & 0xff,
-                        (uEAX >> 8) & 0xff,
-                        (uEAX >> 16) & 0xff);
-        pHlp->pfnPrintf(pHlp,
-                        "Physical Core Count:             %d\n",
-                        ((uECX >> 0) & 0xff) + 1);
-    }
+    pCurLeaf = cpumR3CpuIdGetLeaf(paLeaves, cLeaves, UINT32_C(0x80000008), 0);
+    if (pCurLeaf != NULL)
+    {
+        if (pCurLeaf->uEbx || (Host.uEbx && iVerbosity))
+        {
+            if (iVerbosity < 1)
+                cpumR3CpuIdInfoMnemonicListU32(pHlp, pCurLeaf->uEbx, g_aExtLeaf8EbxSubFields, "Ext Features ext IDs EBX:", 34);
+            else
+                cpumR3CpuIdInfoVerboseCompareListU32(pHlp, pCurLeaf->uEbx, Host.uEbx, g_aExtLeaf8EbxSubFields, 56);
+        }
+
+        if (iVerbosity)
+        {
+            uint32_t uEAX = pCurLeaf->uEax;
+            uint32_t uECX = pCurLeaf->uEcx;
+
+            pHlp->pfnPrintf(pHlp,
+                            "Physical Address Width:          %d bits\n"
+                            "Virtual Address Width:           %d bits\n"
+                            "Guest Physical Address Width:    %d bits\n",
+                            (uEAX >> 0) & 0xff,
+                            (uEAX >> 8) & 0xff,
+                            (uEAX >> 16) & 0xff);
+            pHlp->pfnPrintf(pHlp,
+                            "Physical Core Count:             %d\n",
+                            ((uECX >> 0) & 0xff) + 1);
+        }
+    }
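The feature-detection changes wire fIbpb up from two different CPUID sources and OR them together: the Intel-style structured extended feature leaf, CPUID(7,0).EDX bit 26 (IBRS_IBPB), and the AMD-style extended leaf, CPUID(0x80000008,0).EBX bit 12 (IBPB). A self-contained sketch of the same probe using the GCC/Clang <cpuid.h> helpers (bit positions taken from the field tables added in this change; this is not VBox code):

    #include <cpuid.h>       /* __get_cpuid_count(), GCC/Clang */
    #include <stdbool.h>

    static bool hasIbpb(void)
    {
        unsigned a, b, c, d;
        /* Intel wording: CPUID(7,0).EDX[26] = IBRS and IBPB supported. */
        if (__get_cpuid_count(7, 0, &a, &b, &c, &d) && (d & (1u << 26)))
            return true;
        /* AMD wording: CPUID(0x80000008,0).EBX[12] = IBPB command supported. */
        if (__get_cpuid_count(0x80000008, 0, &a, &b, &c, &d) && (b & (1u << 12)))
            return true;
        return false;
    }

Note that the guest-facing leaves remain masked (pCurLeaf->uEdx &= 0 with a @todo, and 0x80000008 EBX still zeroed with only a "[12] == IBPB" comment): this changeset teaches VirtualBox to detect and use IBPB on the host, but does not yet expose the feature to guests.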
trunk/src/VBox/VMM/VMMR3/HM.cpp
Diff r70557 → r70606:

@@ valid /HM/ config value list, around line 455 @@
                           "|EnableLargePages"
                           "|EnableVPID"
+                          "|IBPBOnVMExit"
+                          "|IBPBOnVMEntry"
                           "|TPRPatchingEnabled"
                           "|64bitEnabled"

@@ new config queries, around line 617 @@
      * available. */
     rc = CFGMR3QueryBoolDef(pCfgHm, "UseVmxPreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/HM/IBPBOnVMExit, bool}
+     * Costly paranoia setting. */
+    rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMExit", &pVM->hm.s.fIbpbOnVmExit, false);
+    AssertLogRelRCReturn(rc, rc);
+
+    /** @cfgm{/HM/IBPBOnVMEntry, bool}
+     * Costly paranoia setting. */
+    rc = CFGMR3QueryBoolDef(pCfgHm, "IBPBOnVMEntry", &pVM->hm.s.fIbpbOnVmEntry, false);
     AssertLogRelRCReturn(rc, rc);

@@ option syncing into CPUMCTX, around line 1180 @@
         Assert(!pVM->hm.s.fTprPatchingAllowed); /* paranoia */
         pVM->hm.s.fTprPatchingAllowed = false;
+    }
+
+    /*
+     * Sync options.
+     */
+    /** @todo Move this out of of CPUMCTX and into some ring-0 only HM structure.
+     *        That will require a little bit of work, of course. */
+    for (VMCPUID iCpu = 0; iCpu < pVM->cCpus; iCpu++)
+    {
+        PVMCPU   pVCpu   = &pVM->aCpus[iCpu];
+        PCPUMCTX pCpuCtx = CPUMQueryGuestCtxPtr(pVCpu);
+        pCpuCtx->fWorldSwitcher &= ~(CPUMCTX_WSF_IBPB_EXIT | CPUMCTX_WSF_IBPB_ENTRY);
+        if (pVM->cpum.ro.HostFeatures.fIbpb)
+        {
+            if (pVM->hm.s.fIbpbOnVmExit)
+                pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_EXIT;
+            if (pVM->hm.s.fIbpbOnVmEntry)
+                pCpuCtx->fWorldSwitcher |= CPUMCTX_WSF_IBPB_ENTRY;
+        }
+        if (iCpu == 0)
+            LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%d fIbpbOnVmEntry=%d)\n",
+                    pCpuCtx->fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry));
     }
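Both new keys default to false, since an IBPB on every world switch is expensive (the doc comments call it a "costly paranoia setting"). Assuming the usual CFGM-to-extradata mapping still applies (a general VirtualBox convention, not something this changeset shows), they could be enabled per VM along the lines of: VBoxManage setextradata "MyVM" "VBoxInternal/HM/IBPBOnVMEntry" 1, and likewise for IBPBOnVMExit. Also note the guard in the sync loop: the CPUMCTX_WSF_IBPB_* flags are only set when pVM->cpum.ro.HostFeatures.fIbpb is true, so enabling the options on a host CPU without the IBPB microcode support is a silent no-op rather than an error.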
trunk/src/VBox/VMM/include/CPUMInternal.mac
Diff r69764 → r70606:

@@ comment update, around line 121 @@
 ;
 ; Guest context state
-; (Identical to the .Hyper chunk below.)
+; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
 ;
 .Guest resq 0

@@ .Guest layout, around line 222 @@
     .Guest.msrKERNELGSBASE    resb    8
     .Guest.uMsrPadding0       resb    8
+    alignb 8
     .Guest.aXcr               resq    2
     .Guest.fXStateMask        resq    1
     .Guest.pXStateR0          RTR0PTR_RES 1
+    alignb 8
     .Guest.pXStateR3          RTR3PTR_RES 1
+    alignb 8
     .Guest.pXStateRC          RTRCPTR_RES 1
     .Guest.aoffXState         resw    64
-%if HC_ARCH_BITS == 64
-    .Guest.abPadding          resb    4
-%else
-    .Guest.abPadding          resb    12
-%endif
+    .Guest.fWorldSwitcher     resd    1
+    alignb 8
     .Guest.hwvirt.svm.uMsrHSavePa   resq    1
     .Guest.hwvirt.svm.GCPhysVmcb    resq    1

@@ .Hyper layout, around line 508 (same change with the .Hyper prefix) @@
     .Hyper.msrKERNELGSBASE    resb    8
     .Hyper.uMsrPadding0       resb    8
+    alignb 8
     .Hyper.aXcr               resq    2
     .Hyper.fXStateMask        resq    1
     .Hyper.pXStateR0          RTR0PTR_RES 1
+    alignb 8
     .Hyper.pXStateR3          RTR3PTR_RES 1
+    alignb 8
     .Hyper.pXStateRC          RTRCPTR_RES 1
     .Hyper.aoffXState         resw    64
-%if HC_ARCH_BITS == 64
-    .Hyper.abPadding          resb    4
-%else
-    .Hyper.abPadding          resb    12
-%endif
+    .Hyper.fWorldSwitcher     resd    1
+    alignb 8
     .Hyper.hwvirt.svm.uMsrHSavePa   resq    1
     .Hyper.hwvirt.svm.GCPhysVmcb    resq    1
trunk/src/VBox/VMM/include/HMInternal.h
Diff r70415 r70606:

@@ HM struct: new booleans where padding used to be, around line 419 @@
     /** Set if posted interrupt processing is enabled. */
     bool                        fPostedIntrs;
-    /** Alignment. */
-    bool                        fAlignment0;
-
-    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
-    uint32_t                    fHostKernelFeatures;
+    /** Set if indirect branch prediction barrier on VM exit. */
+    bool                        fIbpbOnVmExit;
+    /** Set if indirect branch prediction barrier on VM entry. */
+    bool                        fIbpbOnVmEntry;
+    /** Explicit padding. */
+    bool                        afPadding[3];

     /** Maximum ASID allowed. */

@@ HM struct: relocated members, around line 432 @@
     uint32_t                    cMaxResumeLoops;

+    /** Host kernel flags that HM might need to know (SUPKERNELFEATURES_XXX). */
+    uint32_t                    fHostKernelFeatures;
+
+    /** Size of the guest patch memory block. */
+    uint32_t                    cbGuestPatchMem;
     /** Guest allocated memory for patching purposes. */
     RTGCPTR                     pGuestPatchMem;
     /** Current free pointer inside the patch block. */
     RTGCPTR                     pFreeGuestPatchMem;
-    /** Size of the guest patch memory block. */
-    uint32_t                    cbGuestPatchMem;
-    uint32_t                    u32Alignment0;

 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
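The HMInternal.h reshuffle packs the two new booleans into space previously occupied by padding, and the CPUMInternal.mac change above mirrors the new fWorldSwitcher field so the assembly world switchers see the same CPUMCTX layout as the C++ side. Drift between such paired C and assembly definitions is normally caught at build time; VBox has its own AssertCompile* machinery in IPRT for this, but the portable idiom looks roughly like the following sketch (the struct and offset here are made up for illustration):

    #include <assert.h>    /* static_assert (C11) */
    #include <stddef.h>    /* offsetof */

    struct ExampleCtx { unsigned short aoffXState[64]; unsigned fWorldSwitcher; };
    /* The build breaks if the C layout no longer matches what the .mac file assumes. */
    static_assert(offsetof(struct ExampleCtx, fWorldSwitcher) == 128,
                  "asm and C disagree about the context layout");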