Changeset 14648 in vbox for trunk/src/VBox/VMM
- Timestamp: Nov 26, 2008 2:51:47 PM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited (HWACCMInternal.h, VMMR0/HWVMXR0.cpp, VMMR0/HWVMXR0.h)
Index: trunk/src/VBox/VMM/HWACCMInternal.h (r14532 → r14648)
===================================================================
@@ -399 +399 @@
     struct
     {
-        X86EFLAGS   eflags;
+        X86RFLAGS   rflags;
         uint32_t    fValid;
     } RealMode;
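
The widened field matters because the VMCS RFLAGS slot is now written and read as a full 64-bit value (VMX_VMCS64_GUEST_RFLAGS in the HWVMXR0.cpp hunks below), so the stashed copy in the RealMode save area has to be the same width. Below is a minimal sketch of the save/force/restore pattern this changeset uses for real-mode emulation, with a simplified stand-in for the real X86RFLAGS union from iprt/x86.h (the actual union has a full set of named bit-fields; the layout here is abbreviated and assumes the usual little-endian bit-field packing):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for X86RFLAGS (iprt/x86.h); only the bits this
       changeset touches are named, the rest are lumped together. */
    typedef union X86RFLAGS
    {
        uint64_t u64;                  /* the whole 64-bit RFLAGS value */
        struct
        {
            uint32_t u12Status : 12;   /* bits 0..11: CF..OF etc. (abbreviated) */
            uint32_t u2IOPL    : 2;    /* bits 12..13: I/O privilege level */
            uint32_t u3Mid     : 3;    /* bits 14..16: NT/RF area (abbreviated) */
            uint32_t u1VM      : 1;    /* bit 17: virtual-8086 mode */
            uint32_t u14High   : 14;   /* bits 18..31 */
            uint32_t u32Upper;         /* bits 32..63: always read as zero */
        } Bits;
    } X86RFLAGS;

    int main(void)
    {
        X86RFLAGS rflags, saved;
        rflags.u64 = 0x0202;           /* a typical guest flags value (IF set) */

        /* Pattern from the HWVMXR0.cpp hunks below: stash the guest flags,
           then force v86 mode with IOPL=3 before VM entry. */
        saved = rflags;                /* RealMode.rflags = rflags; */
        rflags.Bits.u1VM   = 1;
        rflags.Bits.u2IOPL = 3;

        /* On the way back (VMXR0SaveGuestState): hide the emulation bits. */
        rflags.Bits.u1VM   = 0;
        rflags.Bits.u2IOPL = saved.Bits.u2IOPL;

        printf("restored rflags = %#llx\n", (unsigned long long)rflags.u64);
        return 0;
    }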
Index: trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r14647 → r14648)
===================================================================
@@ -413 +413 @@
 
     /* Init TSC offset to zero. */
-    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0);
-#if HC_ARCH_BITS == 32
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, 0);
-#endif
-    AssertRC(rc);
-
-    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0);
-#if HC_ARCH_BITS == 32
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_HIGH, 0);
-#endif
-    AssertRC(rc);
-
-    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0);
-#if HC_ARCH_BITS == 32
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_HIGH, 0);
-#endif
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0);
+    AssertRC(rc);
+
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0);
+    AssertRC(rc);
+
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0);
     AssertRC(rc);
 
@@ -435 +426 @@
     {
         /* Optional */
-        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVM->hwaccm.s.vmx.pMSRBitmapPhys);
-#if HC_ARCH_BITS == 32
-        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_HIGH, pVM->hwaccm.s.vmx.pMSRBitmapPhys >> 32ULL);
-#endif
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVM->hwaccm.s.vmx.pMSRBitmapPhys);
         AssertRC(rc);
     }
 
     /* Clear MSR controls. */
-    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
-#if HC_ARCH_BITS == 32
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_HIGH, 0);
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_HIGH, 0);
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_HIGH, 0);
-#endif
+    rc  = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
+    rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
+    rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
     rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
     rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
@@ -460 +443 @@
         /* Optional */
         rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0);
-        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
-#if HC_ARCH_BITS == 32
-        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_HIGH, pVM->hwaccm.s.vmx.pAPICPhys >> 32ULL);
-#endif
+        rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
         AssertRC(rc);
     }
 
     /* Set link pointer to -1. Not currently used. */
-#if HC_ARCH_BITS == 32
-    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFF);
-    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_HIGH, 0xFFFFFFFF);
-#else
-    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFF);
-#endif
+    rc = VMXWriteVMCS64(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFF);
     AssertRC(rc);
@@ -908 +883 @@
     {
         Pdpe = PGMGstGetPaePDPtr(pVM, i);
-        int rc = VMXWriteVMCS(VMX_VMCS_GUEST_PDPTR0_FULL + i*2, Pdpe.u);
-#if HC_ARCH_BITS == 32
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_PDPTR0_FULL + i*2 + 1, Pdpe.u >> 32ULL);
-#endif
+        int rc = VMXWriteVMCS64(VMX_VMCS_GUEST_PDPTR0_FULL + i*2, Pdpe.u);
         AssertRC(rc);
     }
@@ -977 +949 @@
 {
     int         rc = VINF_SUCCESS;
+    uint64_t    val64;
+    X86RFLAGS   rflags;
     RTGCUINTPTR val;
-    X86EFLAGS   eflags;
 
     /* Guest CPU context: ES, CS, SS, DS, FS, GS.
      */
@@ -1059 +1032 @@
         rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_LDTR, 0);
         rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_LDTR_LIMIT, 0);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, 0);
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_LDTR_BASE, 0);
         /* Note: vmlaunch will fail with 0 or just 0x02. No idea why. */
         rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, 0x82 /* present, LDT */);
@@ -1067 +1040 @@
         rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_LDTR, pCtx->ldtr);
         rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtrHid.u32Limit);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtrHid.u64Base);
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_LDTR_BASE, pCtx->ldtrHid.u64Base);
         rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, pCtx->ldtrHid.Attr.u);
     }
@@ -1085 +1058 @@
         AssertRC(rc);
 
         rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_TR, 0);
         rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT, HWACCM_VTX_TSS_SIZE);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, GCPhys /* phys = virt in this mode */);
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_TR_BASE, GCPhys /* phys = virt in this mode */);
 
         X86DESCATTR attr;
@@ -1099 +1072 @@
 #endif /* HWACCM_VMX_EMULATE_REALMODE */
     {
         rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_TR, pCtx->tr);
         rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT, pCtx->trHid.u32Limit);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, pCtx->trHid.u64Base);
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_TR_BASE, pCtx->trHid.u64Base);
 
         val = pCtx->trHid.Attr.u;
@@ -1119 +1092 @@
     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     {
         rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
         AssertRC(rc);
     }
@@ -1126 +1099 @@
     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     {
         rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
         AssertRC(rc);
     }
@@ -1134 +1107 @@
      * Sysenter MSRs (unconditional)
      */
     rc  = VMXWriteVMCS(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
-    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
-    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
+    rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
+    rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
     AssertRC(rc);
@@ -1142 +1115 @@
     if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     {
-        val = pCtx->cr0;
-        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, val);
-        Log2(("Guest CR0-shadow %08x\n", val));
+        val64 = pCtx->cr0;
+        rc    = VMXWriteVMCS64(VMX_VMCS64_CTRL_CR0_READ_SHADOW, val64);
+        Log2(("Guest CR0-shadow %RX64\n", val64));
         if (CPUMIsGuestFPUStateActive(pVCpu) == false)
         {
             /* Always use #NM exceptions to load the FPU/XMM state on demand. */
-            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
+            val64 |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
         }
         else
         {
             /** @todo check if we support the old style mess correctly. */
-            if (!(val & X86_CR0_NE))
+            if (!(val64 & X86_CR0_NE))
                 Log(("Forcing X86_CR0_NE!!!\n"));
 
-            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
+            val64 |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
         }
         /* Note: protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
-        val |= X86_CR0_PE | X86_CR0_PG;
+        val64 |= X86_CR0_PE | X86_CR0_PG;
         if (pVM->hwaccm.s.fNestedPaging)
         {
@@ -1180 +1153 @@
         {
             /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
-            val |= X86_CR0_WP;
+            val64 |= X86_CR0_WP;
         }
 
         /* Always enable caching. */
-        val &= ~(X86_CR0_CD|X86_CR0_NW);
-
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR0, val);
-        Log2(("Guest CR0 %08x\n", val));
+        val64 &= ~(X86_CR0_CD|X86_CR0_NW);
+
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_CR0, val64);
+        Log2(("Guest CR0 %RX64\n", val64));
         /* CR0 flags owned by the host; if the guests attempts to change them, then
          * the VM will exit.
          */
-        val =   X86_CR0_PE  /* Must monitor this bit (assumptions are made for real mode emulation) */
-              | X86_CR0_WP  /* Must monitor this bit (it must always be enabled). */
-              | X86_CR0_PG  /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
-              | X86_CR0_TS
-              | X86_CR0_ET  /* Bit not restored during VM-exit! */
-              | X86_CR0_CD  /* Bit not restored during VM-exit! */
-              | X86_CR0_NW  /* Bit not restored during VM-exit! */
-              | X86_CR0_NE
-              | X86_CR0_MP;
-        pVCpu->hwaccm.s.vmx.cr0_mask = val;
-
-        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val);
-        Log2(("Guest CR0-mask %08x\n", val));
+        val64 =   X86_CR0_PE  /* Must monitor this bit (assumptions are made for real mode emulation) */
+                | X86_CR0_WP  /* Must monitor this bit (it must always be enabled). */
+                | X86_CR0_PG  /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
+                | X86_CR0_TS
+                | X86_CR0_ET  /* Bit not restored during VM-exit! */
+                | X86_CR0_CD  /* Bit not restored during VM-exit! */
+                | X86_CR0_NW  /* Bit not restored during VM-exit! */
+                | X86_CR0_NE
+                | X86_CR0_MP;
+        pVCpu->hwaccm.s.vmx.cr0_mask = val64;
+
+        rc |= VMXWriteVMCS64(VMX_VMCS64_CTRL_CR0_MASK, val64);
+        Log2(("Guest CR0-mask %RX64\n", val64));
         AssertRC(rc);
     }
@@ -1209 +1182 @@
     {
         /* CR4 */
-        rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4);
-        Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
+        rc = VMXWriteVMCS64(VMX_VMCS64_CTRL_CR4_READ_SHADOW, pCtx->cr4);
+        Log2(("Guest CR4-shadow %RX64\n", pCtx->cr4));
         /* Set the required bits in cr4 too (currently X86_CR4_VMXE).
          */
-        val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
+        val64 = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
 
         if (!pVM->hwaccm.s.fNestedPaging)
@@ -1226 +1199 @@
             case PGMMODE_PAE_NX:    /* PAE paging with NX enabled. */
                 /** @todo use normal 32 bits paging */
-                val |= X86_CR4_PAE;
+                val64 |= X86_CR4_PAE;
                 break;
 
@@ -1246 +1219 @@
         {
             /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */
-            val |= X86_CR4_PSE;
+            val64 |= X86_CR4_PSE;
             /* Our identity mapping is a 32 bits page directory. */
-            val &= ~X86_CR4_PAE;
+            val64 &= ~X86_CR4_PAE;
         }
@@ -1254 +1227 @@
         /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
         if (CPUMIsGuestInRealModeEx(pCtx))
-            val |= X86_CR4_VME;
+            val64 |= X86_CR4_VME;
 #endif /* HWACCM_VMX_EMULATE_REALMODE */
 
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR4, val);
-        Log2(("Guest CR4 %08x\n", val));
+        rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_CR4, val64);
+        Log2(("Guest CR4 %08x\n", val64));
         /* CR4 flags owned by the host; if the guests attempts to change them, then
         * the VM will exit.
         */
-        val = 0
+        val64 = 0
 #ifdef HWACCM_VMX_EMULATE_REALMODE
             | X86_CR4_VME
@@ -1270 +1243 @@
             | X86_CR4_PSE
             | X86_CR4_VMXE;
-        pVCpu->hwaccm.s.vmx.cr4_mask = val;
+        pVCpu->hwaccm.s.vmx.cr4_mask = val64;
 
-        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val);
-        Log2(("Guest CR4-mask %08x\n", val));
+        rc |= VMXWriteVMCS64(VMX_VMCS64_CTRL_CR4_MASK, val64);
+        Log2(("Guest CR4-mask %RX64\n", val64));
         AssertRC(rc);
     }
@@ -1289 +1262 @@
                                  | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
 
-        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hwaccm.s.vmx.GCPhysEPTP);
-#if HC_ARCH_BITS == 32
-        rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_HIGH, (uint32_t)(pVCpu->hwaccm.s.vmx.GCPhysEPTP >> 32ULL));
-#endif
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hwaccm.s.vmx.GCPhysEPTP);
         AssertRC(rc);
 
@@ -1306 +1276 @@
              * take care of the translation to host physical addresses.
              */
-            val = GCPhys;
+            val64 = GCPhys;
         }
         else
         {
             /* Save the real guest CR3 in VMX_VMCS_GUEST_CR3 */
-            val = pCtx->cr3;
+            val64 = pCtx->cr3;
             /* Prefetch the four PDPT entries in PAE mode. */
             vmxR0PrefetchPAEPdptrs(pVM, pCtx);
@@ -1318 +1288 @@
     else
     {
-        val = PGMGetHyperCR3(pVM);
-        Assert(val);
+        val64 = PGMGetHyperCR3(pVM);
+        Assert(val64);
     }
 
     /* Save our shadow CR3 register. */
-    rc = VMXWriteVMCS(VMX_VMCS_GUEST_CR3, val);
+    rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_CR3, val64);
     AssertRC(rc);
 }
@@ -1338 +1308 @@
 
         /* Resync DR7 */
-        rc = VMXWriteVMCS(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
+        rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_DR7, pCtx->dr[7]);
         AssertRC(rc);
 
@@ -1359 +1329 @@
 
         /* IA32_DEBUGCTL MSR. */
-        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_FULL, 0);
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_HIGH, 0);
+        rc = VMXWriteVMCS64(VMX_VMCS_GUEST_DEBUGCTL_FULL, 0);
         AssertRC(rc);
 
         /** @todo do we really ever need this?
          */
-        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 0);
+        rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_DEBUG_EXCEPTIONS, 0);
         AssertRC(rc);
     }
 
     /* EIP, ESP and EFLAGS */
-    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_RIP, pCtx->rip);
-    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_RSP, pCtx->rsp);
+    rc  = VMXWriteVMCS64(VMX_VMCS64_GUEST_RIP, pCtx->rip);
+    rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_RSP, pCtx->rsp);
     AssertRC(rc);
 
     /* Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. */
-    eflags      = pCtx->eflags;
-    eflags.u32 &= VMX_EFLAGS_RESERVED_0;
-    eflags.u32 |= VMX_EFLAGS_RESERVED_1;
+    rflags      = pCtx->rflags;
+    rflags.u64 &= VMX_EFLAGS_RESERVED_0;
+    rflags.u64 |= VMX_EFLAGS_RESERVED_1;
 
 #ifdef HWACCM_VMX_EMULATE_REALMODE
@@ -1382 +1351 @@
     if (CPUMIsGuestInRealModeEx(pCtx))
     {
-        pVCpu->hwaccm.s.vmx.RealMode.eflags = eflags;
+        pVCpu->hwaccm.s.vmx.RealMode.rflags = rflags;
 
-        eflags.Bits.u1VM   = 1;
-        eflags.Bits.u2IOPL = 3;
+        rflags.Bits.u1VM   = 1;
+        rflags.Bits.u2IOPL = 3;
     }
 #endif /* HWACCM_VMX_EMULATE_REALMODE */
-    rc = VMXWriteVMCS(VMX_VMCS_GUEST_RFLAGS, eflags.u32);
+    rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_RFLAGS, rflags.u64);
     AssertRC(rc);
 
@@ -1397 +1366 @@
     {
         /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */
-        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, u64TSCOffset);
-#if HC_ARCH_BITS == 32
-        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, (uint32_t)(u64TSCOffset >> 32ULL));
-#endif
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, u64TSCOffset);
         AssertRC(rc);
 
@@ -1442 +1408 @@
 #endif
         /* Unconditionally update these as wrmsr might have changed them. */
-        rc = VMXWriteVMCS(VMX_VMCS_GUEST_FS_BASE, pCtx->fsHid.u64Base);
+        rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_FS_BASE, pCtx->fsHid.u64Base);
         AssertRC(rc);
-        rc = VMXWriteVMCS(VMX_VMCS_GUEST_GS_BASE, pCtx->gsHid.u64Base);
+        rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_GS_BASE, pCtx->gsHid.u64Base);
         AssertRC(rc);
     }
@@ -1470 +1436 @@
 DECLINLINE(int) VMXR0SaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
-    RTCCUINTREG val, valShadow;
-    RTGCUINTPTR uInterruptState;
+    uint64_t    val64, u64Shadow;
+    RTHCUINTPTR val;
+    RTHCUINTPTR uInterruptState;
     int         rc;
 
     /* Let's first sync back eip, esp, and eflags. */
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
+    rc = VMXReadVMCS64(VMX_VMCS64_GUEST_RIP, &pCtx->rip);
     AssertRC(rc);
-    pCtx->rip = val;
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
+    rc = VMXReadVMCS64(VMX_VMCS64_GUEST_RSP, &pCtx->rsp);
     AssertRC(rc);
-    pCtx->rsp = val;
-    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
+    rc = VMXReadVMCS64(VMX_VMCS64_GUEST_RFLAGS, &pCtx->rflags.u64);
     AssertRC(rc);
-    pCtx->eflags.u32 = val;
 
     /* Take care of instruction fusing (sti, mov ss) */
-    rc |= VMXReadVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &val);
-    uInterruptState = val;
+    rc |= VMXReadVMCS(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uInterruptState);
     if (uInterruptState != 0)
     {
@@ -1498 +1461 @@
 
     /* Control registers.
      */
-    VMXReadVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
-    VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
-    val = (valShadow & pVCpu->hwaccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);
-    CPUMSetGuestCR0(pVM, val);
-
-    VMXReadVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
-    VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
-    val = (valShadow & pVCpu->hwaccm.s.vmx.cr4_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr4_mask);
-    CPUMSetGuestCR4(pVM, val);
+    VMXReadVMCS64(VMX_VMCS64_CTRL_CR0_READ_SHADOW, &u64Shadow);
+    VMXReadVMCS64(VMX_VMCS64_GUEST_CR0, &val64);
+    val64 = (u64Shadow & pVCpu->hwaccm.s.vmx.cr0_mask) | (val64 & ~pVCpu->hwaccm.s.vmx.cr0_mask);
+    CPUMSetGuestCR0(pVM, val64);
+
+    VMXReadVMCS64(VMX_VMCS64_CTRL_CR4_READ_SHADOW, &u64Shadow);
+    VMXReadVMCS64(VMX_VMCS64_GUEST_CR4, &val64);
+    val64 = (u64Shadow & pVCpu->hwaccm.s.vmx.cr4_mask) | (val64 & ~pVCpu->hwaccm.s.vmx.cr4_mask);
+    CPUMSetGuestCR4(pVM, val64);
 
     /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
@@ -1516 +1479 @@
         CPUMSetGuestCR2(pVM, ASMGetCR2());
 
-        VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
-
-        if (val != pCtx->cr3)
-        {
-            CPUMSetGuestCR3(pVM, val);
-            PGMUpdateCR3(pVM, val);
+        VMXReadVMCS64(VMX_VMCS64_GUEST_CR3, &val64);
+
+        if (val64 != pCtx->cr3)
+        {
+            CPUMSetGuestCR3(pVM, val64);
+            PGMUpdateCR3(pVM, val64);
         }
         /* Prefetch the four PDPT entries in PAE mode. */
@@ -1528 +1491 @@
 
     /* Sync back DR7 here. */
-    VMXReadVMCS(VMX_VMCS_GUEST_DR7, &val);
-    pCtx->dr[7] = val;
+    VMXReadVMCS64(VMX_VMCS64_GUEST_DR7, &pCtx->dr[7]);
 
     /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
@@ -1542 +1504 @@
      * System MSRs
      */
     VMXReadVMCS(VMX_VMCS32_GUEST_SYSENTER_CS, &val);
     pCtx->SysEnter.cs = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
-    pCtx->SysEnter.eip = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
-    pCtx->SysEnter.esp = val;
+    VMXReadVMCS64(VMX_VMCS64_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);
+    VMXReadVMCS64(VMX_VMCS64_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);
 
     /* Misc. registers; must sync everything otherwise we can get out of sync when jumping to ring 3. */
     VMX_READ_SELREG(LDTR, ldtr);
 
     VMXReadVMCS(VMX_VMCS32_GUEST_GDTR_LIMIT, &val);
     pCtx->gdtr.cbGdt = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
-    pCtx->gdtr.pGdt = val;
-
-    VMXReadVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT, &val);
+    VMXReadVMCS64(VMX_VMCS64_GUEST_GDTR_BASE, &pCtx->gdtr.pGdt);
+
+    VMXReadVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT, &val);
     pCtx->idtr.cbIdt = val;
-    VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
-    pCtx->idtr.pIdt = val;
+    VMXReadVMCS64(VMX_VMCS64_GUEST_IDTR_BASE, &pCtx->idtr.pIdt);
 
 #ifdef HWACCM_VMX_EMULATE_REALMODE
@@ -1568 +1526 @@
         /* Hide our emulation flags */
         pCtx->eflags.Bits.u1VM   = 0;
-        pCtx->eflags.Bits.u2IOPL = pVCpu->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
+        pCtx->eflags.Bits.u2IOPL = pVCpu->hwaccm.s.vmx.RealMode.rflags.Bits.u2IOPL;
 
         /* Force a TR resync every time in case we switch modes.
          */
@@ -1725 +1683 @@
     int         rc = VINF_SUCCESS;
     RTCCUINTREG val;
+    uint64_t    val64;
     RTCCUINTREG exitReason, instrError, cbInstr;
-    RTGCUINTPTR exitQualification;
+    uint64_t    exitQualification;
     RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
     RTGCUINTPTR errCode, instrInfo;
@@ -2005 +1964 @@
     rc |= VMXReadVMCS(VMX_VMCS32_RO_EXIT_INSTR_INFO, &val);
     instrInfo = val;
-    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_QUALIFICATION, &val);
-    exitQualification = val;
+    rc |= VMXReadVMCS64(VMX_VMCS64_RO_EXIT_QUALIFICATION, &exitQualification);
     AssertRC(rc);
@@ -2256 +2214 @@
 
         /* Resync DR7 */
-        rc = VMXWriteVMCS(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
+        rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_DR7, pCtx->dr[7]);
         AssertRC(rc);
 
@@ -2373 +2331 @@
         Assert(pVM->hwaccm.s.fNestedPaging);
 
-#if HC_ARCH_BITS == 64
-        rc = VMXReadVMCS(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
-        AssertRC(rc);
-#else
-        uint32_t val_hi;
-        rc = VMXReadVMCS(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &val);
-        AssertRC(rc);
-        rc = VMXReadVMCS(VMX_VMCS_EXIT_PHYS_ADDR_HIGH, &val_hi);
-        AssertRC(rc);
-        GCPhys = RT_MAKE_U64(val, val_hi);
-#endif
+        rc = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
+        AssertRC(rc);
 
         Assert(((exitQualification >> 7) & 3) != 2);
@@ -2435 +2384 @@
         Assert(pVM->hwaccm.s.fNestedPaging);
 
-#if HC_ARCH_BITS == 64
-        rc = VMXReadVMCS(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
-        AssertRC(rc);
-#else
-        uint32_t val_hi;
-        rc = VMXReadVMCS(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &val);
-        AssertRC(rc);
-        rc = VMXReadVMCS(VMX_VMCS_EXIT_PHYS_ADDR_HIGH, &val_hi);
-        AssertRC(rc);
-        GCPhys = RT_MAKE_U64(val, val_hi);
-#endif
+        rc = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys);
+        AssertRC(rc);
 
         Log(("VMX_EXIT_EPT_MISCONFIG for %VGp\n", GCPhys));
@@ -2803 +2743 @@
 
         /* Resync DR7 */
-        rc = VMXWriteVMCS(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
+        rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_DR7, pCtx->dr[7]);
         AssertRC(rc);
 
@@ -2951 +2891 @@
         Log(("VMX_EXIT_ERR_INVALID_GUEST_STATE\n"));
 
-        VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
-        Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
-
-        VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
-        Log(("VMX_VMCS_GUEST_CR0 %RX64\n", val));
-
-        VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
-        Log(("VMX_VMCS_GUEST_CR3 %RGp\n", val));
-
-        VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
-        Log(("VMX_VMCS_GUEST_CR4 %RX64\n", val));
-
-        VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
-        Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
+        VMXReadVMCS64(VMX_VMCS64_GUEST_RIP, &val64);
+        Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val64));
+
+        VMXReadVMCS64(VMX_VMCS64_GUEST_CR0, &val64);
+        Log(("VMX_VMCS64_GUEST_CR0 %RX64\n", val64));
+
+        VMXReadVMCS64(VMX_VMCS64_GUEST_CR3, &val64);
+        Log(("VMX_VMCS64_GUEST_CR3 %RGp\n", val64));
+
+        VMXReadVMCS64(VMX_VMCS64_GUEST_CR4, &val64);
+        Log(("VMX_VMCS64_GUEST_CR4 %RX64\n", val64));
+
+        VMXReadVMCS64(VMX_VMCS64_GUEST_RFLAGS, &val64);
+        Log(("VMX_VMCS64_GUEST_RFLAGS %08x\n", val64));
 
         VMX_LOG_SELREG(CS, "CS");
@@ -2975 +2915 @@
         VMX_LOG_SELREG(LDTR, "LDTR");
 
-        VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
-        Log(("VMX_VMCS_GUEST_GDTR_BASE %RGv\n", val));
-        VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
-        Log(("VMX_VMCS_GUEST_IDTR_BASE %RGv\n", val));
+        VMXReadVMCS64(VMX_VMCS64_GUEST_GDTR_BASE, &val64);
+        Log(("VMX_VMCS64_GUEST_GDTR_BASE %RGv\n", (RTGCPTR)val64));
+        VMXReadVMCS64(VMX_VMCS64_GUEST_IDTR_BASE, &val64);
+        Log(("VMX_VMCS64_GUEST_IDTR_BASE %RGv\n", (RTGCPTR)val64));
 #endif /* VBOX_STRICT */
         rc = VERR_VMX_INVALID_GUEST_STATE;
@@ -3213 +3153 @@
 {
     int rc;
-    RTCCUINTREG exitReason, instrError, val;
+    RTCCUINTREG exitReason, instrError;
 
     rc = VMXReadVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
@@ -3227 +3167 @@
 
 #ifdef VBOX_STRICT
     RTGDTR      gdtr;
    PX86DESCHC  pDesc;
+    uint64_t    val64;
+    RTCCUINTREG val;
 
     ASMGetGDTR(&gdtr);
 
-    VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
-    Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
+    VMXReadVMCS64(VMX_VMCS64_GUEST_RIP, &val64);
+    Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val64));
     VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
     Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
@@ -3255 +3197 @@
     Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
 
-    VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
-    Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
+    VMXReadVMCS64(VMX_VMCS64_GUEST_RFLAGS, &val64);
+    Log(("VMX_VMCS_GUEST_RFLAGS %RX64\n", val64));
 
     if (val < gdtr.cbGdt)
Index: trunk/src/VBox/VMM/VMMR0/HWVMXR0.h (r14581 → r14648)
===================================================================
@@ -142 +142 @@
 #define VMX_WRITE_SELREG(REG, reg) \
 { \
     rc  = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_##REG,   pCtx->reg); \
     rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT, pCtx->reg##Hid.u32Limit); \
-    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_##REG##_BASE,    pCtx->reg##Hid.u64Base); \
+    rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_##REG##_BASE, pCtx->reg##Hid.u64Base); \
     if ((pCtx->eflags.u32 & X86_EFL_VM)) \
         val = pCtx->reg##Hid.Attr.u; \
@@ -161 +161 @@
         val = 0x10000;  /* Invalid guest state error otherwise. (BIT(16) = Unusable) */ \
 \
     rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, val); \
 }
 
 #define VMX_READ_SELREG(REG, reg) \
 { \
     VMXReadVMCS(VMX_VMCS16_GUEST_FIELD_##REG,           &val); \
     pCtx->reg = val; \
     VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT,         &val); \
     pCtx->reg##Hid.u32Limit = val; \
-    VMXReadVMCS(VMX_VMCS_GUEST_##REG##_BASE,            &val); \
-    pCtx->reg##Hid.u64Base = val; \
+    VMXReadVMCS64(VMX_VMCS64_GUEST_##REG##_BASE,        &val64); \
+    pCtx->reg##Hid.u64Base = val64; \
     VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &val); \
     pCtx->reg##Hid.Attr.u = val; \
 }
 
 #define VMX_LOG_SELREG(REG, szSelReg) \
 { \
     VMXReadVMCS(VMX_VMCS16_GUEST_FIELD_##REG,           &val); \
     Log(("%s Selector     %x\n", szSelReg, val)); \
     VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_LIMIT,         &val); \
     Log(("%s Limit        %x\n", szSelReg, val)); \
-    VMXReadVMCS(VMX_VMCS_GUEST_##REG##_BASE,            &val); \
-    Log(("%s Base         %RX64\n", szSelReg, val)); \
+    VMXReadVMCS64(VMX_VMCS64_GUEST_##REG##_BASE,        &val64); \
+    Log(("%s Base         %RX64\n", szSelReg, val64)); \
     VMXReadVMCS(VMX_VMCS32_GUEST_##REG##_ACCESS_RIGHTS, &val); \
     Log(("%s Attributes   %x\n", szSelReg, val)); \
 }
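
These selector macros expand into the caller's scope and rely on locals named rc, val and, after this changeset, val64 being declared there, exactly as VMXR0SaveGuestState now declares them. An illustrative caller showing the required declarations; the macro, type, and field names are from the diff, but the function itself is hypothetical and not part of VirtualBox:

    /* Hypothetical caller, for illustration only; assumes the VBox headers
       that define PCPUMCTX, RTCCUINTREG, VINF_SUCCESS and VMX_READ_SELREG
       are included. */
    static int vmxR0SyncGuestSelRegs(PCPUMCTX pCtx)
    {
        int         rc = VINF_SUCCESS;
        RTCCUINTREG val;    /* 16/32-bit VMCS reads land here */
        uint64_t    val64;  /* 64-bit base reads land here (new in r14648) */

        VMX_READ_SELREG(ES, es);
        VMX_READ_SELREG(CS, cs);
        VMX_READ_SELREG(SS, ss);
        VMX_READ_SELREG(DS, ds);
        VMX_READ_SELREG(FS, fs);
        VMX_READ_SELREG(GS, gs);
        return rc;
    }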