Changeset 78220 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Apr 20, 2019 4:08:44 AM
svn:sync-xref-src-repo-rev: 130157
Location: trunk/src/VBox/VMM/VMMR3
Files: 3 edited
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r76993 → r78220

 };

+/** Saved state field descriptors for VMX nested hardware-virtualization
+ *  VMCS. */
+static const SSMFIELD g_aVmxHwvirtVmcs[] =
+{
+    SSMFIELD_ENTRY(       VMXVVMCS, u32VmcsRevId),
+    SSMFIELD_ENTRY(       VMXVVMCS, enmVmxAbort),
+    SSMFIELD_ENTRY(       VMXVVMCS, fVmcsState),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au8Padding0),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved0),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u16Vpid),
+    SSMFIELD_ENTRY(       VMXVVMCS, u16PostIntNotifyVector),
+    SSMFIELD_ENTRY(       VMXVVMCS, u16EptpIndex),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved0),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestEs),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestCs),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestSs),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestDs),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestFs),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestGs),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestLdtr),
+    SSMFIELD_ENTRY(       VMXVVMCS, GuestTr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u16GuestIntStatus),
+    SSMFIELD_ENTRY(       VMXVVMCS, u16PmlIndex),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved1[8]),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, HostEs),
+    SSMFIELD_ENTRY(       VMXVVMCS, HostCs),
+    SSMFIELD_ENTRY(       VMXVVMCS, HostSs),
+    SSMFIELD_ENTRY(       VMXVVMCS, HostDs),
+    SSMFIELD_ENTRY(       VMXVVMCS, HostFs),
+    SSMFIELD_ENTRY(       VMXVVMCS, HostGs),
+    SSMFIELD_ENTRY(       VMXVVMCS, HostTr),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au16Reserved2),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u32PinCtls),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32ProcCtls),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32XcptBitmap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32XcptPFMask),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32XcptPFMatch),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32Cr3TargetCount),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32ExitCtls),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32ExitMsrStoreCount),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32ExitMsrLoadCount),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryCtls),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryMsrLoadCount),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryIntInfo),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryXcptErrCode),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32EntryInstrLen),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32TprThreshold),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32ProcCtls2),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32PleGap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32PleWindow),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved1),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoVmInstrError),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitReason),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitIntInfo),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitIntErrCode),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoIdtVectoringInfo),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoIdtVectoringErrCode),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitInstrLen),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32RoExitInstrInfo),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32RoReserved2),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestEsLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestCsLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSsLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestDsLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestFsLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestGsLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestLdtrLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestTrLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestGdtrLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestIdtrLimit),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestEsAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestCsAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSsAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestDsAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestFsAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestGsAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestLdtrAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestTrAttr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestIntrState),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestActivityState),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSmBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32GuestSysenterCS),
+    SSMFIELD_ENTRY(       VMXVVMCS, u32PreemptTimer),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved3),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u32HostSysenterCs),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au32Reserved4),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrIoBitmapA),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrIoBitmapB),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrMsrBitmap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrExitMsrStore),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrExitMsrLoad),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrEntryMsrLoad),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64ExecVmcsPtr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrPml),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64TscOffset),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrVirtApic),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrApicAccess),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrPostedIntDesc),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64VmFuncCtls),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64EptpPtr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap0),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap1),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap2),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64EoiExitBitmap3),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrEptpList),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrVmreadBitmap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrVmwriteBitmap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrXcptVeInfo),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64XssBitmap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64AddrEnclsBitmap),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64TscMultiplier),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved0),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoGuestPhysAddr),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved1),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64VmcsLinkPtr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestDebugCtlMsr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPatMsr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestEferMsr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPerfGlobalCtlMsr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte0),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte1),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte2),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPdpte3),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestBndcfgsMsr),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved2),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostPatMsr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostEferMsr),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostPerfGlobalCtlMsr),
+    SSMFIELD_ENTRY_IGNORE(VMXVVMCS, au64Reserved3),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr0Mask),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr4Mask),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr0ReadShadow),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr4ReadShadow),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target0),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target1),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target2),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64Cr3Target3),
+    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved4),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoExitQual),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRcx),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRsi),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRdi),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoIoRip),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64RoGuestLinearAddr),
+    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved5),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCr0),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCr3),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCr4),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestEsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestCsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestSsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestDsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestFsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestGsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestLdtrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestTrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestGdtrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestIdtrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestDr7),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestRsp),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestRip),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestRFlags),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestPendingDbgXcpt),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestSysenterEsp),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64GuestSysenterEip),
+    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved6),
+
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostCr0),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostCr3),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostCr4),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostFsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostGsBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostTrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostGdtrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostIdtrBase),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostSysenterEsp),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostSysenterEip),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostRsp),
+    SSMFIELD_ENTRY(       VMXVVMCS, u64HostRip),
+    SSMFIELD_ENTRY(       VMXVVMCS, au64Reserved7),
+    SSMFIELD_ENTRY_TERM()
+};
+
 /** Saved state field descriptors for CPUMCTX. */
 static const SSMFIELD g_aCpumX87Fields[] =
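The new g_aVmxHwvirtVmcs table is data-driven serialization: each SSMFIELD_ENTRY records a member's offset and size (via offsetof/sizeof) so one generic walker such as SSMR3PutStructEx, which this changeset uses with the table below, can stream any described structure, while the _IGNORE entries skip padding and reserved space. A minimal standalone sketch of that descriptor-table pattern — the FIELD type, the macros, and demoSave() are invented for illustration and are not VirtualBox's SSM API:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical miniature of the SSMFIELD idea: offset/size pairs per member. */
    typedef struct FIELD { size_t off; size_t cb; const char *pszName; } FIELD;
    #define FIELD_ENTRY(Type, Member) { offsetof(Type, Member), sizeof(((Type *)0)->Member), #Member }
    #define FIELD_ENTRY_TERM()        { 0, 0, NULL }

    typedef struct DEMOVMCS { uint32_t u32VmcsRevId; uint16_t u16Vpid; uint64_t u64GuestRip; } DEMOVMCS;

    static const FIELD g_aFields[] =
    {
        FIELD_ENTRY(DEMOVMCS, u32VmcsRevId),
        FIELD_ENTRY(DEMOVMCS, u16Vpid),
        FIELD_ENTRY(DEMOVMCS, u64GuestRip),
        FIELD_ENTRY_TERM()
    };

    /* Generic "save": serialize only the described members, skipping padding. */
    static size_t demoSave(uint8_t *pbDst, const void *pvStruct, const FIELD *paFields)
    {
        size_t cbTotal = 0;
        for (unsigned i = 0; paFields[i].pszName; i++)
        {
            memcpy(pbDst + cbTotal, (const uint8_t *)pvStruct + paFields[i].off, paFields[i].cb);
            cbTotal += paFields[i].cb;
        }
        return cbTotal;
    }

    int main(void)
    {
        DEMOVMCS Vmcs = { 1, 42, 0xfff0 };
        uint8_t  abBuf[64];
        size_t   cb = demoSave(abBuf, &Vmcs, g_aFields);
        printf("serialized %zu bytes\n", cb); /* 4 + 2 + 8 = 14, no struct padding */
        return 0;
    }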
…
 for (VMCPUID i = 0; i < pVM->cCpus; i++)
 {
-    PVMCPU pVCpu = &pVM->aCpus[i];
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3, VMX_V_VMCS_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3, VMX_V_VIRT_APIC_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3 = NULL;
-    }
-    if (pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3)
-    {
-        SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES);
-        pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3 = NULL;
+    PVMCPU   pVCpu = &pVM->aCpus[i];
+    PCPUMCTX pCtx  = &pVCpu->cpum.s.Guest;
+
+    if (pCtx->hwvirt.vmx.pVmcsR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pVmcsR3, VMX_V_VMCS_PAGES);
+        pCtx->hwvirt.vmx.pVmcsR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pShadowVmcsR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pShadowVmcsR3, VMX_V_VMCS_PAGES);
+        pCtx->hwvirt.vmx.pShadowVmcsR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pvVmreadBitmapR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
+        pCtx->hwvirt.vmx.pvVmreadBitmapR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pvVmwriteBitmapR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_PAGES);
+        pCtx->hwvirt.vmx.pvVmwriteBitmapR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
+        pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pExitMsrStoreAreaR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
+        pCtx->hwvirt.vmx.pExitMsrStoreAreaR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pExitMsrLoadAreaR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_PAGES);
+        pCtx->hwvirt.vmx.pExitMsrLoadAreaR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pvMsrBitmapR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_PAGES);
+        pCtx->hwvirt.vmx.pvMsrBitmapR3 = NULL;
+    }
+    if (pCtx->hwvirt.vmx.pvIoBitmapR3)
+    {
+        SUPR3ContFree(pCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES);
+        pCtx->hwvirt.vmx.pvIoBitmapR3 = NULL;
     }
 }
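The rewritten teardown keeps the check/free/NULL shape for every buffer, so it stays safe to run after a partial allocation (the allocator below breaks out of its loop on the first failure and relies on this routine for cleanup). A tiny sketch of why NULL-ing the pointer matters — demoFreePage() and the use of free() are stand-ins, not SUPR3ContFree():

    #include <stdlib.h>

    /* Illustrative only: free in the same check/free/NULL shape as above, so a
       teardown path can run repeatedly or after partial setup without double frees. */
    static void demoFreePage(void **ppv)
    {
        if (*ppv)
        {
            free(*ppv);   /* stand-in; the real code frees contiguous host pages */
            *ppv = NULL;  /* makes a second teardown call harmless */
        }
    }

    int main(void)
    {
        void *pv = malloc(4096);
        demoFreePage(&pv);
        demoFreePage(&pv); /* second call is a no-op, not a double free */
        return 0;
    }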
…
 {
     int rc = VINF_SUCCESS;
-    LogRel(("CPUM: Allocating %u pages for the nested-guest VMCS and related structures\n",
-            pVM->cCpus * (  VMX_V_VMCS_PAGES + VMX_V_VIRT_APIC_PAGES + VMX_V_VMREAD_VMWRITE_BITMAP_PAGES * 2
-                          + VMX_V_AUTOMSR_AREA_PAGES)));
+    uint32_t const cPages = (2 * VMX_V_VMCS_PAGES)
+                          + VMX_V_VIRT_APIC_PAGES
+                          + (2 * VMX_V_VMREAD_VMWRITE_BITMAP_SIZE)
+                          + (3 * VMX_V_AUTOMSR_AREA_SIZE)
+                          + VMX_V_MSR_BITMAP_SIZE
+                          + (VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
+    LogRel(("CPUM: Allocating %u pages for the nested-guest VMCS and related structures\n", pVM->cCpus * cPages));
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        PVMCPU pVCpu = &pVM->aCpus[i];
-        pVCpu->cpum.s.Guest.hwvirt.enmHwvirt = CPUMHWVIRT_VMX;
+        PVMCPU   pVCpu = &pVM->aCpus[i];
+        PCPUMCTX pCtx  = &pVCpu->cpum.s.Guest;
+        pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_VMX;

         /*
…
          */
         Assert(VMX_V_VMCS_PAGES == 1);
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3);
-        rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pVmcsR3);
+        pCtx->hwvirt.vmx.pVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES,
+                                                             &pCtx->hwvirt.vmx.pVmcsR0,
+                                                             &pCtx->hwvirt.vmx.HCPhysVmcs);
+        if (pCtx->hwvirt.vmx.pVmcsR3)
+        { /* likely */ }
+        else
+        {
             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES));
             break;
         }
…
          */
         Assert(VMX_V_VMCS_PAGES == 1);
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3);
-        rc = SUPR3PageAllocEx(VMX_V_VMCS_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pShadowVmcsR3);
+        pCtx->hwvirt.vmx.pShadowVmcsR3 = (PVMXVVMCS)SUPR3ContAlloc(VMX_V_VMCS_PAGES,
+                                                                   &pCtx->hwvirt.vmx.pShadowVmcsR0,
+                                                                   &pCtx->hwvirt.vmx.HCPhysShadowVmcs);
+        if (pCtx->hwvirt.vmx.pShadowVmcsR3)
+        { /* likely */ }
+        else
+        {
             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's shadow VMCS\n", pVCpu->idCpu, VMX_V_VMCS_PAGES));
-            break;
-        }
-
-        /*
-         * Allocate the Virtual-APIC page.
-         */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3);
-        rc = SUPR3PageAllocEx(VMX_V_VIRT_APIC_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVirtApicPageR3);
-            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's Virtual-APIC page\n", pVCpu->idCpu,
-                    VMX_V_VIRT_APIC_PAGES));
             break;
         }

         /*
          * Allocate the VMREAD-bitmap.
          */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3);
-        rc = SUPR3PageAllocEx(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmreadBitmapR3);
+        pCtx->hwvirt.vmx.pvVmreadBitmapR3 = SUPR3ContAlloc(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES,
+                                                           &pCtx->hwvirt.vmx.pvVmreadBitmapR0,
+                                                           &pCtx->hwvirt.vmx.HCPhysVmreadBitmap);
+        if (pCtx->hwvirt.vmx.pvVmreadBitmapR3)
+        { /* likely */ }
+        else
+        {
             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMREAD-bitmap\n", pVCpu->idCpu,
                     VMX_V_VMREAD_VMWRITE_BITMAP_PAGES));
             break;
         }

         /*
          * Allocatge the VMWRITE-bitmap.
          */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3);
-        rc = SUPR3PageAllocEx(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES, 0 /* fFlags */,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvVmwriteBitmapR3);
+        pCtx->hwvirt.vmx.pvVmwriteBitmapR3 = SUPR3ContAlloc(VMX_V_VMREAD_VMWRITE_BITMAP_PAGES,
+                                                            &pCtx->hwvirt.vmx.pvVmwriteBitmapR0,
+                                                            &pCtx->hwvirt.vmx.HCPhysVmwriteBitmap);
+        if (pCtx->hwvirt.vmx.pvVmwriteBitmapR3)
+        { /* likely */ }
+        else
+        {
             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VMWRITE-bitmap\n", pVCpu->idCpu,
                     VMX_V_VMREAD_VMWRITE_BITMAP_PAGES));
             break;
         }

         /*
-         * Allocate the MSR auto-load/store area.
+         * Allocate the VM-entry MSR-load area.
          */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
-        rc = SUPR3PageAllocEx(VMX_V_AUTOMSR_AREA_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pAutoMsrAreaR3);
-            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's auto-load/store MSR area\n", pVCpu->idCpu,
+        pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES,
+                                                                           &pCtx->hwvirt.vmx.pEntryMsrLoadAreaR0,
+                                                                           &pCtx->hwvirt.vmx.HCPhysEntryMsrLoadArea);
+        if (pCtx->hwvirt.vmx.pEntryMsrLoadAreaR3)
+        { /* likely */ }
+        else
+        {
+            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-entry MSR-load area\n", pVCpu->idCpu,
                     VMX_V_AUTOMSR_AREA_PAGES));
             break;
         }
+
+        /*
+         * Allocate the VM-exit MSR-store area.
+         */
+        pCtx->hwvirt.vmx.pExitMsrStoreAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES,
+                                                                           &pCtx->hwvirt.vmx.pExitMsrStoreAreaR0,
+                                                                           &pCtx->hwvirt.vmx.HCPhysExitMsrStoreArea);
+        if (pCtx->hwvirt.vmx.pExitMsrStoreAreaR3)
+        { /* likely */ }
+        else
+        {
+            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-exit MSR-store area\n", pVCpu->idCpu,
+                    VMX_V_AUTOMSR_AREA_PAGES));
+            break;
+        }
+
+        /*
+         * Allocate the VM-exit MSR-load area.
+         */
+        pCtx->hwvirt.vmx.pExitMsrLoadAreaR3 = (PVMXAUTOMSR)SUPR3ContAlloc(VMX_V_AUTOMSR_AREA_PAGES,
+                                                                          &pCtx->hwvirt.vmx.pExitMsrLoadAreaR0,
+                                                                          &pCtx->hwvirt.vmx.HCPhysExitMsrLoadArea);
+        if (pCtx->hwvirt.vmx.pExitMsrLoadAreaR3)
+        { /* likely */ }
+        else
+        {
+            LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's VM-exit MSR-load area\n", pVCpu->idCpu,
+                    VMX_V_AUTOMSR_AREA_PAGES));
+            break;
+        }

         /*
          * Allocate the MSR bitmap.
          */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3);
-        rc = SUPR3PageAllocEx(VMX_V_MSR_BITMAP_PAGES, 0 /* fFlags */, (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvMsrBitmapR3);
+        pCtx->hwvirt.vmx.pvMsrBitmapR3 = SUPR3ContAlloc(VMX_V_MSR_BITMAP_PAGES,
+                                                        &pCtx->hwvirt.vmx.pvMsrBitmapR0,
+                                                        &pCtx->hwvirt.vmx.HCPhysMsrBitmap);
+        if (pCtx->hwvirt.vmx.pvMsrBitmapR3)
+        { /* likely */ }
+        else
+        {
             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's MSR bitmap\n", pVCpu->idCpu,
                     VMX_V_MSR_BITMAP_PAGES));
             break;
         }

         /*
          * Allocate the I/O bitmaps (A and B).
          */
-        Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3);
-        rc = SUPR3PageAllocEx(VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES, 0 /* fFlags */,
-                              (void **)&pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3,
-                              &pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR0, NULL /* paPages */);
-        if (RT_FAILURE(rc))
-        {
-            Assert(!pVCpu->cpum.s.Guest.hwvirt.vmx.pvIoBitmapR3);
+        pCtx->hwvirt.vmx.pvIoBitmapR3 = SUPR3ContAlloc(VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES,
+                                                       &pCtx->hwvirt.vmx.pvIoBitmapR0,
+                                                       &pCtx->hwvirt.vmx.HCPhysIoBitmap);
+        if (pCtx->hwvirt.vmx.pvIoBitmapR3)
+        { /* likely */ }
+        else
+        {
             LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's I/O bitmaps\n", pVCpu->idCpu,
                     VMX_V_IO_BITMAP_A_PAGES + VMX_V_IO_BITMAP_B_PAGES));
             break;
         }
+
+        /*
+         * Zero out all allocated pages (should compress well for saved-state).
+         */
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pVmcs),             0, VMX_V_VMCS_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pShadowVmcs),       0, VMX_V_VMCS_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVmreadBitmap),    0, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvVmwriteBitmap),   0, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pEntryMsrLoadArea), 0, VMX_V_AUTOMSR_AREA_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pExitMsrStoreArea), 0, VMX_V_AUTOMSR_AREA_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pExitMsrLoadArea),  0, VMX_V_AUTOMSR_AREA_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvMsrBitmap),       0, VMX_V_MSR_BITMAP_SIZE);
+        memset(pCtx->hwvirt.vmx.CTX_SUFF(pvIoBitmap),        0, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
     }
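Switching from SUPR3PageAllocEx to SUPR3ContAlloc changes the error convention (a NULL return instead of an IPRT status code) and, importantly, also yields the host-physical address of the contiguous allocation, which the new HCPhys* context fields capture. A sketch of the same calling shape with a stand-in allocator — demoContAlloc() is invented for illustration and its "physical" address is fake:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef uint64_t RTHCPHYS; /* host-physical address type, as in IPRT */

    /* Stand-in with the same shape as SUPR3ContAlloc: returns the ring-3 mapping,
       or NULL on failure, and reports the physical address via an out parameter.
       (A user-mode demo obviously cannot produce a real physical address.) */
    static void *demoContAlloc(size_t cPages, RTHCPHYS *pHCPhys)
    {
        void *pv = calloc(cPages, 4096);
        *pHCPhys = pv ? (RTHCPHYS)(uintptr_t)pv : 0; /* fake value for the demo */
        return pv;
    }

    int main(void)
    {
        RTHCPHYS HCPhysVmcs;
        void *pvVmcs = demoContAlloc(1, &HCPhysVmcs);
        if (pvVmcs)
        { /* likely */ }
        else
        {
            fprintf(stderr, "allocation failed\n");
            return 1;
        }
        printf("ring-3 %p, 'physical' %#llx\n", pvVmcs, (unsigned long long)HCPhysVmcs);
        free(pvVmcs);
        return 0;
    }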
…
-#if 0
 /**
  * Checks whether the given guest CPU VMX features are compatible with the provided
…
 static bool cpumR3AreVmxCpuFeaturesCompatible(PVM pVM, PCCPUMFEATURES pBase, PCCPUMFEATURES pGst)
 {
-    if (cpumR3IsHwAssistVmxNstGstExecAllowed(pVM))
+    if (cpumR3IsHwAssistNstGstExecAllowed(pVM))
     {
         uint64_t const fBase = ((uint64_t)pBase->fVmxInsOutInfo << 0) | ((uint64_t)pBase->fVmxExtIntExit << 1)
…
         if ((fBase | fGst) != fBase)
+        {
+            LogRel(("CPUM: Host VMX features are incompatible with those from the saved state. fBase=%#RX64 fGst=%#RX64\n",
+                    fBase, fGst));
             return false;
+        }
         return true;
     }
     return true;
 }
-#endif
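The compatibility check packs each boolean VMX feature into one bit of a 64-bit mask and tests (fBase | fGst) != fBase, which is true exactly when the saved state requires a feature bit the host-derived baseline lacks. A worked example with an invented two-bit layout (the real masks pack several dozen VMX feature flags):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Bit positions invented for the demo. */
        uint64_t const fBase = (UINT64_C(1) << 0) | (UINT64_C(1) << 1); /* host supports features 0 and 1 */
        uint64_t const fGst  = (UINT64_C(1) << 0) | (UINT64_C(1) << 2); /* saved state wants 0 and 2      */

        /* OR-ing adds the guest bits to the base; if the result differs from the
           base, the guest relies on at least one missing feature (bit 2 here). */
        printf("compatible: %s\n", (fBase | fGst) != fBase ? "no" : "yes");
        return 0;
    }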
…
             SSMR3PutBool(pSSM, pGstCtx->hwvirt.fGif);
         }
+        if (pVM->cpum.s.GuestFeatures.fVmx)
+        {
+            Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
+            SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmxon);
+            SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysVmcs);
+            SSMR3PutGCPhys(pSSM, pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
+            SSMR3PutU32(pSSM, (uint32_t)pGstCtx->hwvirt.vmx.enmDiag);
+            SSMR3PutU32(pSSM, (uint32_t)pGstCtx->hwvirt.vmx.enmAbort);
+            SSMR3PutU32(pSSM, pGstCtx->hwvirt.vmx.uAbortAux);
+            SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInVmxRootMode);
+            SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInVmxNonRootMode);
+            SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fInterceptEvents);
+            SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
+            SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
+            SSMR3PutStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_SIZE);
+            SSMR3PutMem(pSSM, pGstCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uFirstPauseLoopTick);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uPrevPauseTick);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.uEntryTick);
+            SSMR3PutU16(pSSM, pGstCtx->hwvirt.vmx.offVirtApicWrite);
+            SSMR3PutBool(pSSM, pGstCtx->hwvirt.vmx.fVirtNmiBlocking);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64FeatCtrl);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Basic);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.PinCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Misc);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64VmFunc);
+            SSMR3PutU64(pSSM, pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps);
+        }
         SSMR3PutU32(pSSM, pVCpu->cpum.s.fUseFlags);
         SSMR3PutU32(pSSM, pVCpu->cpum.s.fChanged);
…
     /*
      * Validate version.
      */
-    if (   uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
+    if (   uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM
+        && uVersion != CPUM_SAVED_STATE_VERSION_HWVIRT_SVM
         && uVersion != CPUM_SAVED_STATE_VERSION_XSAVE
         && uVersion != CPUM_SAVED_STATE_VERSION_GOOD_CPUID_COUNT
…
                 }
             }
-            /** @todo NSTVMX: Load VMX state. */
+            if (uVersion >= CPUM_SAVED_STATE_VERSION_HWVIRT_VMX_IEM)
+            {
+                if (pVM->cpum.s.GuestFeatures.fVmx)
+                {
+                    Assert(pGstCtx->hwvirt.vmx.CTX_SUFF(pVmcs));
+                    SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmxon);
+                    SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysVmcs);
+                    SSMR3GetGCPhys(pSSM, &pGstCtx->hwvirt.vmx.GCPhysShadowVmcs);
+                    SSMR3GetU32(pSSM, (uint32_t *)&pGstCtx->hwvirt.vmx.enmDiag);
+                    SSMR3GetU32(pSSM, (uint32_t *)&pGstCtx->hwvirt.vmx.enmAbort);
+                    SSMR3GetU32(pSSM, &pGstCtx->hwvirt.vmx.uAbortAux);
+                    SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInVmxRootMode);
+                    SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInVmxNonRootMode);
+                    SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fInterceptEvents);
+                    SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fNmiUnblockingIret);
+                    SSMR3GetStructEx(pSSM, pGstCtx->hwvirt.vmx.pVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
+                    SSMR3GetStructEx(pSSM, pGstCtx->hwvirt.vmx.pShadowVmcsR3, sizeof(VMXVVMCS), 0, g_aVmxHwvirtVmcs, NULL);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvVmreadBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvVmwriteBitmapR3, VMX_V_VMREAD_VMWRITE_BITMAP_SIZE);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pEntryMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrStoreAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pExitMsrLoadAreaR3, VMX_V_AUTOMSR_AREA_SIZE);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvMsrBitmapR3, VMX_V_MSR_BITMAP_SIZE);
+                    SSMR3GetMem(pSSM, pGstCtx->hwvirt.vmx.pvIoBitmapR3, VMX_V_IO_BITMAP_A_SIZE + VMX_V_IO_BITMAP_B_SIZE);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uFirstPauseLoopTick);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uPrevPauseTick);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.uEntryTick);
+                    SSMR3GetU16(pSSM, &pGstCtx->hwvirt.vmx.offVirtApicWrite);
+                    SSMR3GetBool(pSSM, &pGstCtx->hwvirt.vmx.fVirtNmiBlocking);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64FeatCtrl);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Basic);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.PinCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ProcCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ProcCtls2.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.ExitCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.EntryCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TruePinCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueProcCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueEntryCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.TrueExitCtls.u);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Misc);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed0);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr0Fixed1);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed0);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64Cr4Fixed1);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64VmcsEnum);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64VmFunc);
+                    SSMR3GetU64(pSSM, &pGstCtx->hwvirt.vmx.Msrs.u64EptVpidCaps);
+                }
+            }
         }
         else
…
     /*
-     * Guest CPUIDs.
+     * Guest CPUIDs (and VMX MSR features).
      */
     if (uVersion >= CPUM_SAVED_STATE_VERSION_VER3_2)
     {
         CPUMMSRS GuestMsrs;
         RT_ZERO(GuestMsrs);
-        if (pVM->cpum.s.GuestFeatures.fVmx)
+
+        CPUMFEATURES BaseFeatures;
+        bool const fVmxGstFeat = pVM->cpum.s.GuestFeatures.fVmx;
+        if (fVmxGstFeat)
+        {
+            /*
+             * At this point the MSRs in the guest CPU-context are loaded with the guest VMX MSRs from the saved state.
+             * However the VMX sub-features have not been exploded yet. So cache the base (host derived) VMX features
+             * here so we can compare them for compatibility after exploding guest features.
+             */
+            BaseFeatures = pVM->cpum.s.GuestFeatures;
+
+            /* Use the VMX MSR features from the saved state while exploding guest features. */
             GuestMsrs.hwvirt.vmx = pVM->aCpus[0].cpum.s.Guest.hwvirt.vmx.Msrs;
-        return cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
+        }
+
+        /* Load CPUID and explode guest features. */
+        rc = cpumR3LoadCpuId(pVM, pSSM, uVersion, &GuestMsrs);
+        if (fVmxGstFeat)
+        {
+            /*
+             * Check if the exploded VMX features from the saved state are compatible with the host-derived features
+             * we cached earlier (above). This is required if we use hardware-assisted nested-guest execution with
+             * VMX features presented to the guest.
+             */
+            bool const fIsCompat = cpumR3AreVmxCpuFeaturesCompatible(pVM, &BaseFeatures, &pVM->cpum.s.GuestFeatures);
+            if (!fIsCompat)
+                return VERR_CPUM_INVALID_HWVIRT_FEAT_COMBO;
+        }
+        return rc;
     }
     return cpumR3LoadCpuIdPre32(pVM, pSSM, uVersion);
…
 pHlp->pfnPrintf(pHlp, "  uFirstPauseLoopTick        = %RX64\n",   pCtx->hwvirt.vmx.uFirstPauseLoopTick);
 pHlp->pfnPrintf(pHlp, "  uPrevPauseTick             = %RX64\n",   pCtx->hwvirt.vmx.uPrevPauseTick);
-pHlp->pfnPrintf(pHlp, "  uVmentryTick               = %RX64\n",   pCtx->hwvirt.vmx.uVmentryTick);
+pHlp->pfnPrintf(pHlp, "  uEntryTick                 = %RX64\n",   pCtx->hwvirt.vmx.uEntryTick);
 pHlp->pfnPrintf(pHlp, "  offVirtApicWrite           = %#RX16\n",  pCtx->hwvirt.vmx.offVirtApicWrite);
+pHlp->pfnPrintf(pHlp, "  fVirtNmiBlocking           = %RTbool\n", pCtx->hwvirt.vmx.fVirtNmiBlocking);
 pHlp->pfnPrintf(pHlp, "  VMCS cache:\n");
 cpumR3InfoVmxVmcs(pHlp, pCtx->hwvirt.vmx.pVmcsR3, "  " /* pszPrefix */);
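Note that the loader's SSMR3Get* sequence mirrors the saver's SSMR3Put* sequence field for field; these primitive puts carry no per-field tags, so order and width must match exactly or every subsequent field is misread. A toy stream illustrating the contract — DEMOSSM and the demoPut/demoGet helpers are invented stand-ins, not the SSM API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy byte stream standing in for PSSMHANDLE. */
    typedef struct DEMOSSM { uint8_t ab[64]; size_t off; } DEMOSSM;
    static void demoPutU64(DEMOSSM *p, uint64_t u)   { memcpy(&p->ab[p->off], &u, 8); p->off += 8; }
    static void demoPutU16(DEMOSSM *p, uint16_t u)   { memcpy(&p->ab[p->off], &u, 2); p->off += 2; }
    static void demoGetU64(DEMOSSM *p, uint64_t *pu) { memcpy(pu, &p->ab[p->off], 8); p->off += 8; }
    static void demoGetU16(DEMOSSM *p, uint16_t *pu) { memcpy(pu, &p->ab[p->off], 2); p->off += 2; }

    int main(void)
    {
        DEMOSSM Ssm = { {0}, 0 };
        demoPutU64(&Ssm, 0x1234);   /* e.g. uEntryTick       */
        demoPutU16(&Ssm, 0x56);     /* e.g. offVirtApicWrite */

        Ssm.off = 0;                /* "load" side: same order, same widths */
        uint64_t uTick; uint16_t off;
        demoGetU64(&Ssm, &uTick);   /* swapping these two calls would corrupt both fields */
        demoGetU16(&Ssm, &off);
        printf("%#llx %#x\n", (unsigned long long)uTick, off);
        return 0;
    }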
trunk/src/VBox/VMM/VMMR3/EM.cpp
r77611 → r78220

 {
     rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
-    Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
-    UPDATE_RC();
+    if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+        UPDATE_RC();
 }
…
 {
     rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
-    if (rc2 == VINF_VMX_INTERCEPT_NOT_ACTIVE)
-        rc2 = VINF_SUCCESS;
-    UPDATE_RC();
+    if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
+        UPDATE_RC();
 }
…
 /*
  * NMIs (take priority over external interrupts).
  */
-Assert(!HMR3IsEventPending(pVCpu));
 if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
     && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
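Both EM changes make VINF_VMX_INTERCEPT_NOT_ACTIVE mean "no VM-exit was performed, leave the pending status alone" rather than asserting on it or rewriting it to VINF_SUCCESS. A simplified sketch of that shape — the constants and the plain assignment standing in for UPDATE_RC() are invented for illustration:

    #include <stdio.h>

    enum { DEMO_VINF_SUCCESS = 0, DEMO_VINF_INTERCEPT_NOT_ACTIVE = 1000 };

    int main(void)
    {
        int rc  = DEMO_VINF_SUCCESS;
        int rc2 = DEMO_VINF_INTERCEPT_NOT_ACTIVE;  /* pretend the intercept wasn't armed */

        /* Stand-in for UPDATE_RC(): fold rc2 into rc only when a VM-exit really
           happened; otherwise the informational "not active" status is dropped. */
        if (rc2 != DEMO_VINF_INTERCEPT_NOT_ACTIVE)
            rc = rc2;

        printf("rc=%d\n", rc); /* stays DEMO_VINF_SUCCESS */
        return 0;
    }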
trunk/src/VBox/VMM/VMMR3/HM.cpp
r77591 → r78220

 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF,       "/HM/CPU%d/Exit/Trap/Gst/#XF",    "Guest #XF (extended math fault, SIMD FPU) exception.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk,   "/HM/CPU%d/Exit/Trap/Gst/Other",  "Other guest exceptions.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt,           "/HM/CPU%d/Exit/Instr/Hlt",       "HLT instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,         "/HM/CPU%d/Exit/Instr/Rdmsr",     "RDMSR instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,         "/HM/CPU%d/Exit/Instr/Wrmsr",     "WRMSR instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait,         "/HM/CPU%d/Exit/Instr/Mwait",     "MWAIT instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor,       "/HM/CPU%d/Exit/Instr/Monitor",   "MONITOR instruction.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr,         "/HM/CPU%d/Exit/Instr/Rdmsr",     "MSR read.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr,         "/HM/CPU%d/Exit/Instr/Wrmsr",     "MSR write.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite,      "/HM/CPU%d/Exit/Instr/DR-Write",  "Debug register write.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead,       "/HM/CPU%d/Exit/Instr/DR-Read",   "Debug register read.");
…
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitClts,          "/HM/CPU%d/Exit/Instr/CLTS",      "CLTS instruction.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLmsw,          "/HM/CPU%d/Exit/Instr/LMSW",      "LMSW instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli,           "/HM/CPU%d/Exit/Instr/Cli",       "CLI instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti,           "/HM/CPU%d/Exit/Instr/Sti",       "STI instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf,         "/HM/CPU%d/Exit/Instr/Pushf",     "PUSHF instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf,          "/HM/CPU%d/Exit/Instr/Popf",      "POPF instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret,          "/HM/CPU%d/Exit/Instr/Iret",      "IRET instruction.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt,           "/HM/CPU%d/Exit/Instr/Int",       "INT instruction.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitXdtrAccess,    "/HM/CPU%d/Exit/Instr/XdtrAccess", "GDTR, IDTR, LDTR access.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,       "/HM/CPU%d/Exit/IO/Write",        "I/O write.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead,        "/HM/CPU%d/Exit/IO/Read",         "I/O read.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/IO/WriteString",  "String I/O write.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead,  "/HM/CPU%d/Exit/IO/ReadString",   "String I/O read.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow,     "/HM/CPU%d/Exit/IntWindow",       "Interrupt-window exit. Guest is ready to receive interrupts again.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite,       "/HM/CPU%d/Exit/Instr/IO/Write",  "I/O write.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead,        "/HM/CPU%d/Exit/Instr/IO/Read",   "I/O read.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/Instr/IO/WriteString", "String I/O write.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead,  "/HM/CPU%d/Exit/Instr/IO/ReadString",  "String I/O read.");
+HM_REG_COUNTER(&pVCpu->hm.s.StatExitIntWindow,     "/HM/CPU%d/Exit/IntWindow",       "Interrupt-window exit. Guest is ready to receive interrupts.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitExtInt,        "/HM/CPU%d/Exit/ExtInt",          "Physical maskable interrupt (host).");
 #endif
…
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTprBelowThreshold, "/HM/CPU%d/Exit/TprBelowThreshold", "TPR lowered below threshold by the guest.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitTaskSwitch,    "/HM/CPU%d/Exit/TaskSwitch",      "Task switch.");
-HM_REG_COUNTER(&pVCpu->hm.s.StatExitMtf,           "/HM/CPU%d/Exit/MonitorTrapFlag", "Monitor Trap Flag.");
 HM_REG_COUNTER(&pVCpu->hm.s.StatExitApicAccess,    "/HM/CPU%d/Exit/ApicAccess",      "APIC access. Guest attempted to access memory at a physical address on the APIC-access page.");
…
     PVMCPU pVCpu = &pVM->aCpus[i];

-    PVMXVMCSBATCHCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
+    PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache;
     strcpy((char *)pVmcsCache->aMagic, "VMCSCACHE Magic");
     pVmcsCache->uMagic = UINT64_C(0xdeadbeefdeadbeef);
…
     LogRel(("HM: Max resume loops                  = %u\n", pVM->hm.s.cMaxResumeLoops));
     LogRel(("HM: Host CR4                          = %#RX64\n", pVM->hm.s.vmx.u64HostCr4));
-    LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.vmx.u64HostEfer));
+    LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEfer));
     LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL          = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtl));
…
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        LogRel(("HM: VCPU%3d: MSR bitmap physaddr      = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
-        LogRel(("HM: VCPU%3d: VMCS physaddr            = %#RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVmcs));
-    }
+        PCVMXVMCSINFO pVmcsInfo = &pVM->aCpus[i].hm.s.vmx.VmcsInfo;
+        LogRel(("HM: VCPU%3d: MSR bitmap physaddr      = %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap));
+        LogRel(("HM: VCPU%3d: VMCS physaddr            = %#RHp\n", i, pVmcsInfo->HCPhysVmcs));
+    }
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    if (pVM->cpum.ro.GuestFeatures.fVmx)
+    {
+        LogRel(("HM: Nested-guest:\n"));
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
+        {
+            PCVMXVMCSINFO pVmcsInfoNstGst = &pVM->aCpus[i].hm.s.vmx.VmcsInfoNstGst;
+            LogRel(("HM: VCPU%3d: MSR bitmap physaddr      = %#RHp\n", i, pVmcsInfoNstGst->HCPhysMsrBitmap));
+            LogRel(("HM: VCPU%3d: VMCS physaddr            = %#RHp\n", i, pVmcsInfoNstGst->HCPhysVmcs));
+        }
+    }
+#endif

 /*
…
     else if (CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE))
     {
-        if (pVM->hm.s.vmx.u64HostEfer & MSR_K6_EFER_NXE)
+        if (pVM->hm.s.vmx.u64HostMsrEfer & MSR_K6_EFER_NXE)
             CPUMR3SetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX);
         else
…
         pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;
     }
+# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
+    if (pVCpu->hm.s.paStatNestedExitReason)
+    {
+        MMHyperFree(pVM, pVCpu->hm.s.paStatNestedExitReason);
+        pVCpu->hm.s.paStatNestedExitReason   = NULL;
+        pVCpu->hm.s.paStatNestedExitReasonR0 = NIL_RTR0PTR;
+    }
+# endif
 #endif

 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
-    memset(pVCpu->hm.s.vmx.VmcsBatchCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsBatchCache.aMagic));
-    pVCpu->hm.s.vmx.VmcsBatchCache.uMagic = 0;
-    pVCpu->hm.s.vmx.VmcsBatchCache.uPos = 0xffffffff;
+    memset(pVCpu->hm.s.vmx.VmcsCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VmcsCache.aMagic));
+    pVCpu->hm.s.vmx.VmcsCache.uMagic = 0;
+    pVCpu->hm.s.vmx.VmcsCache.uPos = 0xffffffff;
 #endif
 }
…
 VMMR3_INT_DECL(void) HMR3ResetCpu(PVMCPU pVCpu)
 {
-    /* Sync. entire state on VM reset R0-reentry. It's safe to reset
+    /* Sync. entire state on VM reset ring-0 re-entry. It's safe to reset
        the HM flags here, all other EMTs are in ring-3. See VMR3Reset(). */
     pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;

-    pVCpu->hm.s.fActive = false;
-    pVCpu->hm.s.Event.fPending = false;
-    pVCpu->hm.s.vmx.fWasInRealMode = true;
-    pVCpu->hm.s.vmx.u64MsrApicBase = 0;
-    pVCpu->hm.s.vmx.fSwitchedTo64on32 = false;
+    pVCpu->hm.s.fActive                        = false;
+    pVCpu->hm.s.Event.fPending                 = false;
+    pVCpu->hm.s.vmx.u64GstMsrApicBase          = 0;
+    pVCpu->hm.s.vmx.VmcsInfo.fSwitchedTo64on32 = false;
+    pVCpu->hm.s.vmx.VmcsInfo.fWasInRealMode    = true;
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+    if (pVCpu->CTX_SUFF(pVM)->cpum.ro.GuestFeatures.fVmx)
+    {
+        pVCpu->hm.s.vmx.VmcsInfoNstGst.fSwitchedTo64on32 = false;
+        pVCpu->hm.s.vmx.VmcsInfoNstGst.fWasInRealMode    = true;
+    }
+#endif

     /* Reset the contents of the read cache. */
-    PVMXVMCSBATCHCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsBatchCache;
+    PVMXVMCSCACHE pVmcsCache = &pVCpu->hm.s.vmx.VmcsCache;
     for (unsigned j = 0; j < pVmcsCache->Read.cValidEntries; j++)
         pVmcsCache->Read.aFieldVal[j] = 0;
…
 /**
- * Checks if internal events are pending. In that case we are not allowed to dispatch interrupts.
- *
- * @returns true if an internal event is pending, otherwise false.
- * @param   pVCpu   The cross context virtual CPU structure.
- */
-VMMR3_INT_DECL(bool) HMR3IsEventPending(PVMCPU pVCpu)
-{
-    return    HMIsEnabled(pVCpu->pVMR3)
-           && pVCpu->hm.s.Event.fPending;
-}
-
-
-/**
  * Checks if the VMX-preemption timer is being used.
  *
…
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        PVMCPU pVCpu = &pVM->aCpus[i];
+        PVMCPU        pVCpu             = &pVM->aCpus[i];
+        PCVMXVMCSINFO pVmcsInfo         = hmGetVmxActiveVmcsInfo(pVCpu);
+        bool const    fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
         switch (iStatusCode)
         {
…
             case VERR_VMX_INVALID_VMCS_PTR:
+            {
                 LogRel(("HM: VERR_VMX_INVALID_VMCS_PTR:\n"));
-                LogRel(("HM: CPU[%u] Current pointer      %#RGp vs %#RGp\n", i, pVCpu->hm.s.vmx.LastError.u64VmcsPhys,
-                        pVCpu->hm.s.vmx.HCPhysVmcs));
+                LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
+                LogRel(("HM: CPU[%u] Current pointer      %#RHp vs %#RHp\n", i, pVCpu->hm.s.vmx.LastError.HCPhysCurrentVmcs,
+                        pVmcsInfo->HCPhysVmcs));
                 LogRel(("HM: CPU[%u] Current VMCS version %#x\n", i, pVCpu->hm.s.vmx.LastError.u32VmcsRev));
                 LogRel(("HM: CPU[%u] Entered Host Cpu     %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                 LogRel(("HM: CPU[%u] Current Host Cpu     %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
                 break;
+            }

             case VERR_VMX_UNABLE_TO_START_VM:
                 LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM:\n"));
+                LogRel(("HM: CPU[%u] %s VMCS active\n", i, fNstGstVmcsActive ? "Nested-guest" : "Guest"));
                 LogRel(("HM: CPU[%u] Instruction error    %#x\n", i, pVCpu->hm.s.vmx.LastError.u32InstrError));
                 LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));
…
                 else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
                 {
-                    LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32PinCtls));
+                    LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVmcsInfo->u32PinCtls));
                     {
-                        uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32PinCtls;
+                        uint32_t const u32Val = pVmcsInfo->u32PinCtls;
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_EXT_INT_EXIT );
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_NMI_EXIT     );
…
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PIN_CTLS_POSTED_INT   );
                     }
-                    LogRel(("HM: CPU[%u] ProcCtls         %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls));
+                    LogRel(("HM: CPU[%u] ProcCtls         %#RX32\n", i, pVmcsInfo->u32ProcCtls));
                     {
-                        uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ProcCtls;
+                        uint32_t const u32Val = pVmcsInfo->u32ProcCtls;
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_INT_WINDOW_EXIT );
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_TSC_OFFSETTING);
…
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS_USE_SECONDARY_CTLS);
                     }
-                    LogRel(("HM: CPU[%u] ProcCtls2        %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ProcCtls2));
+                    LogRel(("HM: CPU[%u] ProcCtls2        %#RX32\n", i, pVmcsInfo->u32ProcCtls2));
                     {
-                        uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ProcCtls2;
+                        uint32_t const u32Val = pVmcsInfo->u32ProcCtls2;
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_VIRT_APIC_ACCESS );
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_EPT              );
…
                         HMVMX_LOGREL_FEAT(u32Val, VMX_PROC_CTLS2_TSC_SCALING      );
                     }
-                    LogRel(("HM: CPU[%u] EntryCtls        %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32EntryCtls));
+                    LogRel(("HM: CPU[%u] EntryCtls        %#RX32\n", i, pVmcsInfo->u32EntryCtls));
                     {
-                        uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32EntryCtls;
+                        uint32_t const u32Val = pVmcsInfo->u32EntryCtls;
                         HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_DEBUG       );
                         HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_IA32E_MODE_GUEST );
…
                         HMVMX_LOGREL_FEAT(u32Val, VMX_ENTRY_CTLS_LOAD_EFER_MSR    );
                     }
-                    LogRel(("HM: CPU[%u] ExitCtls         %#RX32\n", i, pVCpu->hm.s.vmx.Ctls.u32ExitCtls));
+                    LogRel(("HM: CPU[%u] ExitCtls         %#RX32\n", i, pVmcsInfo->u32ExitCtls));
                     {
-                        uint32_t const u32Val = pVCpu->hm.s.vmx.Ctls.u32ExitCtls;
+                        uint32_t const u32Val = pVmcsInfo->u32ExitCtls;
                         HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_DEBUG           );
                         HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_HOST_ADDR_SPACE_SIZE );
…
                         HMVMX_LOGREL_FEAT(u32Val, VMX_EXIT_CTLS_SAVE_PREEMPT_TIMER   );
                     }
-                    LogRel(("HM: CPU[%u] HCPhysMsrBitmap  %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
-                    LogRel(("HM: CPU[%u] HCPhysGuestMsr   %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
-                    LogRel(("HM: CPU[%u] HCPhysHostMsr    %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysHostMsr));
-                    LogRel(("HM: CPU[%u] cMsrs            %u\n", i, pVCpu->hm.s.vmx.cMsrs));
+                    LogRel(("HM: CPU[%u] HCPhysMsrBitmap     %#RHp\n", i, pVmcsInfo->HCPhysMsrBitmap));
+                    LogRel(("HM: CPU[%u] HCPhysGuestMsrLoad  %#RHp\n", i, pVmcsInfo->HCPhysGuestMsrLoad));
+                    LogRel(("HM: CPU[%u] HCPhysGuestMsrStore %#RHp\n", i, pVmcsInfo->HCPhysGuestMsrStore));
+                    LogRel(("HM: CPU[%u] HCPhysHostMsrLoad   %#RHp\n", i, pVmcsInfo->HCPhysHostMsrLoad));
+                    LogRel(("HM: CPU[%u] cEntryMsrLoad       %u\n", i, pVmcsInfo->cEntryMsrLoad));
+                    LogRel(("HM: CPU[%u] cExitMsrStore       %u\n", i, pVmcsInfo->cExitMsrStore));
+                    LogRel(("HM: CPU[%u] cExitMsrLoad        %u\n", i, pVmcsInfo->cExitMsrLoad));
                 }
                 /** @todo Log VM-entry event injection control fields
…
     if (pVM->hm.s.vmx.fSupported)
     {
-        bool const fRealOnV86Active = pVCpu->hm.s.vmx.RealMode.fRealOnV86Active;
+        PCVMXVMCSINFO pVmcsInfo         = hmGetVmxActiveVmcsInfo(pVCpu);
+        bool const    fRealOnV86Active  = pVmcsInfo->RealMode.fRealOnV86Active;
+        bool const    fNstGstVmcsActive = pVCpu->hm.s.vmx.fSwitchedToNstGstVmcs;
+
+        pHlp->pfnPrintf(pHlp, "  %s VMCS active\n", fNstGstVmcsActive ? "Nested-guest" : "Guest");
         pHlp->pfnPrintf(pHlp, "  Real-on-v86 active = %RTbool\n", fRealOnV86Active);
         if (fRealOnV86Active)
         {
-            pHlp->pfnPrintf(pHlp, "    EFlags  = %#x\n", pVCpu->hm.s.vmx.RealMode.Eflags.u32);
-            pHlp->pfnPrintf(pHlp, "    Attr CS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrCS.u);
-            pHlp->pfnPrintf(pHlp, "    Attr SS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrSS.u);
-            pHlp->pfnPrintf(pHlp, "    Attr DS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrDS.u);
-            pHlp->pfnPrintf(pHlp, "    Attr ES = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrES.u);
-            pHlp->pfnPrintf(pHlp, "    Attr FS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrFS.u);
-            pHlp->pfnPrintf(pHlp, "    Attr GS = %#x\n", pVCpu->hm.s.vmx.RealMode.AttrGS.u);
+            pHlp->pfnPrintf(pHlp, "    EFlags  = %#x\n", pVmcsInfo->RealMode.Eflags.u32);
+            pHlp->pfnPrintf(pHlp, "    Attr CS = %#x\n", pVmcsInfo->RealMode.AttrCS.u);
+            pHlp->pfnPrintf(pHlp, "    Attr SS = %#x\n", pVmcsInfo->RealMode.AttrSS.u);
+            pHlp->pfnPrintf(pHlp, "    Attr DS = %#x\n", pVmcsInfo->RealMode.AttrDS.u);
+            pHlp->pfnPrintf(pHlp, "    Attr ES = %#x\n", pVmcsInfo->RealMode.AttrES.u);
+            pHlp->pfnPrintf(pHlp, "    Attr FS = %#x\n", pVmcsInfo->RealMode.AttrFS.u);
+            pHlp->pfnPrintf(pHlp, "    Attr GS = %#x\n", pVmcsInfo->RealMode.AttrGS.u);
         }
     }
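The recurring theme in the HM.cpp changes is that per-VMCS state (physical address, control values, real-mode cache, MSR-area counts) moves out of loose per-VCPU fields into a VMCSINFO aggregate, with one instance for the guest and one for the nested-guest, selected through hmGetVmxActiveVmcsInfo(). A reduced sketch of that selection pattern — the DEMO* types and their fields are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Miniature of the idea behind VMXVMCSINFO: everything tied to one VMCS
       lives together, so switching VMCSes means switching which struct is used. */
    typedef struct DEMOVMCSINFO { uint64_t HCPhysVmcs; uint32_t u32PinCtls; } DEMOVMCSINFO;

    typedef struct DEMOVCPU
    {
        DEMOVMCSINFO VmcsInfo;              /* ordinary guest VMCS state      */
        DEMOVMCSINFO VmcsInfoNstGst;        /* nested-guest VMCS state        */
        bool         fSwitchedToNstGstVmcs; /* which one is currently active  */
    } DEMOVCPU;

    static const DEMOVMCSINFO *demoGetActiveVmcsInfo(const DEMOVCPU *pVCpu)
    {
        return pVCpu->fSwitchedToNstGstVmcs ? &pVCpu->VmcsInfoNstGst : &pVCpu->VmcsInfo;
    }

    int main(void)
    {
        DEMOVCPU VCpu = { { 0x1000, 0x16 }, { 0x2000, 0x3f }, true };
        const DEMOVMCSINFO *pInfo = demoGetActiveVmcsInfo(&VCpu);
        printf("active VMCS at %#llx, pin ctls %#x\n",
               (unsigned long long)pInfo->HCPhysVmcs, pInfo->u32PinCtls);
        return 0;
    }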
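The control-field dumps above use HMVMX_LOGREL_FEAT(u32Val, FLAG) to report each feature bit by name; stringizing the macro argument is the natural way to get that name for free. A plausible minimal shape for such a macro (illustrative only, not the VirtualBox definition):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented for illustration: print the flag's own name plus whether its bit is set. */
    #define DEMO_LOGREL_FEAT(u32Val, fFlag) \
        printf("  %-28s = %u\n", #fFlag, ((u32Val) & (fFlag)) ? 1 : 0)

    #define DEMO_PIN_CTLS_EXT_INT_EXIT  UINT32_C(0x00000001)
    #define DEMO_PIN_CTLS_NMI_EXIT      UINT32_C(0x00000008)

    int main(void)
    {
        uint32_t const u32PinCtls = DEMO_PIN_CTLS_NMI_EXIT;
        DEMO_LOGREL_FEAT(u32PinCtls, DEMO_PIN_CTLS_EXT_INT_EXIT);
        DEMO_LOGREL_FEAT(u32PinCtls, DEMO_PIN_CTLS_NMI_EXIT);
        return 0;
    }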