Changeset 22040 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 6, 2009 4:33:21 PM
- Location: trunk/src/VBox/VMM
- Files: 5 edited
trunk/src/VBox/VMM/HWACCM.cpp
r22015 → r22040

@@ -946,8 +946,13 @@
 
             LogRel(("HWACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));
-            LogRel(("HWACCM: MSR bitmap physaddr = %RHp\n", pVM->hwaccm.s.vmx.pMSRBitmapPhys));
+
+            /* Paranoia */
+            AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc) >= 512);
 
             for (unsigned i=0;i<pVM->cCPUs;i++)
-                LogRel(("HWACCM: VMCS physaddr VCPU%d = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+            {
+                LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
+                LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+            }
 
 #ifdef HWACCM_VTX_WITH_EPT
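The new AssertRelease is the guard for the switch to the VMX automatic MSR load/store lists made in HWVMXR0.cpp below: it verifies that the CPU advertises room for at least 512 entries per list. As a rough illustration of where that number comes from, the sketch below computes the limit that MSR_IA32_VMX_MISC_MAX_MSR presumably encapsulates; the helper name is invented here, and only the IA32_VMX_MISC bit layout (bits 27:25 encode N, limit = 512 * (N + 1), per the Intel SDM) is assumed.

    #include <stdint.h>

    /* Hypothetical helper showing what MSR_IA32_VMX_MISC_MAX_MSR presumably computes:
     * bits 27:25 of IA32_VMX_MISC hold N, and the recommended maximum number of
     * entries in a VM-entry/VM-exit MSR list is 512 * (N + 1). */
    static inline uint32_t vmxMiscMaxMsrCount(uint64_t uVmxMisc)
    {
        uint32_t uN = (uint32_t)((uVmxMisc >> 25) & 0x7);
        return 512 * (uN + 1);
    }

With N = 0 the minimum is exactly 512, which is why the assertion checks for >= 512.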
trunk/src/VBox/VMM/HWACCMInternal.h
r22016 → r22040

@@ -336,11 +336,4 @@
     R0PTRTYPE(uint8_t *)        pAPIC;
 
-    /** R0 memory object for the MSR bitmap (1 page). */
-    RTR0MEMOBJ                  pMemObjMSRBitmap;
-    /** Physical address of the MSR bitmap (1 page). */
-    RTHCPHYS                    pMSRBitmapPhys;
-    /** Virtual address of the MSR bitmap (1 page). */
-    R0PTRTYPE(uint8_t *)        pMSRBitmap;
-
     /** R0 memory object for the MSR entry load page (guest MSRs). */
     RTR0MEMOBJ                  pMemObjMSREntryLoad;
@@ -585,4 +578,25 @@
     /** Current EPTP. */
     RTHCPHYS                    GCPhysEPTP;
+
+    /** R0 memory object for the MSR bitmap (1 page). */
+    RTR0MEMOBJ                  pMemObjMSRBitmap;
+    /** Physical address of the MSR bitmap (1 page). */
+    RTHCPHYS                    pMSRBitmapPhys;
+    /** Virtual address of the MSR bitmap (1 page). */
+    R0PTRTYPE(uint8_t *)        pMSRBitmap;
+
+    /** R0 memory object for the guest MSR load area (1 page). */
+    RTR0MEMOBJ                  pMemObjGuestMSR;
+    /** Physical address of the guest MSR load area (1 page). */
+    RTHCPHYS                    pGuestMSRPhys;
+    /** Virtual address of the guest MSR load area (1 page). */
+    R0PTRTYPE(uint8_t *)        pGuestMSR;
+
+    /** R0 memory object for the MSR load area (1 page). */
+    RTR0MEMOBJ                  pMemObjHostMSR;
+    /** Physical address of the MSR load area (1 page). */
+    RTHCPHYS                    pHostMSRPhys;
+    /** Virtual address of the MSR load area (1 page). */
+    R0PTRTYPE(uint8_t *)        pHostMSR;
 
     /** VMCS cache. */
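The new per-VCPU pGuestMSR/pHostMSR pages are filled with fixed-size entries by the HWVMXR0.cpp changes below (pMsr->u32IndexMSR, pMsr->u32Reserved, pMsr->u64Value). The actual VMXMSR typedef lives in the VMX headers and is not part of this changeset; the sketch below only restates the 16-byte per-entry layout those assignments imply.

    #include <stdint.h>

    /* Sketch of one entry in the new guest/host MSR load areas, assuming the usual
     * 16-byte VMX MSR-entry format implied by the assignments in HWVMXR0.cpp below.
     * One 4K page then holds up to 256 such entries. */
    typedef struct VMXMSRENTRYSKETCH
    {
        uint32_t u32IndexMSR; /* MSR index, e.g. MSR_K8_LSTAR */
        uint32_t u32Reserved; /* must be zero */
        uint64_t u64Value;    /* loaded on VM-entry (guest area) or on VM-exit (host area) */
    } VMXMSRENTRYSKETCH;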
trunk/src/VBox/VMM/VMMGC/HWACCMGCA.asm
r15962 → r22040

@@ -232,15 +232,4 @@
 ; *
 ; */
-
-    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
-    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
-%if 0 ; not supported on Intel CPUs
-    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
-%endif
-    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
-    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
-    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     mov qword [rbx + VMCSCACHE.uPos], 5
@@ -308,7 +297,4 @@
 
     pop rsi     ; pCtx (needed in rsi by the macros below)
-
-    ;; @todo use the automatic load feature for MSRs
-    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
trunk/src/VBox/VMM/VMMR0/HWACCMR0Mixed.mac
r20996 → r22040

@@ -414,14 +414,4 @@
 %endif
 
-    ; Save the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs and restore the guest MSRs
-    ;; @todo use the automatic load feature for MSRs
-    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
-%if 0 ; not supported on Intel CPUs
-    LOADGUESTMSR MSR_K8_CSTAR, CPUMCTX.msrCSTAR
-%endif
-    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
-    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
-    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-
     ; Save the pCtx pointer
     push xSI
@@ -531,14 +521,4 @@
     pop xSI     ; pCtx (needed in rsi by the macros below)
 
-    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-    LOADHOSTMSR MSR_K8_SF_MASK
-    LOADHOSTMSR MSR_K6_STAR
-%if 0 ; not supported on Intel CPUs
-    LOADHOSTMSR MSR_K8_CSTAR
-%endif
-    LOADHOSTMSR MSR_K8_LSTAR
-
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
     pop xDX     ; saved pCache
@@ -589,14 +569,4 @@
     pop xSI     ; pCtx (needed in rsi by the macros below)
 
-    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-    LOADHOSTMSR MSR_K8_SF_MASK
-    LOADHOSTMSR MSR_K6_STAR
-%if 0 ; not supported on Intel CPUs
-    LOADHOSTMSR MSR_K8_CSTAR
-%endif
-    LOADHOSTMSR MSR_K8_LSTAR
-
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
     add xSP, xS ; pCache
@@ -622,14 +592,4 @@
 
     pop xSI     ; pCtx (needed in rsi by the macros below)
-
-    ; Restore the host LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs
-    ;; @todo use the automatic load feature for MSRs
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-    LOADHOSTMSR MSR_K8_SF_MASK
-    LOADHOSTMSR MSR_K6_STAR
-%if 0 ; not supported on Intel CPUs
-    LOADHOSTMSR MSR_K8_CSTAR
-%endif
-    LOADHOSTMSR MSR_K8_LSTAR
 
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r21951 → r22040

@@ -83,4 +83,5 @@
 static bool vmxR0IsValidWriteField(uint32_t idxField);
 #endif
+static void vmxR0SetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
 
 static void VMXR0CheckError(PVM pVM, PVMCPU pVCpu, int rc)
@@ -198,17 +199,4 @@
     }
 
-    /* Allocate the MSR bitmap if this feature is supported. */
-    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
-    {
-        rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
-        AssertRC(rc);
-        if (RT_FAILURE(rc))
-            return rc;
-
-        pVM->hwaccm.s.vmx.pMSRBitmap = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjMSRBitmap);
-        pVM->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
-        memset(pVM->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
-    }
-
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
     {
@@ -256,4 +244,37 @@
         pVCpu->hwaccm.s.vmx.pVAPICPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 0);
         ASMMemZero32(pVCpu->hwaccm.s.vmx.pVAPIC, PAGE_SIZE);
+
+        /* Allocate the MSR bitmap if this feature is supported. */
+        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
+        {
+            rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+            AssertRC(rc);
+            if (RT_FAILURE(rc))
+                return rc;
+
+            pVCpu->hwaccm.s.vmx.pMSRBitmap = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);
+            pVCpu->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
+            memset(pVCpu->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
+        }
+
+        /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        AssertRC(rc);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        pVCpu->hwaccm.s.vmx.pGuestMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);
+        pVCpu->hwaccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);
+        memset(pVCpu->hwaccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);
+
+        /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        AssertRC(rc);
+        if (RT_FAILURE(rc))
+            return rc;
+
+        pVCpu->hwaccm.s.vmx.pHostMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);
+        pVCpu->hwaccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);
+        memset(pVCpu->hwaccm.s.vmx.pHostMSR, 0, PAGE_SIZE);
 
         /* Current guest paging mode. */
@@ -294,4 +315,25 @@
             pVCpu->hwaccm.s.vmx.pVAPICPhys = 0;
         }
+        if (pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, false);
+            pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pMSRBitmap = 0;
+            pVCpu->hwaccm.s.vmx.pMSRBitmapPhys = 0;
+        }
+        if (pVCpu->hwaccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, false);
+            pVCpu->hwaccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pHostMSR = 0;
+            pVCpu->hwaccm.s.vmx.pHostMSRPhys = 0;
+        }
+        if (pVCpu->hwaccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, false);
+            pVCpu->hwaccm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pGuestMSR = 0;
+            pVCpu->hwaccm.s.vmx.pGuestMSRPhys = 0;
+        }
     }
     if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
@@ -301,11 +343,4 @@
         pVM->hwaccm.s.vmx.pAPIC = 0;
         pVM->hwaccm.s.vmx.pAPICPhys = 0;
-    }
-    if (pVM->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
-    {
-        RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjMSRBitmap, false);
-        pVM->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
-        pVM->hwaccm.s.vmx.pMSRBitmap = 0;
-        pVM->hwaccm.s.vmx.pMSRBitmapPhys = 0;
     }
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
@@ -395,10 +430,8 @@
         val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
 
-#ifdef VBOX_WITH_VTX_MSR_BITMAPS
     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     {
-        Assert(pVM->hwaccm.s.vmx.pMSRBitmapPhys);
+        Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
         val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
     }
-#endif
 
@@ -480,17 +513,26 @@
     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     {
-        /* Optional */
-        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVM->hwaccm.s.vmx.pMSRBitmapPhys);
+        Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
+
+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
         AssertRC(rc);
-    }
-
-    /* Clear MSR controls. */
-    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
-    rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
-    rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
-    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
-    AssertRC(rc);
-
+
+        /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
+        vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
+        vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
+        vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
+    }
+
+    /* Set the guest & host MSR load/store physical addresses. */
+    Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
+    AssertRC(rc);
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
+    AssertRC(rc);
+
+    Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pHostMSRPhys);
+    AssertRC(rc);
+
     if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
     {
@@ -610,4 +652,54 @@
     VMXR0CheckError(pVM, &pVM->aCpus[0], rc);
     return rc;
+}
+
+/**
+ * Sets the permission bits for the specified MSR
+ *
+ * @param   pVCpu       The VMCPU to operate on.
+ * @param   ulMSR       MSR value
+ * @param   fRead       Reading allowed/disallowed
+ * @param   fWrite      Writing allowed/disallowed
+ */
+static void vmxR0SetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
+{
+    unsigned ulBit;
+    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
+
+    /* Layout:
+     * 0x000 - 0x3ff - Low MSR read bits
+     * 0x400 - 0x7ff - High MSR read bits
+     * 0x800 - 0xbff - Low MSR write bits
+     * 0xc00 - 0xfff - High MSR write bits
+     */
+    if (ulMSR <= 0x00001FFF)
+    {
+        /* Pentium-compatible MSRs */
+        ulBit = ulMSR;
+    }
+    else
+    if (    ulMSR >= 0xC0000000
+        &&  ulMSR <= 0xC0001FFF)
+    {
+        /* AMD Sixth Generation x86 Processor MSRs */
+        ulBit = (ulMSR - 0xC0000000);
+        pMSRBitmap += 0x400;
+    }
+    else
+    {
+        AssertFailed();
+        return;
+    }
+
+    Assert(ulBit <= 0x1fff);
+    if (fRead)
+        ASMBitClear(pMSRBitmap, ulBit);
+    else
+        ASMBitSet(pMSRBitmap, ulBit);
+
+    if (fWrite)
+        ASMBitClear(pMSRBitmap + 0x800, ulBit);
+    else
+        ASMBitSet(pMSRBitmap + 0x800, ulBit);
 }
 
@@ -992,5 +1084,4 @@
     }
 
-
     /* Save the base address of the TR selector. */
     if (SelTR > gdtr.cbGdt)
@@ -1067,13 +1158,53 @@
     AssertRC(rc);
 
-#if 0 /* @todo deal with 32/64 */
-    /* Restore the host EFER - on CPUs that support it. */
-    if (pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
-    {
-        uint64_t msrEFER = ASMRdMsr(MSR_IA32_EFER);
-        rc = VMXWriteVMCS64(VMX_VMCS_HOST_FIELD_EFER_FULL, msrEFER);
-        AssertRC(rc);
-    }
-#endif
+    /* Store all host MSRs in the VM-Exit load area, so they will be reloaded after the world switch back to the host. */
+    PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;
+    unsigned idxMsr = 0;
+
+    /* EFER MSR present? */
+    if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
+    {
+        if (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP)
+        {
+            pMsr->u32IndexMSR = MSR_K6_STAR;
+            pMsr->u32Reserved = 0;
+            pMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
+            pMsr++; idxMsr++;
+        }
+
+        pMsr->u32IndexMSR = MSR_K6_EFER;
+        pMsr->u32Reserved = 0;
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+        if (CPUMIsGuestInLongMode(pVCpu))
+        {
+            /* Must match the efer value in our 64 bits switcher. */
+            pMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
+        }
+        else
+#endif
+            pMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
+        pMsr++; idxMsr++;
+    }
+
+#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    if (VMX_IS_64BIT_HOST_MODE())
+    {
+        pMsr->u32IndexMSR = MSR_K8_LSTAR;
+        pMsr->u32Reserved = 0;
+        pMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64 bits mode syscall rip */
+        pMsr++; idxMsr++;
+        pMsr->u32IndexMSR = MSR_K8_SF_MASK;
+        pMsr->u32Reserved = 0;
+        pMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
+        pMsr++; idxMsr++;
+        pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+        pMsr->u32Reserved = 0;
+        pMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
+        pMsr++; idxMsr++;
+    }
+#endif
+    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
+    AssertRC(rc);
+
     pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
 }
@@ -1175,8 +1306,4 @@
         /* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
         val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
-#if 0 /* @todo deal with 32/64 */
-        /* Required for the EFER write below, not supported on all CPUs. */
-        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR;
-#endif
         /* 64 bits guest mode? */
         if (CPUMIsGuestInLongModeEx(pCtx))
@@ -1195,9 +1322,5 @@
 
         /* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
-#if 0 /* @todo deal with 32/64 */
-        val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG | VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR;
-#else
         val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
-#endif
 
 #if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
@@ -1708,14 +1831,49 @@
     }
 
-#if 0 /* @todo deal with 32/64 */
-    /* Unconditionally update the guest EFER - on CPUs that supports it. */
-    if (pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
-    {
-        rc = VMXWriteVMCS64(VMX_VMCS_GUEST_EFER_FULL, pCtx->msrEFER);
-        AssertRC(rc);
-    }
-#endif
-
     vmxR0UpdateExceptionBitmap(pVM, pVCpu, pCtx);
+
+    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
+    {
+        /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
+        vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
+        vmxR0SetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
+        vmxR0SetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
+        vmxR0SetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
+    }
+
+    /* Store all guest MSRs in the VM-Entry load area, so they will be loaded during the world switch. */
+    PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
+    unsigned idxMsr = 0;
+
+    pMsr->u32IndexMSR = MSR_K6_EFER;
+    pMsr->u32Reserved = 0;
+    pMsr->u64Value = pCtx->msrEFER;
+    /* VT-x will complain if only MSR_K6_EFER_LME is set. */
+    if (!CPUMIsGuestInLongModeEx(pCtx))
+        pMsr->u64Value &= ~(MSR_K6_EFER_LMA|MSR_K6_EFER_LME);
+
+    pMsr++; idxMsr++;
+    pMsr->u32IndexMSR = MSR_K8_LSTAR;
+    pMsr->u32Reserved = 0;
+    pMsr->u64Value = pCtx->msrLSTAR; /* 64 bits mode syscall rip */
+    pMsr++; idxMsr++;
+    pMsr->u32IndexMSR = MSR_K6_STAR;
+    pMsr->u32Reserved = 0;
+    pMsr->u64Value = pCtx->msrSTAR; /* legacy syscall eip, cs & ss */
+    pMsr++; idxMsr++;
+    pMsr->u32IndexMSR = MSR_K8_SF_MASK;
+    pMsr->u32Reserved = 0;
+    pMsr->u64Value = pCtx->msrSFMASK; /* syscall flag mask */
+    pMsr++; idxMsr++;
+    pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
+    pMsr->u32Reserved = 0;
+    pMsr->u64Value = pCtx->msrKERNELGSBASE; /* swapgs exchange value */
+    pMsr++; idxMsr++;
+
+    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
+    AssertRC(rc);
+
+    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, idxMsr);
+    AssertRC(rc);
 
     /* Done. */
@@ -1847,4 +2005,38 @@
         /* In real mode we have a fake TSS, so only sync it back when it's supposed to be valid. */
         VMX_READ_SELREG(TR, tr);
+    }
+
+    uint32_t maxMsr = 0;
+    rc = VMXReadVMCS32(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, &maxMsr);
+    AssertRC(rc);
+
+    /* Save the possibly changed MSRs that we automatically restore and save during a world switch. */
+    for (unsigned i = 0; i < maxMsr; i++)
+    {
+        PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
+        pMsr += i;
+
+        switch (pMsr->u32IndexMSR)
+        {
+        case MSR_K8_LSTAR:
+            pCtx->msrLSTAR = pMsr->u64Value;
+            break;
+        case MSR_K6_STAR:
+            pCtx->msrSTAR = pMsr->u64Value;
+            break;
+        case MSR_K8_SF_MASK:
+            pCtx->msrSFMASK = pMsr->u64Value;
+            break;
+        case MSR_K8_KERNEL_GS_BASE:
+            pCtx->msrKERNELGSBASE = pMsr->u64Value;
+            break;
+        case MSR_K6_EFER:
+            /* EFER can't be changed without causing a VM-exit. */
+            // Assert(pCtx->msrEFER == pMsr->u64Value);
+            break;
+        default:
+            AssertFailed();
+            return VERR_INTERNAL_ERROR;
+        }
     }
     return VINF_SUCCESS;
@@ -2041,4 +2233,5 @@
 #ifdef VBOX_STRICT
     RTCPUID idCpuCheck;
+    bool fWasInLongMode = false;
 #endif
 #ifdef VBOX_HIGH_RES_TIMERS_HACK_IN_RING0
@@ -2123,4 +2316,5 @@
         Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
     }
+    fWasInLongMode = CPUMIsGuestInLongMode(pVCpu);
 #endif
 
@@ -2140,4 +2334,6 @@
               (int)pVCpu->hwaccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
     Assert(!HWACCMR0SuspendPending());
+    /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */
+    Assert(fWasInLongMode == CPUMIsGuestInLongMode(pVCpu));
 
     /* Safety precaution; looping for too long here can have a very bad effect on the host */
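For reference, the layout documented in the new vmxR0SetMSRPermission() gives every covered MSR one read-intercept bit and one write-intercept bit inside the 4K MSR bitmap; the bitmap is initialised to 0xff (intercept everything) and bits are cleared to grant the guest direct access. The standalone sketch below mirrors that layout only; it is an illustration, not the VirtualBox helper, and the function name is hypothetical.

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustration of the MSR-bitmap layout from vmxR0SetMSRPermission():
     *   0x000-0x3ff  read bits  for MSRs 0x00000000-0x00001fff
     *   0x400-0x7ff  read bits  for MSRs 0xc0000000-0xc0001fff
     *   0x800-0xbff  write bits for MSRs 0x00000000-0x00001fff
     *   0xc00-0xfff  write bits for MSRs 0xc0000000-0xc0001fff
     * A set bit causes a VM-exit; a clear bit lets the guest access the MSR directly.
     * Returns false for MSRs outside the two covered ranges. */
    static bool msrBitmapOffsets(uint32_t uMsr, uint32_t *poffRead, uint32_t *poffWrite, uint32_t *piBit)
    {
        uint32_t offBase;
        if (uMsr <= 0x00001fff)
        {
            offBase = 0x000;
            *piBit  = uMsr;
        }
        else if (uMsr >= 0xc0000000 && uMsr <= 0xc0001fff)
        {
            offBase = 0x400;
            *piBit  = uMsr - 0xc0000000;
        }
        else
            return false;              /* not covered by the bitmap; always intercepted */

        *poffRead  = offBase;          /* read-intercept area  */
        *poffWrite = offBase + 0x800;  /* write-intercept area */
        return true;
    }

For example, MSR_K8_LSTAR (0xc0000082) falls in the high ranges: its read bit lives in the area at offset 0x400 and its write bit in the area at offset 0xc00, both at bit index 0x82.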