- Timestamp: Jun 13, 2013 1:32:16 PM (12 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46514 → r46530

      /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
      uint64_t        u64ExitCode;
+     /** The guest's TPR value used for TPR shadowing. */
+     uint8_t         u8GuestTpr;
  } SVMTRANSIENT, *PSVMTRANSIENT;
  /** @} */
…
   *
   * @param   pVCpu       Pointer to the VMCPU.
-  * @param   uMsr        The MSR.
-  * @param   fRead       Whether reading is allowed.
-  * @param   fWrite      Whether writing is allowed.
+  * @param   uMsr        The MSR for which the access permissions are being set.
+  * @param   enmRead     MSR read permissions.
+  * @param   enmWrite    MSR write permissions.
   */
  static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
…
       * Don't intercept guest read/write accesses to these MSRs.
       */
-     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-     hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
      hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-     hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+     hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
      hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
      hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
…
   * @returns VBox status code.
   * @param   pVCpu       Pointer to the VMCPU.
+  * @param   pVmcb       Pointer to the VMCB.
   * @param   pCtx        Pointer the guest-CPU context.
   *
   * @remarks No-long-jump zone!!!
   */
- static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+ DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
  {
      /*
…
  }
  
+ 
  /**
   * Loads the guest segment registers into the VMCB.
   *
   * @returns VBox status code.
   * @param   pVCpu       Pointer to the VMCPU.
+  * @param   pVmcb       Pointer to the VMCB.
   * @param   pCtx        Pointer to the guest-CPU context.
   *
   * @remarks No-long-jump zone!!!
   */
- static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+ DECLINLINE(void) hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
  {
      /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
…
   *
   * @param   pVCpu       Pointer to the VMCPU.
+  * @param   pVmcb       Pointer to the VMCB.
   * @param   pCtx        Pointer to the guest-CPU context.
   *
   * @remarks No-long-jump zone!!!
   */
- static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pCtx)
+ DECLINLINE(void) hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
  {
      /* Guest Sysenter MSRs. */
…
   *
   * @remarks No-long-jump zone!!!
-  */
- static void hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+  * @remarks Requires EFLAGS to be up-to-date in the VMCB!
+  */
+ DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
  {
      if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
…
      pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
  }
+ 
+ 
+ /**
+  * Loads the guest APIC state (currently just the TPR).
+  *
+  * @returns VBox status code.
+  * @param   pVCpu       Pointer to the VMCPU.
+  * @param   pVmcb       Pointer to the VMCB.
+  * @param   pCtx        Pointer to the guest-CPU context.
+  */
+ DECLINLINE(int) hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
+ {
+     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_APIC_STATE))
+         return VINF_SUCCESS;
+ 
+     bool    fPendingIntr;
+     uint8_t u8Tpr;
+     int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
+     AssertRCReturn(rc, rc);
+ 
+     /** Assume that we need to trap all TPR accesses and thus need not check on
+      *  every #VMEXIT if we should update the TPR. */
+     Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
+     pVCpu->hm.s.svm.fSyncVTpr = false;
+ 
+     /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */
+     if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
+     {
+         pCtx->msrLSTAR = u8Tpr;
+ 
+         /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
+         if (fPendingIntr)
+             hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
+         else
+         {
+             hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+             pVCpu->hm.s.svm.fSyncVTpr = true;
+         }
+ 
+         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
+     }
+     else
+     {
+         /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
+         pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
+ 
+         /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
+         if (fPendingIntr)
+             pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
+         else
+         {
+             pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
+             pVCpu->hm.s.svm.fSyncVTpr = true;
+         }
+ 
+         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+     }
+ 
+     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_APIC_STATE;
+     return rc;
+ }
+ 
  
  /**
…
      STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
  
-     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pCtx);
+     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
      AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
  
-     hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
-     hmR0SvmLoadGuestMsrs(pVCpu, pCtx);
+     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
+     hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);
  
      pVmcb->guest.u64RIP = pCtx->rip;
…
  
      /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
-     hmR0SvmLoadGuestDebugRegs(pVCpu, pCtx);
+     hmR0SvmLoadGuestDebugRegs(pVCpu, pVmcb, pCtx);
+ 
+     rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
+     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
  
      rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
…
  #endif
  
- /* -XXX- todo TPR syncing. */
- 
  /*
   * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
…
      AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
      STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
+ 
+     /*
+      * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
+      * so we can update it on the way back if the guest changed the TPR.
+      */
+     if (pVCpu->hm.s.svm.fSyncVTpr)
+     {
+         if (pVM->hm.s.fTPRPatchingActive)
+             pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
+         else
+             pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
+     }
  
      /* Flush the appropriate tagged-TLB entries. */
…
  
      TMNotifyEndOfExecution(pVCpu);              /* Notify TM that the guest is no longer running. */
+     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+ 
      Assert(!(ASMGetFlags() & X86_EFL_IF));
-     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
- 
-     /* -XXX- TPR patching? */
- 
      ASMSetFlags(pSvmTransient->uEFlags);        /* Enable interrupts. */
  
…
      hmR0SvmSaveGuestState(pVCpu, pMixedCtx);    /* Save the guest state from the VMCB to the guest-CPU context. */
  
-     /* --XXX- TPR syncing todo */
+     if (pVCpu->hm.s.svm.fSyncVTpr)
+     {
+         /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
+         if (   pVM->hm.s.fTPRPatchingActive
+             && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
+         {
+             int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff);
+             AssertRC(rc);
+         }
+         else if ((uint8_t)(pSvmTransient->u8GuestTpr >> 4) != pVmcb->ctrl.IntCtrl.n.u8VTPR)
+         {
+             int rc = PDMApicSetTPR(pVCpu, (pVmcb->ctrl.IntCtrl.n.u8VTPR << 4));
+             AssertRC(rc);
+         }
+     }
  
      /* -XXX- premature interruption during event injection */
+ 
  }
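A minimal standalone sketch (not VirtualBox code; the function names and the main() harness are invented for illustration) of the TPR-to-VTPR mapping the new hmR0SvmLoadGuestApicState() above relies on: the VMCB's VTPR field carries only the 4-bit task-priority class, i.e. bits 7:4 of the architectural TPR, so loading the shadow is a right shift by 4 and syncing a guest change back through PDMApicSetTPR() is a left shift by 4.

    /* tpr_vtpr_sketch.c - illustrative only, builds with any C99 compiler. */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t TprToVTpr(uint8_t uTpr)   { return uTpr >> 4; }             /* keep only the priority class */
    static uint8_t VTprToTpr(uint8_t uVTpr)  { return (uint8_t)(uVTpr << 4); } /* sub-class bits are not shadowed */

    int main(void)
    {
        uint8_t uGuestTpr = 0x5c;                 /* class 5, sub-class 0xc */
        uint8_t uVTpr     = TprToVTpr(uGuestTpr);
        assert(uVTpr == 0x05);                    /* what the VMCB VTPR field holds */
        assert(VTprToTpr(uVTpr) == 0x50);         /* what the virtual APIC sees after syncing back */
        return 0;
    }

This also shows why the post-run comparison only looks at the priority class: the lower four TPR bits are never round-tripped through the VMCB.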
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r46517 → r46530

      if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
      {
-         /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
+         /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
          if (fPendingIntr)
              hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
…
  
      /* Clear any unused and reserved bits. */
-     pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_CR2
-                                       | HM_CHANGED_VMX_RESERVED1
-                                       | HM_CHANGED_VMX_RESERVED2);
+     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
  
      AssertMsg(!pVCpu->hm.s.fContextUseFlags,
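The second hunk drops the now-removed reserved bits from the mask cleared before the final sanity assertion. A small self-contained sketch of that dirty-flag bookkeeping pattern (the flag names below are hypothetical, not the real HM_CHANGED_* set): every state loader clears its own bit, and any bit that has no loader must be masked out explicitly or the assertion that everything was consumed fires.

    /* dirty_flags_sketch.c - illustrative only. */
    #include <assert.h>
    #include <stdint.h>

    #define CHANGED_GUEST_RIP   UINT32_C(0x0001)
    #define CHANGED_GUEST_CR2   UINT32_C(0x0002)   /* intentionally has no loader of its own */
    #define CHANGED_ALL         (CHANGED_GUEST_RIP | CHANGED_GUEST_CR2)

    int main(void)
    {
        uint32_t fContextUseFlags = CHANGED_ALL;

        fContextUseFlags &= ~CHANGED_GUEST_RIP;    /* the RIP loader ran */
        fContextUseFlags &= ~CHANGED_GUEST_CR2;    /* no loader: cleared by hand, as in the hunk above */

        assert(fContextUseFlags == 0);             /* mirrors AssertMsg(!pVCpu->hm.s.fContextUseFlags, ...) */
        return 0;
    }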
trunk/src/VBox/VMM/include/HMInternal.h
r46444 → r46530

  # define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(19)
  # define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(20)
- # define HM_CHANGED_VMX_RESERVED1                RT_BIT(21)
- # define HM_CHANGED_VMX_RESERVED2                RT_BIT(22)
  /* AMD-V specific state. */
- # define HM_CHANGED_SVM_INTERCEPT_VECTORS        RT_BIT(16)
- # define HM_CHANGED_SVM_IOPM_MSRPM_BITMAPS       RT_BIT(17)
- # define HM_CHANGED_SVM_GUEST_ASID               RT_BIT(18)
- # define HM_CHANGED_SVM_GUEST_TPR                RT_BIT(19)
- # define HM_CHANGED_SVM_GUEST_NP                 RT_BIT(20)
- # define HM_CHANGED_SVM_LBR                      RT_BIT(21)
- # define HM_CHANGED_SVM_AVIC                     RT_BIT(22)
+ # define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(16)
+ # define HM_CHANGED_SVM_RESERVED1                RT_BIT(17)
+ # define HM_CHANGED_SVM_RESERVED2                RT_BIT(18)
+ # define HM_CHANGED_SVM_RESERVED3                RT_BIT(19)
+ # define HM_CHANGED_SVM_RESERVED4                RT_BIT(20)
  
  # define HM_CHANGED_HOST_CONTEXT                 RT_BIT(23)
…
                                     | HM_CHANGED_VMX_GUEST_APIC_STATE \
                                     | HM_CHANGED_VMX_ENTRY_CTLS       \
-                                    | HM_CHANGED_VMX_EXIT_CTLS        \
-                                    | HM_CHANGED_VMX_RESERVED1        \
-                                    | HM_CHANGED_VMX_RESERVED2)
+                                    | HM_CHANGED_VMX_EXIT_CTLS)
  #endif
…
      /** Virtual address of the MSR bitmap. */
      R0PTRTYPE(void *)           pvMsrBitmap;
+ 
+     /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
+      *  we should check if the VTPR changed on every VM-exit. */
+     bool                        fSyncVTpr;
  } svm;
  
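One detail visible in the reshuffled defines: the VMX-specific and the AMD-V-specific HM_CHANGED_* flags occupy the same bit positions (16 and up). A short sketch of why that reuse is safe, assuming, as in VirtualBox, that a given VM only ever runs on one of the two hardware back-ends; the macro and flag names below are invented for the example, not taken from HMInternal.h.

    /* shared_bits_sketch.c - illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define MY_BIT(b)                        (UINT32_C(1) << (b))
    #define CHANGED_VMX_GUEST_APIC_STATE     MY_BIT(16)   /* meaning when the VT-x back-end is active */
    #define CHANGED_SVM_GUEST_APIC_STATE     MY_BIT(16)   /* same bit position, AMD-V meaning */

    int main(void)
    {
        uint32_t fFlags = CHANGED_SVM_GUEST_APIC_STATE;
        /* An AMD-V-only code path tests the SVM name; the identically valued VMX name is never consulted here. */
        printf("APIC state dirty: %s\n", (fFlags & CHANGED_SVM_GUEST_APIC_STATE) ? "yes" : "no");
        return 0;
    }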