- Timestamp: Mar 23, 2017 2:50:07 PM
- Location: trunk/src/VBox/VMM
- Files: 10 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r65904 r66227 1435 1435 static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_Amd64Efer(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue) 1436 1436 { 1437 PVM pVM = pVCpu->CTX_SUFF(pVM);1438 uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;1439 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x800000011440 ? pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx1441 : 0;1442 uint64_t fMask = 0;1443 uint64_t fIgnoreMask = MSR_K6_EFER_LMA;1444 1437 RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue); 1445 1446 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */ 1447 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) 1448 fMask |= MSR_K6_EFER_NXE; 1449 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 1450 fMask |= MSR_K6_EFER_LME; 1451 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL) 1452 fMask |= MSR_K6_EFER_SCE; 1453 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR) 1454 fMask |= MSR_K6_EFER_FFXSR; 1455 if (pVM->cpum.s.GuestFeatures.fSvm) 1456 fMask |= MSR_K6_EFER_SVME; 1457 1458 /* #GP(0) If anything outside the allowed bits is set. */ 1459 if (uValue & ~(fIgnoreMask | fMask)) 1460 { 1461 Log(("CPUM: Settings disallowed EFER bit. uValue=%#RX64 fAllowed=%#RX64 -> #GP(0)\n", uValue, fMask)); 1438 uint64_t uValidatedEfer; 1439 uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER; 1440 int rc = CPUMGetValidateEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0, uOldEfer, uValue, &uValidatedEfer); 1441 if (RT_FAILURE(rc)) 1462 1442 return VERR_CPUM_RAISE_GP_0; 1463 } 1464 1465 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if 1466 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */ 1467 if ( (uOldEfer & MSR_K6_EFER_LME) != (uValue & fMask & MSR_K6_EFER_LME) 1468 && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)) 1469 { 1470 Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n")); 1471 return VERR_CPUM_RAISE_GP_0; 1472 } 1473 1474 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */ 1475 AssertMsg(!(uValue & ~( MSR_K6_EFER_NXE 1476 | MSR_K6_EFER_LME 1477 | MSR_K6_EFER_LMA /* ignored anyway */ 1478 | MSR_K6_EFER_SCE 1479 | MSR_K6_EFER_FFXSR 1480 | MSR_K6_EFER_SVME)), 1481 ("Unexpected value %#RX64\n", uValue)); 1482 pVCpu->cpum.s.Guest.msrEFER = (uOldEfer & ~fMask) | (uValue & fMask); 1443 1444 pVCpu->cpum.s.Guest.msrEFER = uValidatedEfer; 1483 1445 1484 1446 /* AMD64 Architecture Programmer's Manual: 15.15 TLB Control; flush the TLB … … 5424 5386 * @returns Pointer to the range if found, NULL if not. 5425 5387 * @param pVM The cross context VM structure. 5426 * @param idMsr 5388 * @param idMsr The MSR to look up. 5427 5389 */ 5428 5390 # ifndef IN_RING3 … … 6141 6103 6142 6104 6105 /** 6106 * Validates an EFER MSR write. 6107 * 6108 * @returns VBox status code. 6109 * @param pVM The cross context VM structure. 6110 * @param uCr0 The CR0 of the CPU corresponding to the EFER MSR. 6111 * @param uOldEfer Value of the previous EFER MSR on the CPU if any. 6112 * @param uNewEfer The new EFER MSR value being written. 6113 * @param puValidEfer Where to store the validated EFER (only updated if 6114 * this function returns VINF_SUCCESS). 6115 */ 6116 VMMDECL(int) CPUMGetValidateEfer(PVM pVM, uint64_t uCr0, uint64_t uOldEfer, uint64_t uNewEfer, uint64_t *puValidEfer) 6117 { 6118 uint32_t const fExtFeatures = pVM->cpum.s.aGuestCpuIdPatmExt[0].uEax >= 0x80000001 6119 ? 
pVM->cpum.s.aGuestCpuIdPatmExt[1].uEdx 6120 : 0; 6121 uint64_t fMask = 0; 6122 uint64_t const fIgnoreMask = MSR_K6_EFER_LMA; 6123 6124 /* Filter out those bits the guest is allowed to change. (e.g. LMA is read-only) */ 6125 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_NX) 6126 fMask |= MSR_K6_EFER_NXE; 6127 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE) 6128 fMask |= MSR_K6_EFER_LME; 6129 if (fExtFeatures & X86_CPUID_EXT_FEATURE_EDX_SYSCALL) 6130 fMask |= MSR_K6_EFER_SCE; 6131 if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR) 6132 fMask |= MSR_K6_EFER_FFXSR; 6133 if (pVM->cpum.s.GuestFeatures.fSvm) 6134 fMask |= MSR_K6_EFER_SVME; 6135 6136 /* #GP(0) If anything outside the allowed bits is set. */ 6137 if (uNewEfer & ~(fIgnoreMask | fMask)) 6138 { 6139 Log(("CPUM: Settings disallowed EFER bit. uNewEfer=%#RX64 fAllowed=%#RX64 -> #GP(0)\n", uNewEfer, fMask)); 6140 return VERR_CPUM_RAISE_GP_0; 6141 } 6142 6143 /* Check for illegal MSR_K6_EFER_LME transitions: not allowed to change LME if 6144 paging is enabled. (AMD Arch. Programmer's Manual Volume 2: Table 14-5) */ 6145 if ( (uOldEfer & MSR_K6_EFER_LME) != (uNewEfer & fMask & MSR_K6_EFER_LME) 6146 && (uCr0 & X86_CR0_PG)) 6147 { 6148 Log(("CPUM: Illegal MSR_K6_EFER_LME change: paging is enabled!!\n")); 6149 return VERR_CPUM_RAISE_GP_0; 6150 } 6151 6152 /* There are a few more: e.g. MSR_K6_EFER_LMSLE */ 6153 AssertMsg(!(uNewEfer & ~( MSR_K6_EFER_NXE 6154 | MSR_K6_EFER_LME 6155 | MSR_K6_EFER_LMA /* ignored anyway */ 6156 | MSR_K6_EFER_SCE 6157 | MSR_K6_EFER_FFXSR 6158 | MSR_K6_EFER_SVME)), 6159 ("Unexpected value %#RX64\n", uNewEfer)); 6160 6161 *puValidEfer = (uOldEfer & ~fMask) | (uNewEfer & fMask); 6162 return VINF_SUCCESS; 6163 } 6164 6143 6165 #ifdef IN_RING0 6144 6166 -
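The net effect of the CPUMAllMsrs.cpp change is that the EFER checks formerly inlined in cpumMsrWr_Amd64Efer now live in the exported CPUMGetValidateEfer helper, so the nested-SVM code can reuse them. A minimal sketch of the calling pattern (the types and status codes are the ones visible in the hunk; the wrapper function itself is illustrative only):

    /* Illustrative wrapper only: validate the new EFER value first, commit it
       only on success, and let the caller raise #GP(0) on failure. */
    static VBOXSTRICTRC exampleWriteGuestEfer(PVMCPU pVCpu, uint64_t uNewEfer)
    {
        uint64_t       uValidatedEfer;
        uint64_t const uOldEfer = pVCpu->cpum.s.Guest.msrEFER;
        int rc = CPUMGetValidateEfer(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.s.Guest.cr0,
                                     uOldEfer, uNewEfer, &uValidatedEfer);
        if (RT_FAILURE(rc))
            return VERR_CPUM_RAISE_GP_0;   /* puValidEfer is left untouched on failure. */
        pVCpu->cpum.s.Guest.msrEFER = uValidatedEfer;
        return VINF_SUCCESS;
    }

HMSvmVmrun (below) reuses the same helper with uOldEfer set to 0 to validate the EFER field read from the nested-guest VMCB.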
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
(r65919 -> r66227)

      *
      * Note! We used to check CS.DPL here, assuming it was always equal to
  -   * CPL even if a conforming segment was loaded. But this truned out to
  +   * CPL even if a conforming segment was loaded. But this turned out to
      * only apply to older AMD-V. With VT-x we had an ACP2 regression
      * during install after a far call to ring 2 with VT-x. Then on newer
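The corrected comment explains why CPL is taken from SS.DPL rather than CS.DPL: with a conforming code segment loaded, CS.DPL may legitimately be lower than the current privilege level, while SS.DPL always equals the CPL in protected mode. A simplified sketch of that point (not the actual CPUMGetGuestCPL implementation):

    /* Simplified illustration only, not the real CPUMGetGuestCPL code. */
    static unsigned exampleGetCpl(PCCPUMCTX pCtx)
    {
        if (pCtx->eflags.Bits.u1VM)        /* V86 mode always runs at CPL 3.    */
            return 3;
        if (!(pCtx->cr0 & X86_CR0_PE))     /* Real mode: treat as CPL 0.        */
            return 0;
        return pCtx->ss.Attr.n.u2Dpl;      /* SS.DPL == CPL; CS.DPL may not be. */
    }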
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r66045 r66227 25 25 #include <VBox/vmm/gim.h> 26 26 #include <VBox/vmm/hm.h> 27 #include <VBox/vmm/iem.h> 27 28 #include <VBox/vmm/vm.h> 28 29 #include <VBox/vmm/hm_svm.h> … … 179 180 * 180 181 * @returns Strict VBox status code (i.e. informational status codes too). 182 * @retval VINF_SUCCESS successully executed VMRUN and entered nested-guest 183 * code execution. 184 * @retval VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT 185 * (SVM_EXIT_INVALID most likely). 186 * 181 187 * @param pVCpu The cross context virtual CPU structure. 182 188 * @param pCtx Pointer to the guest-CPU context. 183 189 * @param GCPhysVmcb Guest physical address of the VMCB to run. 184 190 */ 191 /** @todo move this to IEM and make the VMRUN version that can execute under 192 * hardware SVM here instead. */ 185 193 VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, RTGCPHYS GCPhysVmcb) 186 194 { 187 195 Assert(pVCpu); 188 196 Assert(pCtx); 197 PVM pVM = pVCpu->CTX_SUFF(pVM); 189 198 190 199 /* … … 194 203 195 204 /* 196 * Cache the VMCB controls.205 * Save host state. 197 206 */ 198 PVM pVM = pVCpu->CTX_SUFF(pVM);199 int rc = PGMPhysSimpleReadGCPhys(pVM, & pCtx->hwvirt.svm.VmcbCtrl, GCPhysVmcb, sizeof(pCtx->hwvirt.svm.VmcbCtrl));207 SVMVMCBSTATESAVE VmcbNstGst; 208 int rc = PGMPhysSimpleReadGCPhys(pVM, &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), sizeof(SVMVMCBSTATESAVE)); 200 209 if (RT_SUCCESS(rc)) 201 210 { 202 /*203 * Save host state.204 */205 211 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState; 206 212 pHostState->es = pCtx->es; … … 220 226 221 227 /* 222 * Validatethe VMCB controls.228 * Load the VMCB controls. 223 229 */ 224 if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN)) 230 AssertCompile(sizeof(pCtx->hwvirt.svm.VmcbCtrl) < RT_OFFSETOF(SVMVMCB, guest)); 231 rc = PGMPhysSimpleReadGCPhys(pVM, &pCtx->hwvirt.svm.VmcbCtrl, GCPhysVmcb, sizeof(pCtx->hwvirt.svm.VmcbCtrl)); 232 if (RT_SUCCESS(rc)) 225 233 { 226 Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n")); 227 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 234 PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl; 235 236 /* 237 * Validate guest-state and controls. 238 */ 239 /* VMRUN must always be intercepted. */ 240 if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN)) 241 { 242 Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n")); 243 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 244 } 245 246 /* Nested paging. */ 247 if ( pVmcbCtrl->NestedPaging.n.u1NestedPaging 248 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging) 249 { 250 Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n")); 251 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 252 } 253 254 /* AVIC. */ 255 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable 256 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fAvic) 257 { 258 Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n")); 259 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 260 } 261 262 /* Last branch record (LBR) virtualization. */ 263 if ( (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE) 264 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fLbrVirt) 265 { 266 Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n")); 267 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 268 } 269 270 /* Guest ASID. 
*/ 271 if (!pVmcbCtrl->TLBCtrl.n.u32ASID) 272 { 273 Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n")); 274 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 275 } 276 277 /* IO permission bitmap. */ 278 RTGCPHYS GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr; 279 if ( (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK) 280 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)) 281 { 282 Log(("HMSvmVmRun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap)); 283 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 284 } 285 286 /* MSR permission bitmap. */ 287 RTGCPHYS GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr; 288 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK) 289 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)) 290 { 291 Log(("HMSvmVmRun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap)); 292 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 293 } 294 295 /* CR0. */ 296 if ( !(VmcbNstGst.u64CR0 & X86_CR0_CD) 297 && (VmcbNstGst.u64CR0 & X86_CR0_NW)) 298 { 299 Log(("HMSvmVmRun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0)); 300 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 301 } 302 if (VmcbNstGst.u64CR0 >> 32) 303 { 304 Log(("HMSvmVmRun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0)); 305 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 306 } 307 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */ 308 309 /* DR6 and DR7. */ 310 if ( VmcbNstGst.u64DR6 >> 32 311 || VmcbNstGst.u64DR7 >> 32) 312 { 313 Log(("HMSvmVmRun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64DR6, 314 VmcbNstGst.u64DR6)); 315 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 316 } 317 318 /* 319 * Copy segments from nested-guest VMCB state to the guest-CPU state. 320 * 321 * We do this here as we need to use the CS attributes and it's easier this way 322 * then using the VMCB format selectors. It doesn't really matter where we copy 323 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway. 324 */ 325 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, ES, es); 326 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, CS, cs); 327 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, SS, ss); 328 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, DS, ds); 329 330 /** @todo Segment attribute overrides by VMRUN. */ 331 332 /* 333 * CPL adjustments and overrides. 334 * 335 * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL(). 336 * We shall thus adjust both CS.DPL and SS.DPL here. 337 */ 338 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = VmcbNstGst.u8CPL; 339 if (CPUMIsGuestInV86ModeEx(pCtx)) 340 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3; 341 if (CPUMIsGuestInRealModeEx(pCtx)) 342 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0; 343 344 /* 345 * Continue validating guest-state and controls. 346 */ 347 /* EFER, CR0 and CR4. 
*/ 348 uint64_t uValidEfer; 349 rc = CPUMGetValidateEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer); 350 if (RT_FAILURE(rc)) 351 { 352 Log(("HMSvmVmRun: EFER invalid uOldEfer=%#RX64 uValidEfer=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64EFER, uValidEfer)); 353 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 354 } 355 Assert( !(uValidEfer & MSR_K6_EFER_LME) 356 || VmcbNstGst.u64CR0 & X86_CR0_PG); 357 bool const fSvm = (uValidEfer & MSR_K6_EFER_SVME); 358 bool const fLongModeSupported = pVM->cpum.ro.GuestFeatures.fLongMode; 359 bool const fLongModeActiveOrEnabled = (uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA)); 360 bool const fLongModeEnabled = (uValidEfer & MSR_K6_EFER_LME); 361 bool const fPaging = (VmcbNstGst.u64CR0 & X86_CR0_PG); 362 bool const fPae = (VmcbNstGst.u64CR4 & X86_CR4_PAE); 363 bool const fProtMode = (VmcbNstGst.u64CR0 & X86_CR0_PE); 364 bool const fLongModeWithPaging = fLongModeEnabled && fPaging; 365 bool const fLongModeConformCS = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig; 366 if ( !fSvm 367 || (!fLongModeSupported && fLongModeActiveOrEnabled) 368 || (fLongModeWithPaging && !fPae) 369 || (fLongModeWithPaging && !fProtMode) 370 || ( fLongModeEnabled 371 && fPaging 372 && fPae 373 && fLongModeConformCS)) 374 { 375 Log(("HMSvmVmRun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer)); 376 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 377 } 378 379 /* 380 * Preserve the required force-flags. 381 * 382 * We only preserve the force-flags that would affect the execution of the 383 * nested-guest (or the guest). 384 * 385 * - VMCPU_FF_INHIBIT_INTERRUPTS needn't be preserved as it's for a single 386 * instruction which is this VMRUN instruction itself. 387 * 388 * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the 389 * execution of a subsequent IRET instruction in the guest. 390 * 391 * - The remaining FFs (e.g. timers) can stay in place so that we will be 392 * able to generate interrupts that should cause #VMEXITs for the 393 * nested-guest. 394 */ 395 /** @todo anything missed more here? */ 396 pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS; 397 398 /* 399 * Interrupt shadow. 400 */ 401 if (pVmcbCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE) 402 EMSetInhibitInterruptsPC(pVCpu, VmcbNstGst.u64RIP); 403 404 /* 405 * TLB flush control. 406 */ 407 /** @todo @bugref{7243}: ASID based PGM TLB flushes. */ 408 if ( pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE 409 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT 410 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS) 411 PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */); 412 413 /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */ 414 415 /* 416 * Copy the remaining guest state from the VMCB to the guest-CPU context. 
417 */ 418 pCtx->gdtr.cbGdt = VmcbNstGst.GDTR.u32Limit; 419 pCtx->gdtr.pGdt = VmcbNstGst.GDTR.u64Base; 420 pCtx->idtr.cbIdt = VmcbNstGst.IDTR.u32Limit; 421 pCtx->idtr.pIdt = VmcbNstGst.IDTR.u64Base; 422 pCtx->cr0 = VmcbNstGst.u64CR0; 423 pCtx->cr4 = VmcbNstGst.u64CR4; 424 pCtx->cr3 = VmcbNstGst.u64CR3; 425 pCtx->cr2 = VmcbNstGst.u64CR2; 426 pCtx->dr[6] = VmcbNstGst.u64DR6; 427 pCtx->dr[7] = VmcbNstGst.u64DR7; 428 pCtx->rflags.u = VmcbNstGst.u64RFlags; 429 pCtx->rax = VmcbNstGst.u64RAX; 430 pCtx->rsp = VmcbNstGst.u64RSP; 431 pCtx->rip = VmcbNstGst.u64RIP; 432 433 /* Mask DR6, DR7 bits mandatory set/clear bits. */ 434 pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK); 435 pCtx->dr[6] |= X86_DR6_RA1_MASK; 436 pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK); 437 pCtx->dr[7] |= X86_DR7_RA1_MASK; 438 439 /* 440 * Check for pending virtual interrupts. 441 */ 442 if (pVmcbCtrl->IntCtrl.n.u1VIrqValid) 443 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST); 444 445 /* 446 * Clear global interrupt flags to allow interrupts in the guest. 447 */ 448 pCtx->hwvirt.svm.fGif = 1; 449 450 /* 451 * Event injection. 452 */ 453 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject; 454 if (pEventInject->n.u1Valid) 455 { 456 uint8_t const uVector = pEventInject->n.u8Vector; 457 TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEventInject); 458 uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0; 459 460 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */ 461 if (enmType == TRPM_32BIT_HACK) 462 { 463 Log(("HMSvmVmRun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type)); 464 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 465 } 466 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION) 467 { 468 if ( uVector == X86_XCPT_NMI 469 || uVector > 31 /* X86_XCPT_MAX */) 470 { 471 Log(("HMSvmVmRun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector)); 472 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 473 } 474 if ( uVector == X86_XCPT_BR 475 && CPUMIsGuestInLongModeEx(pCtx)) 476 { 477 Log(("HMSvmVmRun: Cannot inject #BR when not in long mode -> #VMEXIT\n")); 478 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 479 } 480 /** @todo any others? */ 481 } 482 483 /** @todo NRIP: Software interrupts can only be pushed properly if we support 484 * NRIP for the nested-guest to calculate the instruction length 485 * below. */ 486 IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */); 487 } 488 489 return VINF_SUCCESS; 228 490 } 229 if ( pCtx->hwvirt.svm.VmcbCtrl.NestedPaging.n.u1NestedPaging 230 && !pVM->cpum.ro.GuestFeatures.svm.feat.n.fNestedPaging) 231 { 232 Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n")); 233 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 234 } 235 if (!pCtx->hwvirt.svm.VmcbCtrl.TLBCtrl.n.u32ASID) 236 { 237 Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n")); 238 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */); 239 } 240 241 /** @todo the rest. */ 242 243 return VERR_NOT_IMPLEMENTED; 491 492 /* Shouldn't really happen as the caller should've validated the physical address already. 
*/ 493 Log(("HMSvmVmRun: Failed to read nested-guest VMCB control area at %#RGp -> #VMEXIT\n", 494 GCPhysVmcb)); 495 return VERR_SVM_IPE_4; 244 496 } 245 497 246 return rc; 498 /* Shouldn't really happen as the caller should've validated the physical address already. */ 499 Log(("HMSvmVmRun: Failed to read nested-guest VMCB save-state area at %#RGp -> #VMEXIT\n", 500 GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest))); 501 return VERR_SVM_IPE_5; 247 502 } 248 503 249 504 250 505 /** 251 * SVM nested-guest VMEXIT handler.506 * SVM nested-guest \#VMEXIT handler. 252 507 * 253 508 * @returns Strict VBox status code. 509 * @retval VINF_SVM_VMEXIT when the \#VMEXIT is successful. 510 * @retval VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's 511 * "host state" and a shutdown is required. 512 * 254 513 * @param pVCpu The cross context virtual CPU structure. 255 514 * @param pCtx The guest-CPU context. … … 267 526 268 527 pCtx->hwvirt.svm.fGif = 0; 269 270 /** @todo implement \#VMEXIT. */ 271 272 return VINF_SUCCESS; 528 #ifdef VBOX_STRICT 529 RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl); 530 RT_ZERO(pCtx->hwvirt.svm.HostState); 531 pCtx->hwvirt.svm.GCPhysVmcb = NIL_RTGCPHYS; 532 #endif 533 534 /* 535 * Save the nested-guest state into the VMCB state-save area. 536 */ 537 SVMVMCBSTATESAVE VmcbNstGst; 538 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, ES, es); 539 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, CS, cs); 540 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, SS, ss); 541 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, DS, ds); 542 VmcbNstGst.GDTR.u32Limit = pCtx->gdtr.cbGdt; 543 VmcbNstGst.GDTR.u64Base = pCtx->gdtr.pGdt; 544 VmcbNstGst.IDTR.u32Limit = pCtx->idtr.cbIdt; 545 VmcbNstGst.IDTR.u32Limit = pCtx->idtr.pIdt; 546 VmcbNstGst.u64EFER = pCtx->msrEFER; 547 VmcbNstGst.u64CR4 = pCtx->cr4; 548 VmcbNstGst.u64CR3 = pCtx->cr3; 549 VmcbNstGst.u64CR2 = pCtx->cr2; 550 VmcbNstGst.u64CR0 = pCtx->cr0; 551 /** @todo Nested paging. */ 552 VmcbNstGst.u64RFlags = pCtx->rflags.u64; 553 VmcbNstGst.u64RIP = pCtx->rip; 554 VmcbNstGst.u64RSP = pCtx->rsp; 555 VmcbNstGst.u64RAX = pCtx->rax; 556 VmcbNstGst.u64DR7 = pCtx->dr[6]; 557 VmcbNstGst.u64DR6 = pCtx->dr[7]; 558 VmcbNstGst.u8CPL = pCtx->ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */ 559 560 /* Save interrupt shadow of the nested-guest instruction if any. */ 561 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) 562 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip) 563 { 564 RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl); 565 pCtx->hwvirt.svm.VmcbCtrl.u64IntShadow |= SVM_INTERRUPT_SHADOW_ACTIVE; 566 } 567 568 /* 569 * Save additional state and intercept information. 570 */ 571 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) 572 { 573 Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqValid); 574 Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VIrqVector); 575 } 576 /* Save V_TPR. */ 577 578 /** @todo NRIP. */ 579 580 /* Save exit information. */ 581 pCtx->hwvirt.svm.VmcbCtrl.u64ExitCode = uExitCode; 582 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1; 583 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2; 584 585 /* 586 * Clear event injection. 587 */ 588 pCtx->hwvirt.svm.VmcbCtrl.EventInject.n.u1Valid = 0; 589 590 /* 591 * Write back the VMCB controls to the guest VMCB in guest physical memory. 
592 */ 593 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->hwvirt.svm.GCPhysVmcb, &pCtx->hwvirt.svm.VmcbCtrl, 594 sizeof(pCtx->hwvirt.svm.VmcbCtrl)); 595 if (RT_SUCCESS(rc)) 596 { 597 /* 598 * Prepare for guest's "host mode" by clearing internal processor state bits. 599 * 600 * Some of these like TSC offset can then be used unconditionally in our TM code 601 * but the offset in the guest's VMCB will remain as it should as we've written 602 * back the VMCB controls above. 603 */ 604 RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl); 605 #if 0 606 /* Clear TSC offset. */ 607 pCtx->hwvirt.svm.VmcbCtrl.u64TSCOffset = 0; 608 pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqValid = 0; 609 #endif 610 /* Restore guest's force-flags. */ 611 if (pCtx->hwvirt.fLocalForcedActions) 612 VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions); 613 614 /* Clear nested-guest's interrupt pending. */ 615 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)) 616 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST); 617 618 /** @todo Nested paging. */ 619 /** @todo ASID. */ 620 621 /* 622 * Reload the guest's "host state". 623 */ 624 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState; 625 pCtx->es = pHostState->es; 626 pCtx->cs = pHostState->cs; 627 pCtx->ss = pHostState->ss; 628 pCtx->ds = pHostState->ds; 629 pCtx->gdtr = pHostState->gdtr; 630 pCtx->idtr = pHostState->idtr; 631 pCtx->msrEFER = pHostState->uEferMsr; 632 pCtx->cr0 = pHostState->uCr0 | X86_CR0_PE; 633 pCtx->cr3 = pHostState->uCr3; 634 pCtx->cr4 = pHostState->uCr4; 635 pCtx->rflags = pHostState->rflags; 636 pCtx->rflags.Bits.u1VM = 0; 637 pCtx->rip = pHostState->uRip; 638 pCtx->rsp = pHostState->uRsp; 639 pCtx->rax = pHostState->uRax; 640 /* The spec says "Disables all hardware breakpoints in DR7"... */ 641 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK); 642 pCtx->dr[7] |= X86_DR7_RA1_MASK; 643 644 rc = VINF_SVM_VMEXIT; 645 } 646 else 647 { 648 Log(("HMNstGstSvmVmExit: Writing VMCB at %#RGp failed\n", pCtx->hwvirt.svm.GCPhysVmcb)); 649 rc = VERR_SVM_VMEXIT_FAILED; 650 } 651 652 return rc; 273 653 } 274 else 275 { 276 Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, 277 uExitInfo1, uExitInfo2)); 278 RT_NOREF2(uExitInfo1, uExitInfo2); 279 } 280 654 655 Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode, 656 uExitInfo1, uExitInfo2)); 657 RT_NOREF2(uExitInfo1, uExitInfo2); 281 658 return VERR_SVM_IPE_5; 282 659 } 283 660 661 662 /** 663 * Converts an SVM event type to a TRPM event type. 664 * 665 * @returns The TRPM event type. 666 * @retval TRPM_32BIT_HACK if the specified @a uType isn't among the set of 667 * recognized trap types. 668 * 669 * @param uType The SVM event type (see SVM_EVENT_XXX). 670 */ 671 VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent) 672 { 673 uint8_t const uType = pEvent->n.u3Type; 674 switch (uType) 675 { 676 case SVM_EVENT_EXTERNAL_IRQ: return TRPM_HARDWARE_INT; 677 case SVM_EVENT_SOFTWARE_INT: return TRPM_SOFTWARE_INT; 678 case SVM_EVENT_EXCEPTION: 679 case SVM_EVENT_NMI: return TRPM_TRAP; 680 default: 681 break; 682 } 683 AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType)); 684 return TRPM_32BIT_HACK; 685 } 686 -
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
(r66159 -> r66227)

      VBOXSTRICTRC rcStrict = HMSvmVmrun(pVCpu, pCtx, GCPhysVmcb);
  -   RT_NOREF(cbInstr);
  +   /* If VMRUN execution causes a #VMEXIT, we continue executing the instruction following the VMRUN. */
  +   if (rcStrict == VINF_SVM_VMEXIT)
  +   {
  +       iemRegAddToRipAndClearRF(pVCpu, cbInstr);
  +       rcStrict = VINF_SUCCESS;
  +   }
      return rcStrict;
  }
  ...
  {
      PCSVMVMCB pVmcb = (PCSVMVMCB)pvVmcb;
  -   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, FS, fs);
  -   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, GS, gs);
  -   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, TR, tr);
  -   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcb, LDTR, ldtr);
  +   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, FS, fs);
  +   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, GS, gs);
  +   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, TR, tr);
  +   HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);

      pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;
  ...
  {
      PSVMVMCB pVmcb = (PSVMVMCB)pvVmcb;
  -   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, FS, fs);
  -   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, GS, gs);
  -   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, TR, tr);
  -   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, LDTR, ldtr);
  +   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs);
  +   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs);
  +   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr);
  +   HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr);

      pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;
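Together with the HMSvmVmrun changes in HMSVMAll.cpp above, this gives VMRUN its architectural behaviour under emulation: if a consistency check fails, the nested guest is never entered, a #VMEXIT with SVM_EXIT_INVALID is delivered, and IEM resumes at the instruction following VMRUN. The EFER/long-mode part of those checks, restated as a standalone predicate (a condensed illustration of the logic already present in HMSvmVmrun, not additional VirtualBox code):

    /* True if the nested-guest EFER/CR0/CR4/CS.L+CS.D combination is illegal and
       VMRUN must fail with SVM_EXIT_INVALID (same conditions as in HMSvmVmrun). */
    static bool exampleIsVmrunEferStateInvalid(uint64_t uValidEfer, uint64_t uCr0, uint64_t uCr4,
                                               bool fLongModeSupported, bool fCsLongAndDefBig)
    {
        bool const fSvme     = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);
        bool const fLme      = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);
        bool const fLmeOrLma = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));
        bool const fPaging   = RT_BOOL(uCr0 & X86_CR0_PG);
        bool const fPae      = RT_BOOL(uCr4 & X86_CR4_PAE);
        bool const fProtMode = RT_BOOL(uCr0 & X86_CR0_PE);
        return !fSvme                                         /* EFER.SVME must be 1.                 */
            || (!fLongModeSupported && fLmeOrLma)             /* LME/LMA set without long-mode CPUID. */
            || (fLme && fPaging && !fPae)                     /* Long mode + paging requires PAE...   */
            || (fLme && fPaging && !fProtMode)                /* ...and protected mode.               */
            || (fLme && fPaging && fPae && fCsLongAndDefBig); /* CS.L and CS.D must not both be set.  */
    }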
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
(r63465 -> r66227)

      else
          u64 = pVCpu->tm.s.u64TSC;
  +   /** @todo @bugref{7243}: SVM TSC offset. */
      return u64;
  }
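The new @todo marks where the nested-guest TSC offset will have to be applied. Architecturally, SVM adds the VMCB control field TSC_OFFSET to every RDTSC/RDTSCP result while the nested guest runs; a hedged sketch of the eventual adjustment (the helper and its bool parameter are hypothetical, only the VMCB field name comes from this changeset):

    /* Hypothetical sketch of the adjustment the @todo asks for. */
    static uint64_t exampleNestedGuestTscRead(uint64_t uGuestTsc, bool fInSvmNestedGuestMode,
                                              PCCPUMCTX pCtx)
    {
        if (fInSvmNestedGuestMode)
            return uGuestTsc + pCtx->hwvirt.svm.VmcbCtrl.u64TSCOffset;
        return uGuestTsc;
    }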
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r66104 r66227 728 728 pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1; 729 729 730 /* Ignore the priority in the TPR. This is necessary for delivering PIC style (ExtInt) interrupts and we currently731 deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */730 /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts 731 and we currently deliver both PIC and APIC interrupts alike. See hmR0SvmInjectPendingEvent() */ 732 732 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1; 733 733 … … 1301 1301 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS)) 1302 1302 { 1303 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, CS, cs);1304 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, SS, ss);1305 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, DS, ds);1306 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, ES, es);1307 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, FS, fs);1308 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, GS, gs);1303 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs); 1304 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss); 1305 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds); 1306 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es); 1307 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs); 1308 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs); 1309 1309 1310 1310 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl; … … 1316 1316 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR)) 1317 1317 { 1318 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, TR, tr);1318 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr); 1319 1319 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR); 1320 1320 } … … 1323 1323 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR)) 1324 1324 { 1325 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, pVmcb, LDTR, ldtr);1325 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr); 1326 1326 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR); 1327 1327 } … … 1951 1951 * Guest segment registers (includes FS, GS base MSRs for 64-bit guests). 1952 1952 */ 1953 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, CS, cs);1954 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, SS, ss);1955 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, DS, ds);1956 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, ES, es);1957 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, FS, fs);1958 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, GS, gs);1953 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, CS, cs); 1954 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, SS, ss); 1955 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, DS, ds); 1956 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, ES, es); 1957 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, FS, fs); 1958 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, GS, gs); 1959 1959 1960 1960 /* … … 2006 2006 * ASSUME that it's normally correct and that we're in 32-bit or 64-bit mode. 2007 2007 */ 2008 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, TR, tr);2008 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, TR, tr); 2009 2009 if (pMixedCtx->tr.Attr.n.u4Type != X86_SEL_TYPE_SYS_386_TSS_BUSY) 2010 2010 { … … 2019 2019 * Guest Descriptor-Table registers. 
2020 2020 */ 2021 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, pVmcb, LDTR, ldtr);2021 HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, LDTR, ldtr); 2022 2022 pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit; 2023 2023 pMixedCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base; … … 2453 2453 Event.u = pVCpu->hm.s.Event.u64IntInfo; 2454 2454 2455 uint8_t uVector = Event.n.u8Vector; 2456 uint8_t uVectorType = Event.n.u3Type; 2457 2458 TRPMEVENT enmTrapType; 2459 switch (uVectorType) 2460 { 2461 case SVM_EVENT_EXTERNAL_IRQ: 2462 enmTrapType = TRPM_HARDWARE_INT; 2463 break; 2464 case SVM_EVENT_SOFTWARE_INT: 2465 enmTrapType = TRPM_SOFTWARE_INT; 2466 break; 2467 case SVM_EVENT_EXCEPTION: 2468 case SVM_EVENT_NMI: 2469 enmTrapType = TRPM_TRAP; 2470 break; 2471 default: 2472 AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType)); 2473 enmTrapType = TRPM_32BIT_HACK; 2474 break; 2475 } 2455 uint8_t uVector = Event.n.u8Vector; 2456 uint8_t uVectorType = Event.n.u3Type; 2457 TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event); 2476 2458 2477 2459 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType)); -
trunk/src/VBox/VMM/VMMR3/EM.cpp
(r65794 -> r66227)

          && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
  #endif
  -       && !HMR3IsEventPending(pVCpu))
  -   {
  +       /** @todo Ask CPUM about nested hw.virt interrupt pending */)
  +   {
  +       Assert(!HMR3IsEventPending(pVCpu));
          Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
          if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
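The @todo hints that EM should eventually query CPUM about the nested hardware-virtualization state before forwarding a physical interrupt. A hedged sketch of the kind of check meant here (the helper name is hypothetical; fGif and the hwvirt.svm state come from the CPUMCTX changes in this changeset, and the sketch ignores V_INTR_MASKING for simplicity):

    /* Hypothetical sketch: with an SVM nested guest, physical interrupts are
       only deliverable while the global interrupt flag (GIF) is set. */
    static bool exampleCanDeliverPhysicalInterrupt(PCCPUMCTX pCtx)
    {
        if (!pCtx->hwvirt.svm.fGif)
            return false;
        return RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
    }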
trunk/src/VBox/VMM/include/CPUMInternal.mac
(r66040 -> r66227)

      .Guest.abPadding resb 12
  %endif
      .Guest.hwvirt.svm.uMsrHSavePa          resq 1
      .Guest.hwvirt.svm.GCPhysVmcb           resq 1
      .Guest.hwvirt.svm.VmcbCtrl             resb 256
      .Guest.hwvirt.svm.HostState            resb 184
      .Guest.hwvirt.svm.fGif                 resb 1
  +   .Guest.hwvirt.svm.abPadding0           resb 7
  +   .Guest.hwvirt.fLocalForcedActions      resd 1
      alignb 64
  ...
      .Hyper.abPadding resb 12
  %endif
      .Hyper.hwvirt.svm.uMsrHSavePa          resq 1
      .Hyper.hwvirt.svm.GCPhysVmcb           resq 1
      .Hyper.hwvirt.svm.VmcbCtrl             resb 256
      .Hyper.hwvirt.svm.HostState            resb 184
      .Hyper.hwvirt.svm.fGif                 resb 1
  +   .Hyper.hwvirt.svm.abPadding0           resb 7
  +   .Hyper.hwvirt.fLocalForcedActions      resd 1
      alignb 64

  (The five pre-existing .hwvirt.svm lines in both blocks are only re-aligned; the lines marked + are the actual additions.)
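These offsets have to match the C declaration of CPUMCTX byte for byte (the tstVMStruct.h entry below checks exactly that), and the seven padding bytes keep fLocalForcedActions naturally aligned after the single fGif byte. An abridged sketch of the corresponding C-side layout (the stand-in types only mirror the resq/resb/resd sizes; the real members use RTGCPHYS, SVMVMCBCTRL and SVMHOSTSTATE as seen elsewhere in this changeset):

    /* Abridged sketch of the layout the .mac offsets describe; byte sizes match
       the reservations above (8, 8, 256, 184, 1, 7 and 4 bytes). */
    typedef struct EXAMPLEHWVIRTSTATE
    {
        struct
        {
            uint64_t    uMsrHSavePa;            /* resq 1                   */
            uint64_t    GCPhysVmcb;             /* resq 1   (RTGCPHYS)      */
            uint8_t     VmcbCtrl[256];          /* resb 256 (SVMVMCBCTRL)   */
            uint8_t     HostState[184];         /* resb 184 (SVMHOSTSTATE)  */
            uint8_t     fGif;                   /* resb 1                   */
            uint8_t     abPadding0[7];          /* resb 7                   */
        } svm;
        uint32_t        fLocalForcedActions;    /* resd 1                   */
    } EXAMPLEHWVIRTSTATE;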
trunk/src/VBox/VMM/include/HMInternal.h
(r65448 -> r66227)

  #include <VBox/vmm/pgm.h>
  #include <VBox/vmm/cpum.h>
  + #include <VBox/vmm/trpm.h>
  #include <iprt/memobj.h>
  #include <iprt/cpuset.h>
  ...
  AssertCompileMemberAlignment(HMCPU, Event, 8);

  + VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);

  #ifdef IN_RING0
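This declares the helper that replaces the hand-rolled SVM-to-TRPM switch removed from HMSVMR0.cpp above and that HMSVMAll.cpp now defines, so ring-0 SVM code and the nested-guest emulation map SVM event types identically. A small usage sketch (illustrative wrapper only; the helper and the TRPM_32BIT_HACK sentinel are taken from the hunks above):

    /* Illustrative only: reject event types the shared helper cannot map. */
    static int exampleConvertSvmEvent(PCSVMEVENT pEvent, TRPMEVENT *penmType)
    {
        TRPMEVENT const enmType = HMSvmEventToTrpmEventType(pEvent);
        if (enmType == TRPM_32BIT_HACK)       /* Returned for unrecognized SVM event types. */
            return VERR_INVALID_PARAMETER;
        *penmType = enmType;
        return VINF_SUCCESS;
    }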
trunk/src/VBox/VMM/testcase/tstVMStruct.h
(r66040 -> r66227)

  GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
  GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
  + GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
  /** @todo add rest of hwvirt fields when code is more
   * finalized. */