Changeset 46304 in vbox for trunk/src/VBox/VMM
- Timestamp: May 29, 2013 9:13:19 AM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
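No commit message survives in this extract; judging from the hunks below, r46304 is a mechanical rename in the AMD-V (SVM) ring-0 code: the per-VCPU VMCB members change spelling from VMCB to Vmcb (hMemObjVMCB to hMemObjVmcb, pvVMCB to pvVmcb, HCPhysVMCB to HCPhysVmcb, plus the *Host variants), local VMCB pointers become pVmcb, the types SVM_VMCB *, SVM_EVENT and SVM_IOIO_EXIT become PSVMVMCB, SVMEVENT and SVMIOIOEXIT, and HWSVMR0.cpp gets its copyright year bumped to 2013. As a reading aid, here is a minimal sketch of the renamed per-VCPU state; the struct name is made up and the field types are assumed from how the diff uses them (the real definition lives in a header that is not part of this changeset):

#include <iprt/types.h>   /* RTR0MEMOBJ, RTHCPHYS */

/* Hypothetical condensation of the per-VCPU SVM state touched by this rename. */
typedef struct SVMSTATESKETCH
{
    RTR0MEMOBJ  hMemObjVmcbHost;   /* was hMemObjVMCBHost: allocation backing the host state-save page */
    void       *pvVmcbHost;        /* was pvVMCBHost:      ring-0 mapping of that page */
    RTHCPHYS    HCPhysVmcbHost;    /* was HCPhysVMCBHost:  physical address handed to VMRUN */
    RTR0MEMOBJ  hMemObjVmcb;       /* was hMemObjVMCB:     allocation backing the guest VMCB page */
    void       *pvVmcb;            /* was pvVMCB:          ring-0 mapping, cast to PSVMVMCB before use */
    RTHCPHYS    HCPhysVmcb;        /* was HCPhysVMCB:      physical address of the guest VMCB */
    RTR0MEMOBJ  hMemObjMsrBitmap;  /* unchanged: backing for the 8 KB MSR permission bitmap */
} SVMSTATESKETCH;

Callers keep the access path pVCpu->hm.s.svm.&lt;member&gt;; only the spelling changes.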
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46297 → r46304

@@ lines 176-193 (per-VCPU teardown) @@
        PVMCPU pVCpu = &pVM->aCpus[i];

-       if (pVCpu->hm.s.svm.hMemObjVMCBHost != NIL_RTR0MEMOBJ)
-       {
-           RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCBHost, false);
-           pVCpu->hm.s.svm.pvVMCBHost = 0;
-           pVCpu->hm.s.svm.HCPhysVMCBHost = 0;
-           pVCpu->hm.s.svm.hMemObjVMCBHost = NIL_RTR0MEMOBJ;
-       }
-
-       if (pVCpu->hm.s.svm.hMemObjVMCB != NIL_RTR0MEMOBJ)
-       {
-           RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCB, false);
-           pVCpu->hm.s.svm.pvVMCB = 0;
-           pVCpu->hm.s.svm.HCPhysVMCB = 0;
-           pVCpu->hm.s.svm.hMemObjVMCB = NIL_RTR0MEMOBJ;
+       if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
+       {
+           RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
+           pVCpu->hm.s.svm.pvVmcbHost = 0;
+           pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
+           pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
+       }
+
+       if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
+       {
+           RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
+           pVCpu->hm.s.svm.pvVmcb = 0;
+           pVCpu->hm.s.svm.HCPhysVmcb = 0;
+           pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
        }

@@ lines 227-232 (per-VCPU handle initialisation) @@
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
-       pVCpu->hm.s.svm.hMemObjVMCBHost = NIL_RTR0MEMOBJ;
-       pVCpu->hm.s.svm.hMemObjVMCB = NIL_RTR0MEMOBJ;
+       pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
+       pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
        pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
    }

@@ lines 236-257 (per-VCPU allocations) @@
    {
        /* Allocate one page for the host context */
-       rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCBHost, 1 << PAGE_SHIFT, false /* fExecutable */);
+       rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

-       pVCpu->hm.s.svm.pvVMCBHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCBHost);
-       pVCpu->hm.s.svm.HCPhysVMCBHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCBHost, 0);
-       Assert(pVCpu->hm.s.svm.HCPhysVMCBHost < _4G);
-       ASMMemZeroPage(pVCpu->hm.s.svm.pvVMCBHost);
+       pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost);
+       pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0);
+       Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G);
+       ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);

        /* Allocate one page for the VM control block (VMCB). */
-       rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCB, 1 << PAGE_SHIFT, false /* fExecutable */);
+       rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
        if (RT_FAILURE(rc))
            goto failure_cleanup;

-       pVCpu->hm.s.svm.pvVMCB = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCB);
-       pVCpu->hm.s.svm.HCPhysVMCB = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCB, 0);
-       Assert(pVCpu->hm.s.svm.HCPhysVMCB < _4G);
-       ASMMemZeroPage(pVCpu->hm.s.svm.pvVMCB);
+       pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
+       pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0);
+       Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
+       ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);

        /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
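The three hunks above only change spelling; the per-page setup and teardown logic is untouched. For reference, here is a condensed sketch of that lifecycle using the post-rename names. The two helper names are invented for the sketch; the calls, members and flags are taken from the hunks above, and the MSR-bitmap page and goto-based cleanup of the real code are omitted.

/* Condensed per-VCPU VMCB page lifecycle, mirroring the hunks above. */
static int svmSketchAllocVmcb(PVMCPU pVCpu)
{
    /* Init: one physically contiguous page (RTR0MemObjAllocCont keeps it below 4 GB), mapped and zeroed. */
    int rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_SUCCESS(rc))
    {
        pVCpu->hm.s.svm.pvVmcb     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
        pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0);
        ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
    }
    return rc;
}

static void svmSketchFreeVmcb(PVMCPU pVCpu)
{
    /* Term: free the page and reset the bookkeeping so a second call is harmless. */
    if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false /* fFreeMappings */);
        pVCpu->hm.s.svm.pvVmcb      = 0;
        pVCpu->hm.s.svm.HCPhysVmcb  = 0;
        pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
    }
}

The same pattern is applied to the host-state page (the Vmcb*Host members) and, with an 8 KB size, to the MSR bitmap.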
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r46297 r46304 5 5 6 6 /* 7 * Copyright (C) 2006-201 2Oracle Corporation7 * Copyright (C) 2006-2013 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 70 70 Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \ 71 71 Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \ 72 p vVMCB->guest.REG.u16Sel = pCtx->reg.Sel; \73 p vVMCB->guest.REG.u32Limit = pCtx->reg.u32Limit; \74 p vVMCB->guest.REG.u64Base = pCtx->reg.u64Base; \75 p vVMCB->guest.REG.u16Attr = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \72 pVmcb->guest.REG.u16Sel = pCtx->reg.Sel; \ 73 pVmcb->guest.REG.u32Limit = pCtx->reg.u32Limit; \ 74 pVmcb->guest.REG.u64Base = pCtx->reg.u64Base; \ 75 pVmcb->guest.REG.u16Attr = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \ 76 76 } while (0) 77 77 … … 79 79 do \ 80 80 { \ 81 pCtx->reg.Sel = p vVMCB->guest.REG.u16Sel; \82 pCtx->reg.ValidSel = p vVMCB->guest.REG.u16Sel; \81 pCtx->reg.Sel = pVmcb->guest.REG.u16Sel; \ 82 pCtx->reg.ValidSel = pVmcb->guest.REG.u16Sel; \ 83 83 pCtx->reg.fFlags = CPUMSELREG_FLAGS_VALID; \ 84 pCtx->reg.u32Limit = p vVMCB->guest.REG.u32Limit; \85 pCtx->reg.u64Base = p vVMCB->guest.REG.u64Base; \86 pCtx->reg.Attr.u = SVM_HIDSEGATTR_SVM2VMX(p vVMCB->guest.REG.u16Attr); \84 pCtx->reg.u32Limit = pVmcb->guest.REG.u32Limit; \ 85 pCtx->reg.u64Base = pVmcb->guest.REG.u64Base; \ 86 pCtx->reg.Attr.u = SVM_HIDSEGATTR_SVM2VMX(pVmcb->guest.REG.u16Attr); \ 87 87 } while (0) 88 88 … … 233 233 PVMCPU pVCpu = &pVM->aCpus[i]; 234 234 235 pVCpu->hm.s.svm.hMemObjV MCBHost = NIL_RTR0MEMOBJ;236 pVCpu->hm.s.svm.hMemObjV MCB= NIL_RTR0MEMOBJ;235 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ; 236 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ; 237 237 pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ; 238 238 239 239 /* Allocate one page for the host context */ 240 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjV MCBHost, 1 << PAGE_SHIFT, false /* fExecutable */);240 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */); 241 241 if (RT_FAILURE(rc)) 242 242 return rc; 243 243 244 pVCpu->hm.s.svm.pvV MCBHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCBHost);245 pVCpu->hm.s.svm.HCPhysV MCBHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCBHost, 0);246 Assert(pVCpu->hm.s.svm.HCPhysV MCBHost < _4G);247 ASMMemZeroPage(pVCpu->hm.s.svm.pvV MCBHost);244 pVCpu->hm.s.svm.pvVmcbHost = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcbHost); 245 pVCpu->hm.s.svm.HCPhysVmcbHost = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcbHost, 0); 246 Assert(pVCpu->hm.s.svm.HCPhysVmcbHost < _4G); 247 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost); 248 248 249 249 /* Allocate one page for the VM control block (VMCB). 
*/ 250 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjV MCB, 1 << PAGE_SHIFT, false /* fExecutable */);250 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */); 251 251 if (RT_FAILURE(rc)) 252 252 return rc; 253 253 254 pVCpu->hm.s.svm.pvV MCB = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCB);255 pVCpu->hm.s.svm.HCPhysV MCB = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCB, 0);256 Assert(pVCpu->hm.s.svm.HCPhysV MCB< _4G);257 ASMMemZeroPage(pVCpu->hm.s.svm.pvV MCB);254 pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb); 255 pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0); 256 Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G); 257 ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb); 258 258 259 259 /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */ … … 284 284 PVMCPU pVCpu = &pVM->aCpus[i]; 285 285 286 if (pVCpu->hm.s.svm.hMemObjV MCBHost != NIL_RTR0MEMOBJ)287 { 288 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjV MCBHost, false);289 pVCpu->hm.s.svm.pvV MCBHost = 0;290 pVCpu->hm.s.svm.HCPhysV MCBHost = 0;291 pVCpu->hm.s.svm.hMemObjV MCBHost = NIL_RTR0MEMOBJ;292 } 293 294 if (pVCpu->hm.s.svm.hMemObjV MCB!= NIL_RTR0MEMOBJ)295 { 296 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjV MCB, false);297 pVCpu->hm.s.svm.pvV MCB= 0;298 pVCpu->hm.s.svm.HCPhysV MCB= 0;299 pVCpu->hm.s.svm.hMemObjV MCB= NIL_RTR0MEMOBJ;286 if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ) 287 { 288 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false); 289 pVCpu->hm.s.svm.pvVmcbHost = 0; 290 pVCpu->hm.s.svm.HCPhysVmcbHost = 0; 291 pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ; 292 } 293 294 if (pVCpu->hm.s.svm.hMemObjVmcb != NIL_RTR0MEMOBJ) 295 { 296 RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false); 297 pVCpu->hm.s.svm.pvVmcb = 0; 298 pVCpu->hm.s.svm.HCPhysVmcb = 0; 299 pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ; 300 300 } 301 301 if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ) … … 334 334 { 335 335 PVMCPU pVCpu = &pVM->aCpus[i]; 336 SVM_VMCB *pvVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pvVMCB;337 338 AssertMsgReturn(p vVMCB, ("Invalid pvVMCB\n"), VERR_SVM_INVALID_PVMCB);336 PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb; 337 338 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB); 339 339 340 340 /* … … 343 343 * Note: CR0 & CR4 can be safely read when guest and shadow copies are identical. 344 344 */ 345 p vVMCB->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);345 pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4); 346 346 347 347 /* CR0/4 writes must be intercepted for obvious reasons. */ 348 p vVMCB->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);348 pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4); 349 349 350 350 /* Intercept all DRx reads and writes by default. Changed later on. */ 351 p vVMCB->ctrl.u16InterceptRdDRx = 0xFFFF;352 p vVMCB->ctrl.u16InterceptWrDRx = 0xFFFF;351 pVmcb->ctrl.u16InterceptRdDRx = 0xFFFF; 352 pVmcb->ctrl.u16InterceptWrDRx = 0xFFFF; 353 353 354 354 /* Intercept traps; only #NM is always intercepted. 
*/ 355 p vVMCB->ctrl.u32InterceptException = RT_BIT(X86_XCPT_NM);355 pVmcb->ctrl.u32InterceptException = RT_BIT(X86_XCPT_NM); 356 356 #ifdef VBOX_ALWAYS_TRAP_PF 357 p vVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);357 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF); 358 358 #endif 359 359 #ifdef VBOX_STRICT 360 p vVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP)360 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_BP) 361 361 | RT_BIT(X86_XCPT_DB) 362 362 | RT_BIT(X86_XCPT_DE) … … 370 370 371 371 /* Set up instruction and miscellaneous intercepts. */ 372 p vVMCB->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR372 pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR 373 373 | SVM_CTRL1_INTERCEPT_VINTR 374 374 | SVM_CTRL1_INTERCEPT_NMI … … 385 385 | SVM_CTRL1_INTERCEPT_FERR_FREEZE; /* Legacy FPU FERR handling. */ 386 386 ; 387 p vVMCB->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* required */387 pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN /* required */ 388 388 | SVM_CTRL2_INTERCEPT_VMMCALL 389 389 | SVM_CTRL2_INTERCEPT_VMLOAD … … 397 397 guest (host thinks the cpu load is high) */ 398 398 399 Log(("p vVMCB->ctrl.u32InterceptException = %x\n", pvVMCB->ctrl.u32InterceptException));400 Log(("p vVMCB->ctrl.u32InterceptCtrl1 = %x\n", pvVMCB->ctrl.u32InterceptCtrl1));401 Log(("p vVMCB->ctrl.u32InterceptCtrl2 = %x\n", pvVMCB->ctrl.u32InterceptCtrl2));399 Log(("pVmcb->ctrl.u32InterceptException = %x\n", pVmcb->ctrl.u32InterceptException)); 400 Log(("pVmcb->ctrl.u32InterceptCtrl1 = %x\n", pVmcb->ctrl.u32InterceptCtrl1)); 401 Log(("pVmcb->ctrl.u32InterceptCtrl2 = %x\n", pVmcb->ctrl.u32InterceptCtrl2)); 402 402 403 403 /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */ 404 p vVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;404 pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1; 405 405 406 406 /* Ignore the priority in the TPR; just deliver it when we tell it to. */ 407 p vVMCB->ctrl.IntCtrl.n.u1IgnoreTPR = 1;407 pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1; 408 408 409 409 /* Set IO and MSR bitmap addresses. */ 410 p vVMCB->ctrl.u64IOPMPhysAddr = pVM->hm.s.svm.HCPhysIOBitmap;411 p vVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;410 pVmcb->ctrl.u64IOPMPhysAddr = pVM->hm.s.svm.HCPhysIOBitmap; 411 pVmcb->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap; 412 412 413 413 /* No LBR virtualization. */ 414 p vVMCB->ctrl.u64LBRVirt = 0;414 pVmcb->ctrl.u64LBRVirt = 0; 415 415 416 416 /* The ASID must start at 1; the host uses 0. */ 417 p vVMCB->ctrl.TLBCtrl.n.u32ASID = 1;417 pVmcb->ctrl.TLBCtrl.n.u32ASID = 1; 418 418 419 419 /* … … 422 422 * so choose type 6 for all PAT slots. 423 423 */ 424 p vVMCB->guest.u64GPAT = 0x0006060606060606ULL;424 pVmcb->guest.u64GPAT = 0x0006060606060606ULL; 425 425 426 426 /* If nested paging is not in use, additional intercepts have to be set up. */ … … 428 428 { 429 429 /* CR3 reads/writes must be intercepted; our shadow values are different from guest's. */ 430 p vVMCB->ctrl.u16InterceptRdCRx |= RT_BIT(3);431 p vVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(3);430 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(3); 431 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(3); 432 432 433 433 /* … … 436 436 * - task switches (may change CR3/EFLAGS/LDT) 437 437 */ 438 p vVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG438 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG 439 439 | SVM_CTRL1_INTERCEPT_TASK_SWITCH; 440 440 441 441 /* Page faults must be intercepted to implement shadow paging. 
*/ 442 p vVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF);442 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_PF); 443 443 } 444 444 … … 521 521 * @param pIntInfo Pointer to the SVM interrupt info. 522 522 */ 523 DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, SVM _EVENT *pEvent)523 DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, SVMEVENT *pEvent) 524 524 { 525 525 #ifdef VBOX_STRICT … … 539 539 * 540 540 * @param pVCpu Pointer to the VMCPU. 541 * @param p vVMCBPointer to the VMCB.541 * @param pVmcb Pointer to the VMCB. 542 542 * @param pCtx Pointer to the guest CPU context. 543 543 * @param pIntInfo Pointer to the SVM interrupt info. 544 544 */ 545 DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)545 DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, PSVMVMCB pVmcb, CPUMCTX *pCtx, SVMEVENT *pEvent) 546 546 { 547 547 #ifdef VBOX_WITH_STATISTICS … … 566 566 567 567 /* Set event injection state. */ 568 p vVMCB->ctrl.EventInject.au64[0] = pEvent->au64[0];568 pVmcb->ctrl.EventInject.au64[0] = pEvent->au64[0]; 569 569 } 570 570 … … 576 576 * @param pVM Pointer to the VM. 577 577 * @param pVCpu Pointer to the VMCPU. 578 * @param p vVMCBPointer to the VMCB.578 * @param pVmcb Pointer to the VMCB. 579 579 * @param pCtx Pointer to the guest CPU Context. 580 580 */ 581 static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pvVMCB, CPUMCTX *pCtx)581 static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, PSVMVMCB pVmcb, CPUMCTX *pCtx) 582 582 { 583 583 int rc; … … 589 589 if (pVCpu->hm.s.Event.fPending) 590 590 { 591 SVM _EVENT Event;591 SVMEVENT Event; 592 592 593 593 Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode, … … 595 595 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject); 596 596 Event.au64[0] = pVCpu->hm.s.Event.u64IntrInfo; 597 hmR0SvmInjectEvent(pVCpu, p vVMCB, pCtx, &Event);597 hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event); 598 598 599 599 pVCpu->hm.s.Event.fPending = false; … … 608 608 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI)) 609 609 { 610 SVM _EVENT Event;610 SVMEVENT Event; 611 611 612 612 Log(("CPU%d: injecting #NMI\n", pVCpu->idCpu)); … … 616 616 Event.n.u3Type = SVM_EVENT_NMI; 617 617 618 hmR0SvmInjectEvent(pVCpu, p vVMCB, pCtx, &Event);618 hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event); 619 619 return VINF_SUCCESS; 620 620 } … … 630 630 || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 631 631 { 632 if (!p vVMCB->ctrl.IntCtrl.n.u1VIrqValid)632 if (!pVmcb->ctrl.IntCtrl.n.u1VIrqValid) 633 633 { 634 634 if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) … … 642 642 /** @todo Use virtual interrupt method to inject a pending IRQ; dispatched as 643 643 * soon as guest.IF is set. 
*/ 644 p vVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;645 p vVMCB->ctrl.IntCtrl.n.u1VIrqValid = 1;646 p vVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */644 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR; 645 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 1; 646 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0; /* don't care */ 647 647 } 648 648 } … … 685 685 uint8_t u8Vector; 686 686 TRPMEVENT enmType; 687 SVM _EVENT Event;687 SVMEVENT Event; 688 688 RTGCUINT u32ErrorCode; 689 689 … … 730 730 731 731 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject); 732 hmR0SvmInjectEvent(pVCpu, p vVMCB, pCtx, &Event);732 hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event); 733 733 } /* if (interrupts can be dispatched) */ 734 734 … … 766 766 { 767 767 RTGCUINTPTR val; 768 SVM_VMCB *pvVMCB;768 PSVMVMCB pVmcb; 769 769 770 770 if (pVM == NULL) … … 774 774 Assert(pVM->hm.s.svm.fSupported); 775 775 776 p vVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;777 AssertMsgReturn(p vVMCB, ("Invalid pvVMCB\n"), VERR_SVM_INVALID_PVMCB);776 pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 777 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB); 778 778 779 779 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */ … … 803 803 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR) 804 804 { 805 p vVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;806 p vVMCB->guest.GDTR.u64Base = pCtx->gdtr.pGdt;805 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt; 806 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt; 807 807 } 808 808 … … 810 810 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR) 811 811 { 812 p vVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;813 p vVMCB->guest.IDTR.u64Base = pCtx->idtr.pIdt;812 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt; 813 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt; 814 814 } 815 815 … … 817 817 * Sysenter MSRs (unconditional) 818 818 */ 819 p vVMCB->guest.u64SysEnterCS = pCtx->SysEnter.cs;820 p vVMCB->guest.u64SysEnterEIP = pCtx->SysEnter.eip;821 p vVMCB->guest.u64SysEnterESP = pCtx->SysEnter.esp;819 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs; 820 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip; 821 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp; 822 822 823 823 /* Control registers */ … … 840 840 if (!pVCpu->hm.s.fFPUOldStyleOverride) 841 841 { 842 p vVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);842 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF); 843 843 pVCpu->hm.s.fFPUOldStyleOverride = true; 844 844 } … … 859 859 val |= X86_CR0_WP; /* Must set this as we rely on protecting various pages and supervisor writes must be caught. 
*/ 860 860 } 861 p vVMCB->guest.u64CR0 = val;861 pVmcb->guest.u64CR0 = val; 862 862 } 863 863 /* CR2 as well */ 864 p vVMCB->guest.u64CR2 = pCtx->cr2;864 pVmcb->guest.u64CR2 = pCtx->cr2; 865 865 866 866 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3) … … 878 878 enmShwPagingMode = PGMGetHostMode(pVM); 879 879 880 p vVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);881 Assert(p vVMCB->ctrl.u64NestedPagingCR3);882 p vVMCB->guest.u64CR3 = pCtx->cr3;880 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode); 881 Assert(pVmcb->ctrl.u64NestedPagingCR3); 882 pVmcb->guest.u64CR3 = pCtx->cr3; 883 883 } 884 884 else 885 885 { 886 p vVMCB->guest.u64CR3 = PGMGetHyperCR3(pVCpu);887 Assert(p vVMCB->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));886 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu); 887 Assert(pVmcb->guest.u64CR3 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)); 888 888 } 889 889 } … … 925 925 } 926 926 } 927 p vVMCB->guest.u64CR4 = val;927 pVmcb->guest.u64CR4 = val; 928 928 } 929 929 … … 938 938 pCtx->dr[7] |= 0x400; /* must be one */ 939 939 940 p vVMCB->guest.u64DR7 = pCtx->dr[7];941 p vVMCB->guest.u64DR6 = pCtx->dr[6];940 pVmcb->guest.u64DR7 = pCtx->dr[7]; 941 pVmcb->guest.u64DR6 = pCtx->dr[6]; 942 942 943 943 #ifdef DEBUG … … 954 954 955 955 /* Override dr6 & dr7 with the hypervisor values. */ 956 p vVMCB->guest.u64DR7 = CPUMGetHyperDR7(pVCpu);957 p vVMCB->guest.u64DR6 = CPUMGetHyperDR6(pVCpu);956 pVmcb->guest.u64DR7 = CPUMGetHyperDR7(pVCpu); 957 pVmcb->guest.u64DR6 = CPUMGetHyperDR6(pVCpu); 958 958 } 959 959 else … … 967 967 968 968 /* Disable drx move intercepts. */ 969 p vVMCB->ctrl.u16InterceptRdDRx = 0;970 p vVMCB->ctrl.u16InterceptWrDRx = 0;969 pVmcb->ctrl.u16InterceptRdDRx = 0; 970 pVmcb->ctrl.u16InterceptWrDRx = 0; 971 971 972 972 /* Save the host and load the guest debug state. */ … … 977 977 978 978 /* EIP, ESP and EFLAGS */ 979 p vVMCB->guest.u64RIP = pCtx->rip;980 p vVMCB->guest.u64RSP = pCtx->rsp;981 p vVMCB->guest.u64RFlags = pCtx->eflags.u32;979 pVmcb->guest.u64RIP = pCtx->rip; 980 pVmcb->guest.u64RSP = pCtx->rsp; 981 pVmcb->guest.u64RFlags = pCtx->eflags.u32; 982 982 983 983 /* Set CPL */ 984 p vVMCB->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl;984 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl; 985 985 986 986 /* RAX/EAX too, as VMRUN uses RAX as an implicit parameter. */ 987 p vVMCB->guest.u64RAX = pCtx->rax;987 pVmcb->guest.u64RAX = pCtx->rax; 988 988 989 989 /* vmrun will fail without MSR_K6_EFER_SVME. */ 990 p vVMCB->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;990 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME; 991 991 992 992 /* 64 bits guest mode? */ … … 1005 1005 #endif 1006 1006 /* Unconditionally update these as wrmsr might have changed them. (HM_CHANGED_GUEST_SEGMENT_REGS will not be set) */ 1007 p vVMCB->guest.FS.u64Base = pCtx->fs.u64Base;1008 p vVMCB->guest.GS.u64Base = pCtx->gs.u64Base;1007 pVmcb->guest.FS.u64Base = pCtx->fs.u64Base; 1008 pVmcb->guest.GS.u64Base = pCtx->gs.u64Base; 1009 1009 } 1010 1010 else 1011 1011 { 1012 1012 /* Filter out the MSR_K6_LME bit or else AMD-V expects amd64 shadow paging. */ 1013 p vVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME;1013 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME; 1014 1014 1015 1015 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun; … … 1017 1017 1018 1018 /* TSC offset. 
*/ 1019 if (TMCpuTickCanUseRealTSC(pVCpu, &p vVMCB->ctrl.u64TSCOffset))1019 if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset)) 1020 1020 { 1021 1021 uint64_t u64CurTSC = ASMReadTSC(); 1022 if (u64CurTSC + p vVMCB->ctrl.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu))1023 { 1024 p vVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;1025 p vVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;1022 if (u64CurTSC + pVmcb->ctrl.u64TSCOffset > TMCpuTickGetLastSeen(pVCpu)) 1023 { 1024 pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC; 1025 pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP; 1026 1026 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset); 1027 1027 } … … 1030 1030 /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */ 1031 1031 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, 1032 p vVMCB->ctrl.u64TSCOffset, u64CurTSC + pvVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu),1033 TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - p vVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu)));1034 p vVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;1035 p vVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;1032 pVmcb->ctrl.u64TSCOffset, u64CurTSC + pVmcb->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), 1033 TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVmcb->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu))); 1034 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 1035 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 1036 1036 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow); 1037 1037 } … … 1039 1039 else 1040 1040 { 1041 p vVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;1042 p vVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;1041 pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 1042 pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 1043 1043 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept); 1044 1044 } 1045 1045 1046 1046 /* Sync the various MSRs for 64-bit mode. 
*/ 1047 p vVMCB->guest.u64STAR = pCtx->msrSTAR; /* legacy syscall eip, cs & ss */1048 p vVMCB->guest.u64LSTAR = pCtx->msrLSTAR; /* 64-bit mode syscall rip */1049 p vVMCB->guest.u64CSTAR = pCtx->msrCSTAR; /* compatibility mode syscall rip */1050 p vVMCB->guest.u64SFMASK = pCtx->msrSFMASK; /* syscall flag mask */1051 p vVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; /* SWAPGS exchange value */1047 pVmcb->guest.u64STAR = pCtx->msrSTAR; /* legacy syscall eip, cs & ss */ 1048 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR; /* 64-bit mode syscall rip */ 1049 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR; /* compatibility mode syscall rip */ 1050 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK; /* syscall flag mask */ 1051 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; /* SWAPGS exchange value */ 1052 1052 1053 1053 #ifdef DEBUG … … 1055 1055 if ( DBGFIsStepping(pVCpu) 1056 1056 || CPUMIsHyperDebugStateActive(pVCpu)) 1057 p vVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_DB);1057 pVmcb->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_DB); 1058 1058 else 1059 p vVMCB->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_DB);1059 pVmcb->ctrl.u32InterceptException &= ~RT_BIT(X86_XCPT_DB); 1060 1060 #endif 1061 1061 … … 1080 1080 AssertPtr(pVCpu); 1081 1081 1082 SVM_VMCB *pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;1082 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 1083 1083 pCpu = HMR0GetCurrentCpu(); 1084 1084 … … 1109 1109 1110 1110 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 1111 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING;1111 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING; 1112 1112 1113 1113 if (RT_UNLIKELY(pVM->hm.s.svm.fAlwaysFlushTLB)) … … 1119 1119 pVCpu->hm.s.uCurrentAsid = 1; 1120 1120 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes; 1121 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;1121 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE; 1122 1122 } 1123 1123 else if (pVCpu->hm.s.fForceTLBFlush) … … 1135 1135 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) 1136 1136 { 1137 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;1137 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; 1138 1138 pCpu->fFlushAsidBeforeUse = true; 1139 1139 } 1140 1140 else 1141 1141 { 1142 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;1142 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE; 1143 1143 pCpu->fFlushAsidBeforeUse = false; 1144 1144 } … … 1149 1149 { 1150 1150 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) 1151 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;1151 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; 1152 1152 else 1153 1153 { 1154 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;1154 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE; 1155 1155 pCpu->fFlushAsidBeforeUse = false; 1156 1156 } … … 1163 1163 { 1164 1164 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) 1165 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;1165 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; 1166 1166 else 1167 p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;1167 pVmcb->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE; 1168 1168 } 1169 1169 … … 1180 1180 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 1181 1181 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) 1182 SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], p vVMCB->ctrl.TLBCtrl.n.u32ASID);1182 
SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVmcb->ctrl.TLBCtrl.n.u32ASID); 1183 1183 } 1184 1184 } … … 1188 1188 1189 1189 /* Update VMCB with the ASID. */ 1190 p vVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;1190 pVmcb->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid; 1191 1191 1192 1192 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes, … … 1198 1198 1199 1199 #ifdef VBOX_WITH_STATISTICS 1200 if (p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING)1200 if (pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING) 1201 1201 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); 1202 else if ( p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT1203 || p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)1202 else if ( pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT 1203 || pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS) 1204 1204 { 1205 1205 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid); … … 1228 1228 int rc2; 1229 1229 uint64_t exitCode = (uint64_t)SVM_EXIT_INVALID; 1230 SVM_VMCB *pvVMCB= NULL;1230 PSVMVMCB pVmcb = NULL; 1231 1231 bool fSyncTPR = false; 1232 1232 unsigned cResume = 0; … … 1242 1242 #endif 1243 1243 1244 p vVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;1245 AssertMsgReturn(p vVMCB, ("Invalid pvVMCB\n"), VERR_SVM_INVALID_PVMCB);1244 pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 1245 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB); 1246 1246 1247 1247 /* … … 1279 1279 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 1280 1280 /* Irq inhibition is no longer active; clear the corresponding SVM state. */ 1281 p vVMCB->ctrl.u64IntShadow = 0;1281 pVmcb->ctrl.u64IntShadow = 0; 1282 1282 } 1283 1283 } … … 1285 1285 { 1286 1286 /* Irq inhibition is no longer active; clear the corresponding SVM state. */ 1287 p vVMCB->ctrl.u64IntShadow = 0;1287 pVmcb->ctrl.u64IntShadow = 0; 1288 1288 } 1289 1289 … … 1385 1385 * Note: *After* VM_FF_INHIBIT_INTERRUPTS check!! 1386 1386 */ 1387 rc = hmR0SvmCheckPendingInterrupt(pVM, pVCpu, p vVMCB, pCtx);1387 rc = hmR0SvmCheckPendingInterrupt(pVM, pVCpu, pVmcb, pCtx); 1388 1388 if (RT_FAILURE(rc)) 1389 1389 goto end; … … 1425 1425 { 1426 1426 /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */ 1427 p vVMCB->ctrl.IntCtrl.n.u8VTPR = (u8LastTPR >> 4);1427 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8LastTPR >> 4); 1428 1428 1429 1429 if (fPending) 1430 1430 { 1431 1431 /* A TPR change could activate a pending interrupt, so catch cr8 writes. */ 1432 p vVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(8);1432 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8); 1433 1433 } 1434 1434 else … … 1438 1438 * There are enough world switches for detecting pending interrupts. 1439 1439 */ 1440 p vVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);1440 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8); 1441 1441 } 1442 1442 } … … 1447 1447 1448 1448 /* Enable nested paging if necessary (disabled each time after #VMEXIT). 
*/ 1449 p vVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;1449 pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging; 1450 1450 1451 1451 #ifdef LOG_ENABLED … … 1495 1495 pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB; 1496 1496 1497 Assert(sizeof(pVCpu->hm.s.svm.HCPhysV MCB) == 8);1498 Assert(p vVMCB->ctrl.IntCtrl.n.u1VIrqMasking);1499 Assert(p vVMCB->ctrl.u64IOPMPhysAddr == pVM->hm.s.svm.HCPhysIOBitmap);1500 Assert(p vVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMsrBitmap);1501 Assert(p vVMCB->ctrl.u64LBRVirt == 0);1497 Assert(sizeof(pVCpu->hm.s.svm.HCPhysVmcb) == 8); 1498 Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking); 1499 Assert(pVmcb->ctrl.u64IOPMPhysAddr == pVM->hm.s.svm.HCPhysIOBitmap); 1500 Assert(pVmcb->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMsrBitmap); 1501 Assert(pVmcb->ctrl.u64LBRVirt == 0); 1502 1502 1503 1503 #ifdef VBOX_STRICT … … 1512 1512 u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX; 1513 1513 if ( (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) 1514 && !(p vVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))1514 && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP)) 1515 1515 { 1516 1516 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX); … … 1522 1522 1523 1523 #ifdef VBOX_WITH_KERNEL_USING_XMM 1524 HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysV MCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu,1524 HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu, 1525 1525 pVCpu->hm.s.svm.pfnVMRun); 1526 1526 #else 1527 pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysV MCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);1527 pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu); 1528 1528 #endif 1529 1529 … … 1531 1531 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); 1532 1532 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 1533 if (!(p vVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))1533 if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC)) 1534 1534 { 1535 1535 /* Restore host's TSC_AUX. */ … … 1538 1538 1539 1539 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + 1540 p vVMCB->ctrl.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);1540 pVmcb->ctrl.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */); 1541 1541 } 1542 1542 … … 1556 1556 1557 1557 /* Reason for the VM exit */ 1558 exitCode = p vVMCB->ctrl.u64ExitCode;1558 exitCode = pVmcb->ctrl.u64ExitCode; 1559 1559 1560 1560 if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID)) /* Invalid guest state. 
*/ … … 1562 1562 HMDumpRegs(pVM, pVCpu, pCtx); 1563 1563 #ifdef DEBUG 1564 Log(("ctrl.u16InterceptRdCRx %x\n", p vVMCB->ctrl.u16InterceptRdCRx));1565 Log(("ctrl.u16InterceptWrCRx %x\n", p vVMCB->ctrl.u16InterceptWrCRx));1566 Log(("ctrl.u16InterceptRdDRx %x\n", p vVMCB->ctrl.u16InterceptRdDRx));1567 Log(("ctrl.u16InterceptWrDRx %x\n", p vVMCB->ctrl.u16InterceptWrDRx));1568 Log(("ctrl.u32InterceptException %x\n", p vVMCB->ctrl.u32InterceptException));1569 Log(("ctrl.u32InterceptCtrl1 %x\n", p vVMCB->ctrl.u32InterceptCtrl1));1570 Log(("ctrl.u32InterceptCtrl2 %x\n", p vVMCB->ctrl.u32InterceptCtrl2));1571 Log(("ctrl.u64IOPMPhysAddr %RX64\n", p vVMCB->ctrl.u64IOPMPhysAddr));1572 Log(("ctrl.u64MSRPMPhysAddr %RX64\n", p vVMCB->ctrl.u64MSRPMPhysAddr));1573 Log(("ctrl.u64TSCOffset %RX64\n", p vVMCB->ctrl.u64TSCOffset));1574 1575 Log(("ctrl.TLBCtrl.u32ASID %x\n", p vVMCB->ctrl.TLBCtrl.n.u32ASID));1576 Log(("ctrl.TLBCtrl.u8TLBFlush %x\n", p vVMCB->ctrl.TLBCtrl.n.u8TLBFlush));1577 Log(("ctrl.TLBCtrl.u24Reserved %x\n", p vVMCB->ctrl.TLBCtrl.n.u24Reserved));1578 1579 Log(("ctrl.IntCtrl.u8VTPR %x\n", p vVMCB->ctrl.IntCtrl.n.u8VTPR));1580 Log(("ctrl.IntCtrl.u1VIrqValid %x\n", p vVMCB->ctrl.IntCtrl.n.u1VIrqValid));1581 Log(("ctrl.IntCtrl.u7Reserved %x\n", p vVMCB->ctrl.IntCtrl.n.u7Reserved));1582 Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", p vVMCB->ctrl.IntCtrl.n.u4VIrqPriority));1583 Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", p vVMCB->ctrl.IntCtrl.n.u1IgnoreTPR));1584 Log(("ctrl.IntCtrl.u3Reserved %x\n", p vVMCB->ctrl.IntCtrl.n.u3Reserved));1585 Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", p vVMCB->ctrl.IntCtrl.n.u1VIrqMasking));1586 Log(("ctrl.IntCtrl.u7Reserved2 %x\n", p vVMCB->ctrl.IntCtrl.n.u7Reserved2));1587 Log(("ctrl.IntCtrl.u8VIrqVector %x\n", p vVMCB->ctrl.IntCtrl.n.u8VIrqVector));1588 Log(("ctrl.IntCtrl.u24Reserved %x\n", p vVMCB->ctrl.IntCtrl.n.u24Reserved));1589 1590 Log(("ctrl.u64IntShadow %RX64\n", p vVMCB->ctrl.u64IntShadow));1591 Log(("ctrl.u64ExitCode %RX64\n", p vVMCB->ctrl.u64ExitCode));1592 Log(("ctrl.u64ExitInfo1 %RX64\n", p vVMCB->ctrl.u64ExitInfo1));1593 Log(("ctrl.u64ExitInfo2 %RX64\n", p vVMCB->ctrl.u64ExitInfo2));1594 Log(("ctrl.ExitIntInfo.u8Vector %x\n", p vVMCB->ctrl.ExitIntInfo.n.u8Vector));1595 Log(("ctrl.ExitIntInfo.u3Type %x\n", p vVMCB->ctrl.ExitIntInfo.n.u3Type));1596 Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", p vVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid));1597 Log(("ctrl.ExitIntInfo.u19Reserved %x\n", p vVMCB->ctrl.ExitIntInfo.n.u19Reserved));1598 Log(("ctrl.ExitIntInfo.u1Valid %x\n", p vVMCB->ctrl.ExitIntInfo.n.u1Valid));1599 Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", p vVMCB->ctrl.ExitIntInfo.n.u32ErrorCode));1600 Log(("ctrl.NestedPaging %RX64\n", p vVMCB->ctrl.NestedPaging.au64));1601 Log(("ctrl.EventInject.u8Vector %x\n", p vVMCB->ctrl.EventInject.n.u8Vector));1602 Log(("ctrl.EventInject.u3Type %x\n", p vVMCB->ctrl.EventInject.n.u3Type));1603 Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", p vVMCB->ctrl.EventInject.n.u1ErrorCodeValid));1604 Log(("ctrl.EventInject.u19Reserved %x\n", p vVMCB->ctrl.EventInject.n.u19Reserved));1605 Log(("ctrl.EventInject.u1Valid %x\n", p vVMCB->ctrl.EventInject.n.u1Valid));1606 Log(("ctrl.EventInject.u32ErrorCode %x\n", p vVMCB->ctrl.EventInject.n.u32ErrorCode));1607 1608 Log(("ctrl.u64NestedPagingCR3 %RX64\n", p vVMCB->ctrl.u64NestedPagingCR3));1609 Log(("ctrl.u64LBRVirt %RX64\n", p vVMCB->ctrl.u64LBRVirt));1610 1611 Log(("guest.CS.u16Sel %04X\n", p vVMCB->guest.CS.u16Sel));1612 Log(("guest.CS.u16Attr %04X\n", p vVMCB->guest.CS.u16Attr));1613 
Log(("guest.CS.u32Limit %X\n", p vVMCB->guest.CS.u32Limit));1614 Log(("guest.CS.u64Base %RX64\n", p vVMCB->guest.CS.u64Base));1615 Log(("guest.DS.u16Sel %04X\n", p vVMCB->guest.DS.u16Sel));1616 Log(("guest.DS.u16Attr %04X\n", p vVMCB->guest.DS.u16Attr));1617 Log(("guest.DS.u32Limit %X\n", p vVMCB->guest.DS.u32Limit));1618 Log(("guest.DS.u64Base %RX64\n", p vVMCB->guest.DS.u64Base));1619 Log(("guest.ES.u16Sel %04X\n", p vVMCB->guest.ES.u16Sel));1620 Log(("guest.ES.u16Attr %04X\n", p vVMCB->guest.ES.u16Attr));1621 Log(("guest.ES.u32Limit %X\n", p vVMCB->guest.ES.u32Limit));1622 Log(("guest.ES.u64Base %RX64\n", p vVMCB->guest.ES.u64Base));1623 Log(("guest.FS.u16Sel %04X\n", p vVMCB->guest.FS.u16Sel));1624 Log(("guest.FS.u16Attr %04X\n", p vVMCB->guest.FS.u16Attr));1625 Log(("guest.FS.u32Limit %X\n", p vVMCB->guest.FS.u32Limit));1626 Log(("guest.FS.u64Base %RX64\n", p vVMCB->guest.FS.u64Base));1627 Log(("guest.GS.u16Sel %04X\n", p vVMCB->guest.GS.u16Sel));1628 Log(("guest.GS.u16Attr %04X\n", p vVMCB->guest.GS.u16Attr));1629 Log(("guest.GS.u32Limit %X\n", p vVMCB->guest.GS.u32Limit));1630 Log(("guest.GS.u64Base %RX64\n", p vVMCB->guest.GS.u64Base));1631 1632 Log(("guest.GDTR.u32Limit %X\n", p vVMCB->guest.GDTR.u32Limit));1633 Log(("guest.GDTR.u64Base %RX64\n", p vVMCB->guest.GDTR.u64Base));1634 1635 Log(("guest.LDTR.u16Sel %04X\n", p vVMCB->guest.LDTR.u16Sel));1636 Log(("guest.LDTR.u16Attr %04X\n", p vVMCB->guest.LDTR.u16Attr));1637 Log(("guest.LDTR.u32Limit %X\n", p vVMCB->guest.LDTR.u32Limit));1638 Log(("guest.LDTR.u64Base %RX64\n", p vVMCB->guest.LDTR.u64Base));1639 1640 Log(("guest.IDTR.u32Limit %X\n", p vVMCB->guest.IDTR.u32Limit));1641 Log(("guest.IDTR.u64Base %RX64\n", p vVMCB->guest.IDTR.u64Base));1642 1643 Log(("guest.TR.u16Sel %04X\n", p vVMCB->guest.TR.u16Sel));1644 Log(("guest.TR.u16Attr %04X\n", p vVMCB->guest.TR.u16Attr));1645 Log(("guest.TR.u32Limit %X\n", p vVMCB->guest.TR.u32Limit));1646 Log(("guest.TR.u64Base %RX64\n", p vVMCB->guest.TR.u64Base));1647 1648 Log(("guest.u8CPL %X\n", p vVMCB->guest.u8CPL));1649 Log(("guest.u64CR0 %RX64\n", p vVMCB->guest.u64CR0));1650 Log(("guest.u64CR2 %RX64\n", p vVMCB->guest.u64CR2));1651 Log(("guest.u64CR3 %RX64\n", p vVMCB->guest.u64CR3));1652 Log(("guest.u64CR4 %RX64\n", p vVMCB->guest.u64CR4));1653 Log(("guest.u64DR6 %RX64\n", p vVMCB->guest.u64DR6));1654 Log(("guest.u64DR7 %RX64\n", p vVMCB->guest.u64DR7));1655 1656 Log(("guest.u64RIP %RX64\n", p vVMCB->guest.u64RIP));1657 Log(("guest.u64RSP %RX64\n", p vVMCB->guest.u64RSP));1658 Log(("guest.u64RAX %RX64\n", p vVMCB->guest.u64RAX));1659 Log(("guest.u64RFlags %RX64\n", p vVMCB->guest.u64RFlags));1660 1661 Log(("guest.u64SysEnterCS %RX64\n", p vVMCB->guest.u64SysEnterCS));1662 Log(("guest.u64SysEnterEIP %RX64\n", p vVMCB->guest.u64SysEnterEIP));1663 Log(("guest.u64SysEnterESP %RX64\n", p vVMCB->guest.u64SysEnterESP));1664 1665 Log(("guest.u64EFER %RX64\n", p vVMCB->guest.u64EFER));1666 Log(("guest.u64STAR %RX64\n", p vVMCB->guest.u64STAR));1667 Log(("guest.u64LSTAR %RX64\n", p vVMCB->guest.u64LSTAR));1668 Log(("guest.u64CSTAR %RX64\n", p vVMCB->guest.u64CSTAR));1669 Log(("guest.u64SFMASK %RX64\n", p vVMCB->guest.u64SFMASK));1670 Log(("guest.u64KernelGSBase %RX64\n", p vVMCB->guest.u64KernelGSBase));1671 Log(("guest.u64GPAT %RX64\n", p vVMCB->guest.u64GPAT));1672 Log(("guest.u64DBGCTL %RX64\n", p vVMCB->guest.u64DBGCTL));1673 Log(("guest.u64BR_FROM %RX64\n", p vVMCB->guest.u64BR_FROM));1674 Log(("guest.u64BR_TO %RX64\n", p vVMCB->guest.u64BR_TO));1675 Log(("guest.u64LASTEXCPFROM %RX64\n", p 
vVMCB->guest.u64LASTEXCPFROM));1676 Log(("guest.u64LASTEXCPTO %RX64\n", p vVMCB->guest.u64LASTEXCPTO));1564 Log(("ctrl.u16InterceptRdCRx %x\n", pVmcb->ctrl.u16InterceptRdCRx)); 1565 Log(("ctrl.u16InterceptWrCRx %x\n", pVmcb->ctrl.u16InterceptWrCRx)); 1566 Log(("ctrl.u16InterceptRdDRx %x\n", pVmcb->ctrl.u16InterceptRdDRx)); 1567 Log(("ctrl.u16InterceptWrDRx %x\n", pVmcb->ctrl.u16InterceptWrDRx)); 1568 Log(("ctrl.u32InterceptException %x\n", pVmcb->ctrl.u32InterceptException)); 1569 Log(("ctrl.u32InterceptCtrl1 %x\n", pVmcb->ctrl.u32InterceptCtrl1)); 1570 Log(("ctrl.u32InterceptCtrl2 %x\n", pVmcb->ctrl.u32InterceptCtrl2)); 1571 Log(("ctrl.u64IOPMPhysAddr %RX64\n", pVmcb->ctrl.u64IOPMPhysAddr)); 1572 Log(("ctrl.u64MSRPMPhysAddr %RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr)); 1573 Log(("ctrl.u64TSCOffset %RX64\n", pVmcb->ctrl.u64TSCOffset)); 1574 1575 Log(("ctrl.TLBCtrl.u32ASID %x\n", pVmcb->ctrl.TLBCtrl.n.u32ASID)); 1576 Log(("ctrl.TLBCtrl.u8TLBFlush %x\n", pVmcb->ctrl.TLBCtrl.n.u8TLBFlush)); 1577 Log(("ctrl.TLBCtrl.u24Reserved %x\n", pVmcb->ctrl.TLBCtrl.n.u24Reserved)); 1578 1579 Log(("ctrl.IntCtrl.u8VTPR %x\n", pVmcb->ctrl.IntCtrl.n.u8VTPR)); 1580 Log(("ctrl.IntCtrl.u1VIrqValid %x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqValid)); 1581 Log(("ctrl.IntCtrl.u7Reserved %x\n", pVmcb->ctrl.IntCtrl.n.u7Reserved)); 1582 Log(("ctrl.IntCtrl.u4VIrqPriority %x\n", pVmcb->ctrl.IntCtrl.n.u4VIrqPriority)); 1583 Log(("ctrl.IntCtrl.u1IgnoreTPR %x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR)); 1584 Log(("ctrl.IntCtrl.u3Reserved %x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved)); 1585 Log(("ctrl.IntCtrl.u1VIrqMasking %x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqMasking)); 1586 Log(("ctrl.IntCtrl.u7Reserved2 %x\n", pVmcb->ctrl.IntCtrl.n.u7Reserved2)); 1587 Log(("ctrl.IntCtrl.u8VIrqVector %x\n", pVmcb->ctrl.IntCtrl.n.u8VIrqVector)); 1588 Log(("ctrl.IntCtrl.u24Reserved %x\n", pVmcb->ctrl.IntCtrl.n.u24Reserved)); 1589 1590 Log(("ctrl.u64IntShadow %RX64\n", pVmcb->ctrl.u64IntShadow)); 1591 Log(("ctrl.u64ExitCode %RX64\n", pVmcb->ctrl.u64ExitCode)); 1592 Log(("ctrl.u64ExitInfo1 %RX64\n", pVmcb->ctrl.u64ExitInfo1)); 1593 Log(("ctrl.u64ExitInfo2 %RX64\n", pVmcb->ctrl.u64ExitInfo2)); 1594 Log(("ctrl.ExitIntInfo.u8Vector %x\n", pVmcb->ctrl.ExitIntInfo.n.u8Vector)); 1595 Log(("ctrl.ExitIntInfo.u3Type %x\n", pVmcb->ctrl.ExitIntInfo.n.u3Type)); 1596 Log(("ctrl.ExitIntInfo.u1ErrorCodeValid %x\n", pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid)); 1597 Log(("ctrl.ExitIntInfo.u19Reserved %x\n", pVmcb->ctrl.ExitIntInfo.n.u19Reserved)); 1598 Log(("ctrl.ExitIntInfo.u1Valid %x\n", pVmcb->ctrl.ExitIntInfo.n.u1Valid)); 1599 Log(("ctrl.ExitIntInfo.u32ErrorCode %x\n", pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode)); 1600 Log(("ctrl.NestedPaging %RX64\n", pVmcb->ctrl.NestedPaging.au64)); 1601 Log(("ctrl.EventInject.u8Vector %x\n", pVmcb->ctrl.EventInject.n.u8Vector)); 1602 Log(("ctrl.EventInject.u3Type %x\n", pVmcb->ctrl.EventInject.n.u3Type)); 1603 Log(("ctrl.EventInject.u1ErrorCodeValid %x\n", pVmcb->ctrl.EventInject.n.u1ErrorCodeValid)); 1604 Log(("ctrl.EventInject.u19Reserved %x\n", pVmcb->ctrl.EventInject.n.u19Reserved)); 1605 Log(("ctrl.EventInject.u1Valid %x\n", pVmcb->ctrl.EventInject.n.u1Valid)); 1606 Log(("ctrl.EventInject.u32ErrorCode %x\n", pVmcb->ctrl.EventInject.n.u32ErrorCode)); 1607 1608 Log(("ctrl.u64NestedPagingCR3 %RX64\n", pVmcb->ctrl.u64NestedPagingCR3)); 1609 Log(("ctrl.u64LBRVirt %RX64\n", pVmcb->ctrl.u64LBRVirt)); 1610 1611 Log(("guest.CS.u16Sel %04X\n", pVmcb->guest.CS.u16Sel)); 1612 Log(("guest.CS.u16Attr %04X\n", pVmcb->guest.CS.u16Attr)); 1613 
Log(("guest.CS.u32Limit %X\n", pVmcb->guest.CS.u32Limit)); 1614 Log(("guest.CS.u64Base %RX64\n", pVmcb->guest.CS.u64Base)); 1615 Log(("guest.DS.u16Sel %04X\n", pVmcb->guest.DS.u16Sel)); 1616 Log(("guest.DS.u16Attr %04X\n", pVmcb->guest.DS.u16Attr)); 1617 Log(("guest.DS.u32Limit %X\n", pVmcb->guest.DS.u32Limit)); 1618 Log(("guest.DS.u64Base %RX64\n", pVmcb->guest.DS.u64Base)); 1619 Log(("guest.ES.u16Sel %04X\n", pVmcb->guest.ES.u16Sel)); 1620 Log(("guest.ES.u16Attr %04X\n", pVmcb->guest.ES.u16Attr)); 1621 Log(("guest.ES.u32Limit %X\n", pVmcb->guest.ES.u32Limit)); 1622 Log(("guest.ES.u64Base %RX64\n", pVmcb->guest.ES.u64Base)); 1623 Log(("guest.FS.u16Sel %04X\n", pVmcb->guest.FS.u16Sel)); 1624 Log(("guest.FS.u16Attr %04X\n", pVmcb->guest.FS.u16Attr)); 1625 Log(("guest.FS.u32Limit %X\n", pVmcb->guest.FS.u32Limit)); 1626 Log(("guest.FS.u64Base %RX64\n", pVmcb->guest.FS.u64Base)); 1627 Log(("guest.GS.u16Sel %04X\n", pVmcb->guest.GS.u16Sel)); 1628 Log(("guest.GS.u16Attr %04X\n", pVmcb->guest.GS.u16Attr)); 1629 Log(("guest.GS.u32Limit %X\n", pVmcb->guest.GS.u32Limit)); 1630 Log(("guest.GS.u64Base %RX64\n", pVmcb->guest.GS.u64Base)); 1631 1632 Log(("guest.GDTR.u32Limit %X\n", pVmcb->guest.GDTR.u32Limit)); 1633 Log(("guest.GDTR.u64Base %RX64\n", pVmcb->guest.GDTR.u64Base)); 1634 1635 Log(("guest.LDTR.u16Sel %04X\n", pVmcb->guest.LDTR.u16Sel)); 1636 Log(("guest.LDTR.u16Attr %04X\n", pVmcb->guest.LDTR.u16Attr)); 1637 Log(("guest.LDTR.u32Limit %X\n", pVmcb->guest.LDTR.u32Limit)); 1638 Log(("guest.LDTR.u64Base %RX64\n", pVmcb->guest.LDTR.u64Base)); 1639 1640 Log(("guest.IDTR.u32Limit %X\n", pVmcb->guest.IDTR.u32Limit)); 1641 Log(("guest.IDTR.u64Base %RX64\n", pVmcb->guest.IDTR.u64Base)); 1642 1643 Log(("guest.TR.u16Sel %04X\n", pVmcb->guest.TR.u16Sel)); 1644 Log(("guest.TR.u16Attr %04X\n", pVmcb->guest.TR.u16Attr)); 1645 Log(("guest.TR.u32Limit %X\n", pVmcb->guest.TR.u32Limit)); 1646 Log(("guest.TR.u64Base %RX64\n", pVmcb->guest.TR.u64Base)); 1647 1648 Log(("guest.u8CPL %X\n", pVmcb->guest.u8CPL)); 1649 Log(("guest.u64CR0 %RX64\n", pVmcb->guest.u64CR0)); 1650 Log(("guest.u64CR2 %RX64\n", pVmcb->guest.u64CR2)); 1651 Log(("guest.u64CR3 %RX64\n", pVmcb->guest.u64CR3)); 1652 Log(("guest.u64CR4 %RX64\n", pVmcb->guest.u64CR4)); 1653 Log(("guest.u64DR6 %RX64\n", pVmcb->guest.u64DR6)); 1654 Log(("guest.u64DR7 %RX64\n", pVmcb->guest.u64DR7)); 1655 1656 Log(("guest.u64RIP %RX64\n", pVmcb->guest.u64RIP)); 1657 Log(("guest.u64RSP %RX64\n", pVmcb->guest.u64RSP)); 1658 Log(("guest.u64RAX %RX64\n", pVmcb->guest.u64RAX)); 1659 Log(("guest.u64RFlags %RX64\n", pVmcb->guest.u64RFlags)); 1660 1661 Log(("guest.u64SysEnterCS %RX64\n", pVmcb->guest.u64SysEnterCS)); 1662 Log(("guest.u64SysEnterEIP %RX64\n", pVmcb->guest.u64SysEnterEIP)); 1663 Log(("guest.u64SysEnterESP %RX64\n", pVmcb->guest.u64SysEnterESP)); 1664 1665 Log(("guest.u64EFER %RX64\n", pVmcb->guest.u64EFER)); 1666 Log(("guest.u64STAR %RX64\n", pVmcb->guest.u64STAR)); 1667 Log(("guest.u64LSTAR %RX64\n", pVmcb->guest.u64LSTAR)); 1668 Log(("guest.u64CSTAR %RX64\n", pVmcb->guest.u64CSTAR)); 1669 Log(("guest.u64SFMASK %RX64\n", pVmcb->guest.u64SFMASK)); 1670 Log(("guest.u64KernelGSBase %RX64\n", pVmcb->guest.u64KernelGSBase)); 1671 Log(("guest.u64GPAT %RX64\n", pVmcb->guest.u64GPAT)); 1672 Log(("guest.u64DBGCTL %RX64\n", pVmcb->guest.u64DBGCTL)); 1673 Log(("guest.u64BR_FROM %RX64\n", pVmcb->guest.u64BR_FROM)); 1674 Log(("guest.u64BR_TO %RX64\n", pVmcb->guest.u64BR_TO)); 1675 Log(("guest.u64LASTEXCPFROM %RX64\n", pVmcb->guest.u64LASTEXCPFROM)); 1676 
Log(("guest.u64LASTEXCPTO %RX64\n", pVmcb->guest.u64LASTEXCPTO)); 1677 1677 #endif 1678 1678 rc = VERR_SVM_UNABLE_TO_START_VM; … … 1682 1682 1683 1683 /* Let's first sync back EIP, ESP, and EFLAGS. */ 1684 pCtx->rip = p vVMCB->guest.u64RIP;1685 pCtx->rsp = p vVMCB->guest.u64RSP;1686 pCtx->eflags.u32 = p vVMCB->guest.u64RFlags;1684 pCtx->rip = pVmcb->guest.u64RIP; 1685 pCtx->rsp = pVmcb->guest.u64RSP; 1686 pCtx->eflags.u32 = pVmcb->guest.u64RFlags; 1687 1687 /* eax is saved/restore across the vmrun instruction */ 1688 pCtx->rax = p vVMCB->guest.u64RAX;1688 pCtx->rax = pVmcb->guest.u64RAX; 1689 1689 1690 1690 /* … … 1692 1692 * FS & GS base are saved with SVM_READ_SELREG. 1693 1693 */ 1694 pCtx->msrSTAR = p vVMCB->guest.u64STAR; /* legacy syscall eip, cs & ss */1695 pCtx->msrLSTAR = p vVMCB->guest.u64LSTAR; /* 64-bit mode syscall rip */1696 pCtx->msrCSTAR = p vVMCB->guest.u64CSTAR; /* compatibility mode syscall rip */1697 pCtx->msrSFMASK = p vVMCB->guest.u64SFMASK; /* syscall flag mask */1698 pCtx->msrKERNELGSBASE = p vVMCB->guest.u64KernelGSBase; /* swapgs exchange value */1699 pCtx->SysEnter.cs = p vVMCB->guest.u64SysEnterCS;1700 pCtx->SysEnter.eip = p vVMCB->guest.u64SysEnterEIP;1701 pCtx->SysEnter.esp = p vVMCB->guest.u64SysEnterESP;1694 pCtx->msrSTAR = pVmcb->guest.u64STAR; /* legacy syscall eip, cs & ss */ 1695 pCtx->msrLSTAR = pVmcb->guest.u64LSTAR; /* 64-bit mode syscall rip */ 1696 pCtx->msrCSTAR = pVmcb->guest.u64CSTAR; /* compatibility mode syscall rip */ 1697 pCtx->msrSFMASK = pVmcb->guest.u64SFMASK; /* syscall flag mask */ 1698 pCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase; /* swapgs exchange value */ 1699 pCtx->SysEnter.cs = pVmcb->guest.u64SysEnterCS; 1700 pCtx->SysEnter.eip = pVmcb->guest.u64SysEnterEIP; 1701 pCtx->SysEnter.esp = pVmcb->guest.u64SysEnterESP; 1702 1702 1703 1703 /* Can be updated behind our back in the nested paging case. */ 1704 pCtx->cr2 = p vVMCB->guest.u64CR2;1704 pCtx->cr2 = pVmcb->guest.u64CR2; 1705 1705 1706 1706 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */ … … 1743 1743 * SS (chapter AMD spec. 15.5.1 Basic operation). 1744 1744 */ 1745 Assert(!(p vVMCB->guest.u8CPL & ~0x3));1746 pCtx->ss.Attr.n.u2Dpl = p vVMCB->guest.u8CPL & 0x3;1745 Assert(!(pVmcb->guest.u8CPL & ~0x3)); 1746 pCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3; 1747 1747 1748 1748 /* … … 1753 1753 SVM_READ_SELREG(TR, tr); 1754 1754 1755 pCtx->gdtr.cbGdt = p vVMCB->guest.GDTR.u32Limit;1756 pCtx->gdtr.pGdt = p vVMCB->guest.GDTR.u64Base;1757 1758 pCtx->idtr.cbIdt = p vVMCB->guest.IDTR.u32Limit;1759 pCtx->idtr.pIdt = p vVMCB->guest.IDTR.u64Base;1755 pCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit; 1756 pCtx->gdtr.pGdt = pVmcb->guest.GDTR.u64Base; 1757 1758 pCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit; 1759 pCtx->idtr.pIdt = pVmcb->guest.IDTR.u64Base; 1760 1760 1761 1761 /* … … 1764 1764 */ 1765 1765 if ( pVM->hm.s.fNestedPaging 1766 && pCtx->cr3 != p vVMCB->guest.u64CR3)1767 { 1768 CPUMSetGuestCR3(pVCpu, p vVMCB->guest.u64CR3);1769 PGMUpdateCR3(pVCpu, p vVMCB->guest.u64CR3);1766 && pCtx->cr3 != pVmcb->guest.u64CR3) 1767 { 1768 CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3); 1769 PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3); 1770 1770 } 1771 1771 … … 1774 1774 1775 1775 /* Take care of instruction fusing (sti, mov ss) (see AMD spec. 
15.20.5 Interrupt Shadows) */
1776 if (pvVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
1777 {
1778 Log(("uInterruptState %x rip=%RGv\n", pvVMCB->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
1776 if (pVmcb->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
1777 {
1778 Log(("uInterruptState %x rip=%RGv\n", pVmcb->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
1779 1779 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
1780 1780 }
… …
1785 1785
1786 1786 /* Sync back DR6 as it could have been changed by hitting breakpoints. */
1787 pCtx->dr[6] = pvVMCB->guest.u64DR6;
1787 pCtx->dr[6] = pVmcb->guest.u64DR6;
1788 1788 /* DR7.GD can be cleared by debug exceptions, so sync it back as well. */
1789 pCtx->dr[7] = pvVMCB->guest.u64DR7;
1789 pCtx->dr[7] = pVmcb->guest.u64DR7;
1790 1790
1791 1791 /* Check if an injected event was interrupted prematurely. */
1792 pVCpu->hm.s.Event.u64IntrInfo = pvVMCB->ctrl.ExitIntInfo.au64[0];
1793 if ( pvVMCB->ctrl.ExitIntInfo.n.u1Valid
1792 pVCpu->hm.s.Event.u64IntrInfo = pVmcb->ctrl.ExitIntInfo.au64[0];
1793 if ( pVmcb->ctrl.ExitIntInfo.n.u1Valid
1794 1794 /* we don't care about 'int xx' as the instruction will be restarted. */
1795 && pvVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
1795 && pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
1796 1796 {
1797 1797 Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.u64IntrInfo, (RTGCPTR)pCtx->rip, exitCode));
1798 1798
1799 1799 #ifdef LOG_ENABLED
1800 SVM_EVENT Event;
1800 SVMEVENT Event;
1801 1801 Event.au64[0] = pVCpu->hm.s.Event.u64IntrInfo;
1802 1802
… …
1810 1810 pVCpu->hm.s.Event.fPending = true;
1811 1811 /* Error code present? (redundant) */
1812 if (pvVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
1813 pVCpu->hm.s.Event.u32ErrCode = pvVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
1812 if (pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
1813 pVCpu->hm.s.Event.u32ErrCode = pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode;
1814 1814 else
1815 1815 pVCpu->hm.s.Event.u32ErrCode = 0;
… …
1836 1836 else
1837 1837 {
1838 if ((uint8_t)(u8LastTPR >> 4) != pvVMCB->ctrl.IntCtrl.n.u8VTPR)
1838 if ((uint8_t)(u8LastTPR >> 4) != pVmcb->ctrl.IntCtrl.n.u8VTPR)
1839 1839 {
1840 1840 /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
1841 rc2 = PDMApicSetTPR(pVCpu, pvVMCB->ctrl.IntCtrl.n.u8VTPR << 4);
1841 rc2 = PDMApicSetTPR(pVCpu, pVmcb->ctrl.IntCtrl.n.u8VTPR << 4);
1842 1842 AssertRC(rc2);
1843 1843 }
… …
1848 1848 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "vmexit %08x at %04:%08RX64 %RX64 %RX64 %RX64",
1849 1849 exitCode, pCtx->cs.Sel, pCtx->rip,
1850 pvVMCB->ctrl.u64ExitInfo1, pvVMCB->ctrl.u64ExitInfo2, pvVMCB->ctrl.ExitIntInfo.au64[0]);
1850 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2, pVmcb->ctrl.ExitIntInfo.au64[0]);
1851 1851 #endif
1852 1852 #if ARCH_BITS == 64 /* for the time being */
1853 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pvVMCB->ctrl.u64ExitInfo1, pvVMCB->ctrl.u64ExitInfo2,
1854 pvVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
1853 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2,
1854 pVmcb->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
1855 1855 #endif
1856 1856 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
… …
1869 1869 {
1870 1870 /* Pending trap. */
1871 SVM_EVENT Event;
1871 SVMEVENT Event;
1872 1872 uint32_t vector = exitCode - SVM_EXIT_EXCEPTION_0;
1873 1873
… …
1933 1933 case X86_XCPT_PF: /* Page fault */
1934 1934 {
1935 uint32_t errCode = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1936 RTGCUINTPTR uFaultAddress = pvVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
1935 uint32_t errCode = pVmcb->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
1936 RTGCUINTPTR uFaultAddress = pVmcb->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
1937 1937
1938 1938 #ifdef VBOX_ALWAYS_TRAP_PF
… …
2081 2081 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
2082 2082 Event.n.u1ErrorCodeValid = 1;
2083 Event.n.u32ErrorCode = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2083 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2084 2084 break;
2085 2085 case X86_XCPT_BP:
… …
2095 2095 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
2096 2096 Event.n.u1ErrorCodeValid = 1;
2097 Event.n.u32ErrorCode = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2097 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2098 2098 break;
2099 2099 case X86_XCPT_NP:
2100 2100 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
2101 2101 Event.n.u1ErrorCodeValid = 1;
2102 Event.n.u32ErrorCode = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2102 Event.n.u32ErrorCode = pVmcb->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2103 2103 break;
2104 2104 }
… …
2120 2120 {
2121 2121 /* EXITINFO1 contains fault errorcode; EXITINFO2 contains the guest physical address causing the fault. */
2122 uint32_t errCode = pvVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2123 RTGCPHYS GCPhysFault = pvVMCB->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
2122 uint32_t errCode = pVmcb->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
2123 RTGCPHYS GCPhysFault = pVmcb->ctrl.u64ExitInfo2; /* EXITINFO2 = fault address */
2124 2124 PGMMODE enmShwPagingMode;
2125 2125
… …
2220 2220 /* A virtual interrupt is about to be delivered, which means IF=1. */
2221 2221 Log(("SVM_EXIT_VINTR IF=%d\n", pCtx->eflags.Bits.u1IF));
2222 pvVMCB->ctrl.IntCtrl.n.u1VIrqValid = 0;
2223 pvVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;
2222 pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 0;
2223 pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
2224 2224 goto ResumeExecution;
2225 2225
… …
2389 2389
2390 2390 /* Disable drx move intercepts. */
2391 pvVMCB->ctrl.u16InterceptRdDRx = 0;
2392 pvVMCB->ctrl.u16InterceptWrDRx = 0;
2391 pVmcb->ctrl.u16InterceptRdDRx = 0;
2392 pVmcb->ctrl.u16InterceptWrDRx = 0;
2393 2393
2394 2394 /* Save the host and load the guest debug state. */
… …
2424 2424
2425 2425 /* Disable DRx move intercepts. */
2426 pvVMCB->ctrl.u16InterceptRdDRx = 0;
2427 pvVMCB->ctrl.u16InterceptWrDRx = 0;
2426 pVmcb->ctrl.u16InterceptRdDRx = 0;
2427 pVmcb->ctrl.u16InterceptWrDRx = 0;
2428 2428
2429 2429 /* Save the host and load the guest debug state. */
… …
2447 2447 case SVM_EXIT_IOIO: /* I/O instruction. */
2448 2448 {
2449 SVM_IOIO_EXIT IoExitInfo;
2450
2451 IoExitInfo.au32[0] = pvVMCB->ctrl.u64ExitInfo1;
2449 SVMIOIOEXIT IoExitInfo;
2450
2451 IoExitInfo.au32[0] = pVmcb->ctrl.u64ExitInfo1;
2452 2452 unsigned uIdx = (IoExitInfo.au32[0] >> 4) & 0x7;
2453 2453 uint32_t uIOSize = g_aIOSize[uIdx];
… …
2500 2500 if (rc == VINF_IOM_R3_IOPORT_WRITE)
2501 2501 {
2502 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pvVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
2502 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
2503 2503 uAndVal, uIOSize);
2504 2504 }
… …
2519 2519 else if (rc == VINF_IOM_R3_IOPORT_READ)
2520 2520 {
2521 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pvVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
2521 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
2522 2522 uAndVal, uIOSize);
2523 2523 }
… …
2532 2532 {
2533 2533 /* Update EIP and continue execution. */
2534 pCtx->rip = pvVMCB->ctrl.u64ExitInfo2; /* RIP/EIP of the next instruction is saved in EXITINFO2. */
2534 pCtx->rip = pVmcb->ctrl.u64ExitInfo2; /* RIP/EIP of the next instruction is saved in EXITINFO2. */
2535 2535 if (RT_LIKELY(rc == VINF_SUCCESS))
2536 2536 {
… …
2550 2550 && (pCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
2551 2551 {
2552 SVM_EVENT Event;
2552 SVMEVENT Event;
2553 2553
2554 2554 Assert(CPUMIsGuestDebugStateActive(pVCpu));
… …
2563 2563 * by software after the contents have been read.
2564 2564 */
2565 pvVMCB->guest.u64DR6 = pCtx->dr[6];
2565 pVmcb->guest.u64DR6 = pCtx->dr[6];
2566 2566
2567 2567 /* X86_DR7_GD will be cleared if drx accesses should be trapped inside the guest. */
… …
2573 2573 pCtx->dr[7] |= 0x400; /* must be one */
2574 2574
2575 pvVMCB->guest.u64DR7 = pCtx->dr[7];
2575 pVmcb->guest.u64DR7 = pCtx->dr[7];
2576 2576
2577 2577 /* Inject the exception. */
… …
2676 2676 {
2677 2677 /* Unsupported instructions. */
2678 SVM_EVENT Event;
2678 SVMEVENT Event;
2679 2679
2680 2680 Event.au64[0] = 0;
… …
2694 2694 if ( pVM->hm.s.fTPRPatchingActive
2695 2695 && pCtx->ecx == MSR_K8_LSTAR
2696 && pvVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */)
2696 && pVmcb->ctrl.u64ExitInfo1 == 1 /* wrmsr */)
2697 2697 {
2698 2698 if ((pCtx->eax & 0xff) != u8LastTPR)
… …
2716 2716 * so we play safe by completely disassembling the instruction.
2717 2717 */
2718 STAM_COUNTER_INC((pvVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
2719 Log(("SVM: %s\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
2718 STAM_COUNTER_INC((pVmcb->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
2719 Log(("SVM: %s\n", (pVmcb->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
2720 2720 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
2721 2721 if (rc == VINF_SUCCESS)
… …
2725 2725 goto ResumeExecution;
2726 2726 }
2727 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr",
2727 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (pVmcb->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr",
2728 2728 VBOXSTRICTRC_VAL(rc)));
2729 2729 break;
… …
2731 2731
2732 2732 case SVM_EXIT_TASK_SWITCH: /* too complicated to emulate, so fall back to the recompiler */
2733 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pvVMCB->ctrl.u64ExitInfo2));
2734 if ( !(pvVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
2733 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVmcb->ctrl.u64ExitInfo2));
2734 if ( !(pVmcb->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
2735 2735 && pVCpu->hm.s.Event.fPending)
2736 2736 {
2737 SVM_EVENT Event;
2737 SVMEVENT Event;
2738 2738 Event.au64[0] = pVCpu->hm.s.Event.u64IntrInfo;
2739 2739
… …
2935 2935 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2936 2936 {
2937 SVM_VMCB *pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
2937 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
2938 2938
2939 2939 Assert(pVM->hm.s.svm.fSupported);
… …
2952 2952
2953 2953 /* Intercept all DRx reads and writes again. Changed later on. */
2954 pvVMCB->ctrl.u16InterceptRdDRx = 0xFFFF;
2955 pvVMCB->ctrl.u16InterceptWrDRx = 0xFFFF;
2954 pVmcb->ctrl.u16InterceptRdDRx = 0xFFFF;
2955 pVmcb->ctrl.u16InterceptWrDRx = 0xFFFF;
2956 2956
2957 2957 /* Resync the debug registers the next time. */
… …
2959 2959 }
2960 2960 else
2961 Assert(pvVMCB->ctrl.u16InterceptRdDRx == 0xFFFF && pvVMCB->ctrl.u16InterceptWrDRx == 0xFFFF);
2961 Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xFFFF && pVmcb->ctrl.u16InterceptWrDRx == 0xFFFF);
2962 2962
2963 2963 return VINF_SUCCESS;
… …
3056 3056 if (!fFlushPending)
3057 3057 {
3058 SVM_VMCB *pvVMCB;
3058 PSVMVMCB pVmcb;
3059 3059
3060 3060 Log2(("SVMR0InvalidatePage %RGv\n", GCVirt));
… …
3062 3062 Assert(pVM->hm.s.svm.fSupported);
3063 3063
3064 pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB;
3065 AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_SVM_INVALID_PVMCB);
3064 pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
3065 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
3066 3066
3067 3067 #if HC_ARCH_BITS == 32
… …
3072 3072 #endif
3073 3073 {
3074 SVMR0InvlpgA(GCVirt, pvVMCB->ctrl.TLBCtrl.n.u32ASID);
3074 SVMR0InvlpgA(GCVirt, pVmcb->ctrl.TLBCtrl.n.u32ASID);
3075 3075 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
3076 3076 }
… …
3105 3105 *
3106 3106 * @returns VBox status code.
3107 * @param HCPhysVMCBHost Physical address of host VMCB.
3108 * @param HCPhysVMCB Physical address of the VMCB.
3107 * @param HCPhysVmcbHost Physical address of host VMCB.
3108 * @param HCPhysVmcb Physical address of the VMCB.
3109 3109 * @param pCtx Pointer to the guest CPU context.
3110 3110 * @param pVM Pointer to the VM.
3111 3111 * @param pVCpu Pointer to the VMCPU.
3112 3112 */
3113 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
3113 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
3114 3114 {
3115 3115 uint32_t aParam[4];
3116 3116
3117 aParam[0] = (uint32_t)(HCPhysVMCBHost); /* Param 1: HCPhysVMCBHost - Lo. */
3118 aParam[1] = (uint32_t)(HCPhysVMCBHost >> 32); /* Param 1: HCPhysVMCBHost - Hi. */
3119 aParam[2] = (uint32_t)(HCPhysVMCB); /* Param 2: HCPhysVMCB - Lo. */
3120 aParam[3] = (uint32_t)(HCPhysVMCB >> 32); /* Param 2: HCPhysVMCB - Hi. */
3117 aParam[0] = (uint32_t)(HCPhysVmcbHost); /* Param 1: HCPhysVmcbHost - Lo. */
3118 aParam[1] = (uint32_t)(HCPhysVmcbHost >> 32); /* Param 1: HCPhysVmcbHost - Hi. */
3119 aParam[2] = (uint32_t)(HCPhysVmcb); /* Param 2: HCPhysVmcb - Lo. */
3120 aParam[3] = (uint32_t)(HCPhysVmcb >> 32); /* Param 2: HCPhysVmcb - Hi. */
3121 3121
3122 3122 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, 4, &aParam[0]);
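The HWSVMR0.cpp hunks above are essentially a mechanical rename: the raw SVM_VMCB *pvVMCB pointer becomes the typedef'd PSVMVMCB pVmcb, and SVM_EVENT / SVM_IOIO_EXIT become SVMEVENT / SVMIOIOEXIT, with no behavioural change. As a rough illustration of the access pattern the renamed exit handler uses when it pulls exit information out of the VMCB, here is a minimal sketch; the structure layout below is a simplified stand-in, not the real VBox/AMD definition:

/* Stand-in types only -- the real SVMVMCB/PSVMVMCB layouts live in the VBox headers. */
#include <stdint.h>
#include <stdio.h>

typedef struct SVMVMCBCTRL
{
    uint64_t u64ExitCode;
    uint64_t u64ExitInfo1;    /* e.g. error code for #PF exits */
    uint64_t u64ExitInfo2;    /* e.g. fault address for #PF exits */
} SVMVMCBCTRL;

typedef struct SVMVMCB
{
    SVMVMCBCTRL ctrl;
} SVMVMCB;
typedef SVMVMCB *PSVMVMCB;    /* pointer typedef in the style of the new naming */

/* Read the #PF details from the control area, the same way the renamed exit handler does. */
static void DumpPageFaultExit(PSVMVMCB pVmcb)
{
    uint32_t errCode       = (uint32_t)pVmcb->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
    uint64_t uFaultAddress = pVmcb->ctrl.u64ExitInfo2;           /* EXITINFO2 = fault address */
    printf("#PF errcode=%#x addr=%#llx\n", errCode, (unsigned long long)uFaultAddress);
}

int main(void)
{
    SVMVMCB Vmcb = { { 0x4E /* hypothetical exit code */, 0x02, 0xDEADBEEF } };
    DumpPageFaultExit(&Vmcb);
    return 0;
}

-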
trunk/src/VBox/VMM/VMMR3/HM.cpp
r46297 r46304
331 331 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding));
332 332
333 /* Some structure checks. */
334 AssertCompileMemberOffset(SVM_VMCB, ctrl.EventInject, 0xA8);
335 AssertCompileMemberOffset(SVM_VMCB, ctrl.ExitIntInfo, 0x88);
336 AssertCompileMemberOffset(SVM_VMCB, ctrl.TLBCtrl, 0x58);
337
338 AssertCompileMemberOffset(SVM_VMCB, guest, 0x400);
339 AssertCompileMemberOffset(SVM_VMCB, guest.TR, 0x490);
340 AssertCompileMemberOffset(SVM_VMCB, guest.u8CPL, 0x4CB);
341 AssertCompileMemberOffset(SVM_VMCB, guest.u64EFER, 0x4D0);
342 AssertCompileMemberOffset(SVM_VMCB, guest.u64CR4, 0x548);
343 AssertCompileMemberOffset(SVM_VMCB, guest.u64RIP, 0x578);
344 AssertCompileMemberOffset(SVM_VMCB, guest.u64RSP, 0x5D8);
345 AssertCompileMemberOffset(SVM_VMCB, guest.u64CR2, 0x640);
346 AssertCompileMemberOffset(SVM_VMCB, guest.u64GPAT, 0x668);
347 AssertCompileMemberOffset(SVM_VMCB, guest.u64LASTEXCPTO, 0x690);
348 AssertCompileSize(SVM_VMCB, 0x1000);
349
350 333 /*
351 334 * Register the saved state data unit.
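The HM.cpp hunk drops the compile-time SVM_VMCB layout checks, which were tied to the old structure name. For readers unfamiliar with this kind of check, the following stand-alone sketch (plain C11, with an invented MYVMCBCTRL type rather than the real VMCB) shows the same idea expressed with offsetof and _Static_assert:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical structure whose layout is dictated by hardware. */
typedef struct MYVMCBCTRL
{
    uint8_t  abReserved[0x58]; /* padding up to the field we care about */
    uint64_t u64TLBCtrl;       /* must land at offset 0x58 */
} MYVMCBCTRL;

/* Equivalent in spirit to AssertCompileMemberOffset()/AssertCompileSize(): break the build on layout drift. */
_Static_assert(offsetof(MYVMCBCTRL, u64TLBCtrl) == 0x58, "u64TLBCtrl must sit at offset 0x58");
_Static_assert(sizeof(MYVMCBCTRL) == 0x60,               "MYVMCBCTRL must be 0x60 bytes");

int main(void) { return 0; }

-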
trunk/src/VBox/VMM/include/HMInternal.h
r46286 r46304
532 532
533 533 /** SVM VMRun function. */
534 typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
534 typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
535 535 /** Pointer to a SVM VMRun function. */
536 536 typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN;
… …
694 694 {
695 695 /** R0 memory object for the host VM control block (VMCB). */
696 RTR0MEMOBJ hMemObjVMCBHost;
696 RTR0MEMOBJ hMemObjVmcbHost;
697 697 /** Physical address of the host VM control block (VMCB). */
698 RTHCPHYS HCPhysVMCBHost;
698 RTHCPHYS HCPhysVmcbHost;
699 699 /** Virtual address of the host VM control block (VMCB). */
700 R0PTRTYPE(void *) pvVMCBHost;
700 R0PTRTYPE(void *) pvVmcbHost;
701 701
702 702 /** R0 memory object for the VM control block (VMCB). */
703 RTR0MEMOBJ hMemObjVMCB;
703 RTR0MEMOBJ hMemObjVmcb;
704 704 /** Physical address of the VM control block (VMCB). */
705 RTHCPHYS HCPhysVMCB;
705 RTHCPHYS HCPhysVmcb;
706 706 /** Virtual address of the VM control block (VMCB). */
707 R0PTRTYPE(void *) pvVMCB;
707 R0PTRTYPE(void *) pvVmcb;
708 708
709 709 /** Ring 0 handlers for VT-x. */
… …
934 934 # ifdef VBOX_WITH_KERNEL_USING_XMM
935 935 DECLASM(int) HMR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM);
936 DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
936 DECLASM(int) HMR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun);
937 937
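The HMInternal.h hunk carries the same Vmcb spelling into the per-VCPU state members and into the FNHMSVMVMRUN callback's parameter names. As a reminder of how such a callback typedef is typically consumed, here is a small self-contained analogue; the types and the MyVMRun function are invented for illustration and are not the actual VBox declarations:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RTHCPHYS;   /* stand-in for the IPRT host-physical-address type */

/* Simplified analogue of FNHMSVMVMRUN: called with the host and guest VMCB physical addresses. */
typedef int FNVMRUN(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb);
typedef FNVMRUN *PFNVMRUN;   /* pointer type, in the style of PFNHMSVMVMRUN */

static int MyVMRun(RTHCPHYS HCPhysVmcbHost, RTHCPHYS HCPhysVmcb)
{
    /* A real implementation would save host state and execute VMRUN; this stub just logs the addresses. */
    printf("VMRUN host VMCB=%#llx guest VMCB=%#llx\n",
           (unsigned long long)HCPhysVmcbHost, (unsigned long long)HCPhysVmcb);
    return 0;
}

int main(void)
{
    PFNVMRUN pfnVMRun = MyVMRun;          /* the dispatcher would store such a pointer per VM */
    return pfnVMRun(0x1000, 0x2000);      /* hypothetical physical addresses */
}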