Changeset 43455 in vbox for trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
- Timestamp: Sep 27, 2012, 2:00:03 PM (12 years ago)
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
The diff is against r43387. Throughout the changeset the SVM state members and locals are renamed to the HM naming conventions: ring-0 mapping pointers gain a "pv" prefix (pVMCB -> pvVMCB, pVMCBHost -> pvVMCBHost, pIOBitmap -> pvIOBitmap, pMSRBitmap -> pvMSRBitmap), memory-object handles gain an "h" prefix (pMemObjVMCB -> hMemObjVMCB, pMemObjVMCBHost -> hMemObjVMCBHost, pMemObjIOBitmap -> hMemObjIOBitmap, pMemObjMSRBitmap -> hMemObjMSRBitmap), and host-context physical addresses use "HCPhys" (pVMCBPhys -> HCPhysVMCB, pVMCBHostPhys -> HCPhysVMCBHost, pIOBitmapPhys -> HCPhysIOBitmap, pMSRBitmapPhys -> HCPhysMSRBitmap).

A "Defined Constants And Macros" section is added right after the hmR0SvmSetMSRPermission() forward declaration:

    /*******************************************************************************
    *   Defined Constants And Macros                                               *
    *******************************************************************************/
    /** Convert hidden selector attribute word between VMX and SVM formats. */
    #define SVM_HIDSEGATTR_VMX2SVM(a)     (a & 0xFF) | ((a & 0xF000) >> 4)
    #define SVM_HIDSEGATTR_SVM2VMX(a)     (a & 0xFF) | ((a & 0x0F00) << 4)

    #define SVM_WRITE_SELREG(REG, reg) \
        do \
        { \
            Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
            Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
            pvVMCB->guest.REG.u16Sel   = pCtx->reg.Sel; \
            pvVMCB->guest.REG.u32Limit = pCtx->reg.u32Limit; \
            pvVMCB->guest.REG.u64Base  = pCtx->reg.u64Base; \
            pvVMCB->guest.REG.u16Attr  = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg.Attr.u); \
        } while (0)

    #define SVM_READ_SELREG(REG, reg) \
        do \
        { \
            pCtx->reg.Sel      = pvVMCB->guest.REG.u16Sel; \
            pCtx->reg.ValidSel = pvVMCB->guest.REG.u16Sel; \
            pCtx->reg.fFlags   = CPUMSELREG_FLAGS_VALID; \
            pCtx->reg.u32Limit = pvVMCB->guest.REG.u32Limit; \
            pCtx->reg.u64Base  = pvVMCB->guest.REG.u64Base; \
            pCtx->reg.Attr.u   = SVM_HIDSEGATTR_SVM2VMX(pvVMCB->guest.REG.u16Attr); \
        } while (0)

In the per-VM initialization code the renames are applied to the I/O bitmap allocation, and the RTR0MemObjAllocCont() comment changes from "executable R0 mapping" to "fExecutable":

    pVM->hm.s.svm.hMemObjIOBitmap = NIL_RTR0MEMOBJ;

    /* Allocate 12 KB for the IO bitmap (doesn't seem to be a way to convince SVM not to use it) */
    rc = RTR0MemObjAllocCont(&pVM->hm.s.svm.hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hm.s.svm.pvIOBitmap     = RTR0MemObjAddress(pVM->hm.s.svm.hMemObjIOBitmap);
    pVM->hm.s.svm.HCPhysIOBitmap = RTR0MemObjGetPagePhysAddr(pVM->hm.s.svm.hMemObjIOBitmap, 0);
    /* Set all bits to intercept all IO accesses. */
    ASMMemFill32(pVM->hm.s.svm.pvIOBitmap, 3 << PAGE_SHIFT, 0xffffffff);

The per-VCPU allocations follow the same pattern: the handles hMemObjVMCBHost, hMemObjVMCB and hMemObjMSRBitmap are initialized to NIL_RTR0MEMOBJ, one page is allocated for the host context and exposed as pvVMCBHost/HCPhysVMCBHost, with the physical address asserted to lie below 4 GB.
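The conversion macros exist because VMX keeps the hidden segment attribute bits in a 16-bit view with an unused hole at bits 8-11, while SVM packs the same bits into 12 contiguous bits, hence the shift by 4. A minimal standalone sketch of the round trip; the macros are redefined locally with extra parentheses, and the 0x9B/0xC values follow the usual x86 descriptor access-rights encoding for a 32-bit code segment:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    /* Same conversions as the macros above, parenthesized for standalone use. */
    #define MY_HIDSEGATTR_VMX2SVM(a)  (((a) & 0xFF) | (((a) & 0xF000) >> 4))
    #define MY_HIDSEGATTR_SVM2VMX(a)  (((a) & 0xFF) | (((a) & 0x0F00) << 4))

    int main(void)
    {
        /* VMX-style attributes: type=0xB, S=1, DPL=0, P=1 (low byte 0x9B);
           G=1, D/B=1, L=0, AVL=0 in bits 12-15 (0xC); bits 8-11 unused. */
        uint32_t uVmxAttr = 0xC09B;

        uint16_t uSvmAttr = (uint16_t)MY_HIDSEGATTR_VMX2SVM(uVmxAttr);
        printf("VMX 0x%04X -> SVM 0x%03X\n", uVmxAttr, uSvmAttr);   /* 0xC09B -> 0xC9B */

        /* Converting back recovers the original (the unused VMX nibble stays zero). */
        assert(MY_HIDSEGATTR_SVM2VMX(uSvmAttr) == uVmxAttr);
        return 0;
    }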
Back in the per-VCPU allocations, the host-state page is zeroed and the same handle/pointer/physical-address triple is set up for the VMCB page and the 8 KB MSR bitmap, which is filled with 0xffffffff so that all MSR accesses are intercepted by default:

    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVMCB, 1 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    pVCpu->hm.s.svm.pvVMCB     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVMCB);
    pVCpu->hm.s.svm.HCPhysVMCB = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVMCB, 0);
    Assert(pVCpu->hm.s.svm.HCPhysVMCB < _4G);
    ASMMemZeroPage(pVCpu->hm.s.svm.pvVMCB);

    /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;

    pVCpu->hm.s.svm.pvMSRBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMSRBitmap);
    pVCpu->hm.s.svm.HCPhysMSRBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMSRBitmap, 0);
    /* Set all bits to intercept all MSR accesses. */
    ASMMemFill32(pVCpu->hm.s.svm.pvMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);

The matching cleanup hunks are updated next.
In the termination path each memory object is freed and the renamed members are reset, e.g.:

    if (pVCpu->hm.s.svm.hMemObjVMCB != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVMCB, false);
        pVCpu->hm.s.svm.pvVMCB      = 0;
        pVCpu->hm.s.svm.HCPhysVMCB  = 0;
        pVCpu->hm.s.svm.hMemObjVMCB = NIL_RTR0MEMOBJ;
    }

with identical blocks for hMemObjVMCBHost, hMemObjMSRBitmap and the VM-wide hMemObjIOBitmap.

In the VMCB setup code the local pointer becomes SVM_VMCB *pvVMCB = (SVM_VMCB *)pVM->aCpus[i].hm.s.svm.pvVMCB (with the AssertMsgReturn on it updated), and every access in the setup is renamed accordingly: the CR0/CR4 read and write intercepts, the default interception of all DRx reads and writes (0xFFFF), the exception intercepts (#NM always, #PF under VBOX_ALWAYS_TRAP_PF, and the extra #BP/#DB/#DE/... set under VBOX_STRICT), and the SVM_CTRL1_INTERCEPT_* instruction intercepts up to FERR_FREEZE (legacy FPU FERR handling).
The rename continues through the rest of the VMCB setup: the SVM_CTRL2_INTERCEPT_* intercepts (VMRUN is required), the Log statements dumping the three intercept words, virtual INTR masking (IntCtrl.n.u1VIrqMasking = 1), ignoring the priority in the TPR (u1IgnoreTPR = 1), no LBR virtualization, the initial ASID of 1 (the host uses 0), the guest PAT default of 0x0006060606060606, and the bitmap addresses, which now read

    pvVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.HCPhysIOBitmap;
    pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMSRBitmap;

When nested paging is not in use, the additional CR3 read/write, INVLPG and task-switch intercepts are set on pvVMCB as well, plus the #PF intercept needed to implement shadow paging.
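Per the AMD manual the IOPM holds one intercept bit per I/O port over the full 64 K port range, with a third page covering multi-byte accesses that wrap the top of the range; that is why 12 KB is allocated and filled with 0xffffffff above. A small illustrative sketch, not VirtualBox code (names are ours):

    #include <stdint.h>
    #include <stdbool.h>
    #include <string.h>

    #define IOPM_SIZE   (3 * 4096)   /* 12 KB, as allocated above */

    /* One bit per I/O port; a set bit means the access is intercepted. */
    static bool iopmIsIntercepted(const uint8_t *pbIopm, uint16_t uPort)
    {
        return (pbIopm[uPort / 8] >> (uPort % 8)) & 1;
    }

    static void iopmAllowPort(uint8_t *pbIopm, uint16_t uPort)
    {
        pbIopm[uPort / 8] &= (uint8_t)~(1u << (uPort % 8));
    }

    int main(void)
    {
        static uint8_t s_abIopm[IOPM_SIZE];
        memset(s_abIopm, 0xff, sizeof(s_abIopm));   /* intercept everything, like the fill above */
        iopmAllowPort(s_abIopm, 0x80);              /* hypothetically pass one port straight through */
        return !(!iopmIsIntercepted(s_abIopm, 0x80) && iopmIsIntercepted(s_abIopm, 0x81));
    }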
In hmR0SvmSetMSRPermission() the local bitmap pointer becomes uint8_t *pvMSRBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMSRBitmap; the logic is unchanged: MSRs 0x00000000-0x00001FFF use the start of the bitmap, the 0xC0000000 range starts at offset 0x800 and the 0xC0010000 range at offset 0x1000, each MSR owning two bits (read at ulBit, write at ulBit + 1) that are cleared with ASMBitClear() to allow an access and set with ASMBitSet() to intercept it.

hmR0SvmInjectEvent() and hmR0SvmCheckPendingInterrupt() get the renamed pvVMCB parameter (doc comments updated as well). Their behaviour is untouched: a pending event is re-injected through ctrl.EventInject, an NMI is injected as SVM_EVENT_NMI, and when interrupts cannot be dispatched yet (EFLAGS.IF clear or interrupts inhibited) the code arms a virtual interrupt intercept so the guest exits as soon as it becomes interruptible (/** @todo Use virtual interrupt method to inject a pending IRQ; dispatched as soon as guest.IF is set. */).
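For reference, here is a standalone sketch of the two-bits-per-MSR permission-map layout described above. The helper names are ours and the range bases follow the AMD manual; it is not a copy of the function in this file:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stddef.h>

    /*
     * Two bits per MSR (even bit = read, odd bit = write) in an 8 KB bitmap.
     * Returns the bit offset, or -1 for MSRs outside the covered ranges
     * (those are always intercepted).
     */
    static int msrpmBitOffset(uint32_t uMsr)
    {
        if (uMsr <= 0x00001FFF)
            return (int)(uMsr * 2);                                   /* bytes 0x0000-0x07FF */
        if (uMsr >= 0xC0000000 && uMsr <= 0xC0001FFF)
            return (int)(0x800 * 8 + (uMsr - 0xC0000000) * 2);        /* bytes 0x0800-0x0FFF */
        if (uMsr >= 0xC0010000 && uMsr <= 0xC0011FFF)
            return (int)(0x1000 * 8 + (uMsr - 0xC0010000) * 2);       /* bytes 0x1000-0x17FF */
        return -1;
    }

    /* Set or clear the intercept bits for one MSR in an 8 KB bitmap. */
    static void msrpmSetPermission(uint8_t *pbBitmap, uint32_t uMsr, bool fInterceptRd, bool fInterceptWr)
    {
        int iBit = msrpmBitOffset(uMsr);
        if (iBit < 0)
            return;                               /* not configurable, always intercepted */
        size_t  iByte   = (size_t)iBit / 8;       /* both bits land in the same byte */
        uint8_t fRdMask = (uint8_t)(1u << (iBit % 8));
        uint8_t fWrMask = (uint8_t)(1u << ((iBit + 1) % 8));
        pbBitmap[iByte] = (uint8_t)((pbBitmap[iByte] & ~fRdMask) | (fInterceptRd ? fRdMask : 0));
        pbBitmap[iByte] = (uint8_t)((pbBitmap[iByte] & ~fWrMask) | (fInterceptWr ? fWrMask : 0));
    }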
Where that virtual interrupt intercept is armed the code now reads

    pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
    pvVMCB->ctrl.IntCtrl.n.u1VIrqValid  = 1;
    pvVMCB->ctrl.IntCtrl.n.u8VIrqVector = 0;    /* don't care */

and the hmR0SvmInjectEvent() call in the dispatch path uses pvVMCB as well.

The guest-state loading code picks up the same renames for its local pvVMCB pointer and for everything it writes into the VMCB: the segment registers (via SVM_WRITE_SELREG), the GDTR and IDTR limits and bases, the unconditional SYSENTER MSRs, and the CR0 handling, including the old-style FPU #MF intercept override and the forced X86_CR0_WP, which shadow paging needs so that supervisor writes to protected pages are caught.
The remaining guest-state loading is renamed the same way: CR0 and CR2 are written to pvVMCB->guest; with nested paging active, pvVMCB->ctrl.u64NestedPagingCR3 is set from PGMGetNestedCR3(pVCpu, enmShwPagingMode) and the guest CR3 is used directly, otherwise the guest CR3 field gets PGMGetHyperCR3(); then CR4, DR6/DR7 (with the hypervisor overrides when the hyper debug state is active, and the DRx move intercepts cleared when the guest debug state is loaded), RIP/RSP/RFLAGS, the CPL taken from pCtx->ss.Attr.n.u2Dpl, RAX (VMRUN uses it as an implicit parameter), and EFER with MSR_K6_EFER_SVME forced on. For 64-bit guests the FS and GS bases are updated unconditionally; otherwise MSR_K6_EFER_LME is masked out and pfnVMRun is set to SVMR0VMRun.
The TSC handling keeps its logic under the new names; a LogFlow line in the fallback branch still reports the offending values:

    if (TMCpuTickCanUseRealTSC(pVCpu, &pvVMCB->ctrl.u64TSCOffset))
    {
        uint64_t u64CurTSC = ASMReadTSC();
        if (u64CurTSC + pvVMCB->ctrl.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
        {
            pvVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
            pvVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset);
        }
        else
        {
            /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */
            pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
            pvVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow);
        }
    }
    else
    {
        pvVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
        pvVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept);
    }
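The decision above boils down to a monotonicity check on the value the guest will observe. A tiny standalone illustration (names are ours, not the file's):

    #include <stdint.h>
    #include <stdbool.h>

    /*
     * In offset mode the guest observes host_tsc + offset. Offsetting is only
     * safe if that never drops below the largest TSC value already handed to
     * the guest; otherwise RDTSC has to be intercepted and emulated instead.
     */
    static bool canKeepTscOffsetting(uint64_t uHostTsc, uint64_t uOffset, uint64_t uLastSeenByGuest)
    {
        return uHostTsc + uOffset >= uLastSeenByGuest;
    }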
The 64-bit syscall MSRs are then synced into the VMCB unconditionally, since wrmsr may have changed them:

    pvVMCB->guest.u64STAR         = pCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
    pvVMCB->guest.u64LSTAR        = pCtx->msrLSTAR;           /* 64-bit mode syscall rip */
    pvVMCB->guest.u64CSTAR        = pCtx->msrCSTAR;           /* compatibility mode syscall rip */
    pvVMCB->guest.u64SFMASK       = pCtx->msrSFMASK;          /* syscall flag mask */
    pvVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE;    /* SWAPGS exchange value */

and the DEBUG-only single-stepping code toggles the #DB intercept on pvVMCB.

The TLB setup code gets the same treatment: pvVMCB is fetched from pVCpu->hm.s.svm.pvVMCB, u8TLBFlush starts out as SVM_TLB_FLUSH_NOTHING, a forced flush or a CPU/world-switch change selects SVM_TLB_FLUSH_SINGLE_CONTEXT when the CPU reports AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID and SVM_TLB_FLUSH_ENTIRE otherwise, and queued TLB shootdown pages are invalidated with SVMR0InvlpgA() against the current ASID.
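The flush-type selection above reduces to a small decision. A sketch with illustrative stand-in values; the real SVM_TLB_FLUSH_* encodings live in the SVM headers and may differ:

    #include <stdbool.h>

    /* Hypothetical stand-ins for the TLB-control encodings used above. */
    enum { TLB_FLUSH_NOTHING = 0, TLB_FLUSH_ENTIRE = 1, TLB_FLUSH_SINGLE_CONTEXT = 3 };

    /*
     * Pick the cheapest flush that is still correct: nothing if no flush is
     * required, a per-ASID flush when the CPU supports flush-by-ASID, and a
     * full TLB flush otherwise.
     */
    static int selectTlbFlush(bool fFlushRequired, bool fCpuHasFlushByAsid)
    {
        if (!fFlushRequired)
            return TLB_FLUSH_NOTHING;
        return fCpuHasFlushByAsid ? TLB_FLUSH_SINGLE_CONTEXT : TLB_FLUSH_ENTIRE;
    }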
The shootdown invalidation line itself becomes:

    SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pvVMCB->ctrl.TLBCtrl.n.u32ASID);

and the ASID written back into the VMCB plus the flush-type statistics at the end of the TLB setup use pvVMCB as well.

The main guest-execution loop declares its local as SVM_VMCB *pvVMCB = NULL, fetches it from pVCpu->hm.s.svm.pvVMCB, and clears pvVMCB->ctrl.u64IntShadow when interrupt inhibition (sti, mov ss) is no longer active. The pending-interrupt check is forwarded with the renamed pointer, as is the TPR handling that mirrors the task priority into IntCtrl.n.u8VTPR and toggles the CR8 write intercept depending on whether an interrupt is pending; nested paging is re-enabled each round via pvVMCB->ctrl.NestedPaging.n.u1NestedPaging.
Just before VMRUN the sanity asserts now read

    Assert(sizeof(pVCpu->hm.s.svm.HCPhysVMCB) == 8);
    Assert(pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
    Assert(pvVMCB->ctrl.u64IOPMPhysAddr  == pVM->hm.s.svm.HCPhysIOBitmap);
    Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMSRBitmap);
    Assert(pvVMCB->ctrl.u64LBRVirt == 0);

and the world switch itself passes the renamed physical addresses:

    #ifdef VBOX_WITH_KERNEL_USING_XMM
        hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu,
                          pVCpu->hm.s.svm.pfnVMRun);
    #else
        pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);
    #endif

The TSC_AUX save/restore around the run, the post-run TMCpuTickSetLastSeen() adjustment (offset minus a ~0x400-tick guesstimate of world-switch overhead) and the exit-reason fetch (exitCode = pvVMCB->ctrl.u64ExitCode) all use the pvVMCB/HCPhys names now.
When the exit code is SVM_EXIT_INVALID (invalid guest state) the code calls HMDumpRegs() and, under DEBUG, dumps the whole VMCB; that dump is updated wholesale. Roughly eighty Log() statements cover ctrl.u16InterceptRdCRx/WrCRx and RdDRx/WrDRx, u32InterceptException, u32InterceptCtrl1/2, the IOPM/MSRPM addresses, the TSC offset, TLBCtrl, IntCtrl, u64IntShadow, the exit code/info and ExitIntInfo fields, EventInject, the nested-paging CR3, LBR virtualization, every guest segment register (CS/DS/ES/FS/GS/LDTR/TR plus GDTR/IDTR), the CPL, CR0-CR4, DR6/DR7, RIP/RSP/RAX/RFLAGS, the SYSENTER and syscall MSRs, KernelGSBase, G_PAT, DBGCTL and the last-branch/last-exception records. Each line changes only its pointer, e.g.:

    Log(("ctrl.u64ExitCode            %RX64\n",   pvVMCB->ctrl.u64ExitCode));
    Log(("guest.CS.u16Sel             %04X\n",    pvVMCB->guest.CS.u16Sel));
    Log(("guest.u64CR3                %RX64\n",   pvVMCB->guest.u64CR3));
The tail of that dump (the last-exception record) is followed by rc = VERR_SVM_UNABLE_TO_START_VM.

For the normal exit path the guest-state sync-back is renamed throughout: RIP/RSP/RFLAGS and RAX are read back from pvVMCB->guest (RAX is saved and restored across VMRUN), as are the syscall MSRs (STAR/LSTAR/CSTAR/SFMASK/KernelGSBase), the SYSENTER MSRs, and CR2, which nested paging can update behind our back. The segment registers come back via SVM_READ_SELREG, the SS DPL is taken from pvVMCB->guest.u8CPL (asserted to be 0-3, per AMD spec 15.5.1 Basic operation), LDTR/TR and GDTR/IDTR are read back, and when nested paging is active and CR3 changed, CPUMSetGuestCR3() and PGMUpdateCR3() are fed pvVMCB->guest.u64CR3.
Interrupt-shadow handling after the exit (instruction fusing via sti/mov ss, AMD spec 15.20.5 Interrupt Shadows) checks pvVMCB->ctrl.u64IntShadow and calls EMSetInhibitInterruptsPC(). DR6 and DR7 are synced back from pvVMCB->guest, since breakpoints and debug exceptions can modify them. The prematurely-interrupted-event check reads pvVMCB->ctrl.ExitIntInfo (software interrupts are ignored because the instruction is simply restarted) and copies the error code when u1ErrorCodeValid is set. Finally the TPR write-back compares the value the guest last saw with the VTPR in the VMCB; cr8 bits 3-0 correspond to bits 7-4 of the task-priority MMIO register.
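The >> 4 / << 4 pair in the code encodes exactly that relationship: CR8 (and the VMCB's V_TPR field) holds only the upper four bits of the APIC task-priority register, so the low nibble is irrelevant. A tiny sketch with helper names of our own:

    #include <stdint.h>
    #include <assert.h>

    /* CR8 / V_TPR keep only TPR bits 7-4 of the APIC task-priority register. */
    static uint8_t tprToVtpr(uint8_t uApicTpr)  { return uApicTpr >> 4; }
    static uint8_t vtprToTpr(uint8_t uVtpr)     { return (uint8_t)(uVtpr << 4); }

    int main(void)
    {
        uint8_t uTpr = 0x50;                     /* the value the code above calls u8LastTPR */
        assert(tprToVtpr(uTpr) == 0x5);          /* what gets placed in IntCtrl.n.u8VTPR */
        assert(vtprToTpr(0x5)  == 0x50);         /* what gets handed back to PDMApicSetTPR() */
        return 0;
    }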
The write-back itself is:

    rc2 = PDMApicSetTPR(pVCpu, pvVMCB->ctrl.IntCtrl.n.u8VTPR << 4);
    AssertRC(rc2);

The trace-buffer message and the VBOXVMM_R0_HMSVM_VMEXIT probe pass pvVMCB->ctrl.u64ExitInfo1/2 and ExitIntInfo.au64[0]. In the exit dispatcher every hmR0SvmInjectEvent() call and every read of u64ExitInfo1/u64ExitInfo2 is renamed: the #DB re-injection, the #NM (device-not-available) injection, the #PF path (error code from EXITINFO1, fault address from EXITINFO2), the shadow-paging #PF forwarding, the #MF injection, and the generic trap path that fills in the error code for #GP/#SS/#NP from EXITINFO1 before injecting the event.
The nested-paging fault exit likewise takes its error code and guest-physical fault address from pvVMCB->ctrl.u64ExitInfo1/2; SVM_EXIT_VINTR clears IntCtrl.n.u1VIrqValid and u8VIrqVector; the two places that disable the DRx move intercepts when switching to the guest debug state zero pvVMCB->ctrl.u16InterceptRdDRx/WrDRx. The I/O exit decodes SVM_IOIO_EXIT from pvVMCB->ctrl.u64ExitInfo1 (size index from bits 4-6 via g_aIOSize), saves pending port reads and writes together with the next RIP from u64ExitInfo2, and on success advances pCtx->rip to that value; on a debug-register hit, DR6 is written back into pvVMCB->guest.u64DR6 and DR7 (with the required bits forced) into pvVMCB->guest.u64DR7 before the debug exception is injected.
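For reference, a hedged sketch of how an IOIO EXITINFO1 word like the one decoded above is laid out per the AMD manual; struct and helper names are ours, and the size table mirrors the idea of the file's g_aIOSize:

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* bit 0 = IN (0 = OUT), bit 2 = string op, bit 3 = REP, bits 4-6 = one-hot size, bits 16-31 = port. */
    typedef struct IOIOINFO
    {
        bool     fIn;
        bool     fString;
        bool     fRep;
        uint32_t cbSize;     /* 1, 2 or 4 */
        uint16_t uPort;
    } IOIOINFO;

    static IOIOINFO decodeIoio(uint32_t uExitInfo1)
    {
        static const uint32_t s_acbSize[8] = { 0, 1, 2, 0, 4, 0, 0, 0 };
        IOIOINFO Info;
        Info.fIn     = (uExitInfo1 & 1u) != 0;
        Info.fString = (uExitInfo1 & (1u << 2)) != 0;
        Info.fRep    = (uExitInfo1 & (1u << 3)) != 0;
        Info.cbSize  = s_acbSize[(uExitInfo1 >> 4) & 0x7];
        Info.uPort   = (uint16_t)(uExitInfo1 >> 16);
        return Info;
    }

    int main(void)
    {
        /* OUT to port 0x80, one byte: IN bit clear, SZ8 set, port in the top half. */
        IOIOINFO Info = decodeIoio((0x80u << 16) | (1u << 4));
        printf("%s port %#x, %u byte(s)\n", Info.fIn ? "in" : "out", Info.uPort, Info.cbSize);
        return 0;
    }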
*/ … … 2535 2563 Event.n.u8Vector = X86_XCPT_DB; 2536 2564 2537 hmR0SvmInjectEvent(pVCpu, p VMCB, pCtx, &Event);2565 hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event); 2538 2566 goto ResumeExecution; 2539 2567 } … … 2636 2664 2637 2665 Log(("Forced #UD trap at %RGv\n", (RTGCPTR)pCtx->rip)); 2638 hmR0SvmInjectEvent(pVCpu, p VMCB, pCtx, &Event);2666 hmR0SvmInjectEvent(pVCpu, pvVMCB, pCtx, &Event); 2639 2667 goto ResumeExecution; 2640 2668 } … … 2646 2674 if ( pVM->hm.s.fTPRPatchingActive 2647 2675 && pCtx->ecx == MSR_K8_LSTAR 2648 && p VMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */)2676 && pvVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */) 2649 2677 { 2650 2678 if ((pCtx->eax & 0xff) != u8LastTPR) … … 2668 2696 * so we play safe by completely disassembling the instruction. 2669 2697 */ 2670 STAM_COUNTER_INC((p VMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);2671 Log(("SVM: %s\n", (p VMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));2698 STAM_COUNTER_INC((pvVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr); 2699 Log(("SVM: %s\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr")); 2672 2700 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); 2673 2701 if (rc == VINF_SUCCESS) … … 2677 2705 goto ResumeExecution; 2678 2706 } 2679 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (p VMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr",2707 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n", (pvVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr", 2680 2708 VBOXSTRICTRC_VAL(rc))); 2681 2709 break; … … 2683 2711 2684 2712 case SVM_EXIT_TASK_SWITCH: /* too complicated to emulate, so fall back to the recompiler */ 2685 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", p VMCB->ctrl.u64ExitInfo2));2686 if ( !(p VMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))2713 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pvVMCB->ctrl.u64ExitInfo2)); 2714 if ( !(pvVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP)) 2687 2715 && pVCpu->hm.s.Event.fPending) 2688 2716 { … … 2887 2915 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2888 2916 { 2889 SVM_VMCB *p VMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;2917 SVM_VMCB *pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB; 2890 2918 2891 2919 Assert(pVM->hm.s.svm.fSupported); … … 2904 2932 2905 2933 /* Intercept all DRx reads and writes again. Changed later on. */ 2906 p VMCB->ctrl.u16InterceptRdDRx = 0xFFFF;2907 p VMCB->ctrl.u16InterceptWrDRx = 0xFFFF;2934 pvVMCB->ctrl.u16InterceptRdDRx = 0xFFFF; 2935 pvVMCB->ctrl.u16InterceptWrDRx = 0xFFFF; 2908 2936 2909 2937 /* Resync the debug registers the next time. 
*/ … … 2911 2939 } 2912 2940 else 2913 Assert(p VMCB->ctrl.u16InterceptRdDRx == 0xFFFF && pVMCB->ctrl.u16InterceptWrDRx == 0xFFFF);2941 Assert(pvVMCB->ctrl.u16InterceptRdDRx == 0xFFFF && pvVMCB->ctrl.u16InterceptWrDRx == 0xFFFF); 2914 2942 2915 2943 return VINF_SUCCESS; … … 3008 3036 if (!fFlushPending) 3009 3037 { 3010 SVM_VMCB *p VMCB;3038 SVM_VMCB *pvVMCB; 3011 3039 3012 3040 Log2(("SVMR0InvalidatePage %RGv\n", GCVirt)); … … 3014 3042 Assert(pVM->hm.s.svm.fSupported); 3015 3043 3016 p VMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB;3017 AssertMsgReturn(p VMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);3044 pvVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pvVMCB; 3045 AssertMsgReturn(pvVMCB, ("Invalid pvVMCB\n"), VERR_HMSVM_INVALID_PVMCB); 3018 3046 3019 3047 #if HC_ARCH_BITS == 32 … … 3023 3051 else 3024 3052 #endif 3025 SVMR0InvlpgA(GCVirt, p VMCB->ctrl.TLBCtrl.n.u32ASID);3053 SVMR0InvlpgA(GCVirt, pvVMCB->ctrl.TLBCtrl.n.u32ASID); 3026 3054 } 3027 3055 return VINF_SUCCESS; … … 3054 3082 * 3055 3083 * @returns VBox status code. 3056 * @param pVMCBHostPhysPhysical address of host VMCB.3057 * @param pVMCBPhysPhysical address of the VMCB.3084 * @param HCPhysVMCBHost Physical address of host VMCB. 3085 * @param HCPhysVMCB Physical address of the VMCB. 3058 3086 * @param pCtx Pointer to the guest CPU context. 3059 3087 * @param pVM Pointer to the VM. 3060 3088 * @param pVCpu Pointer to the VMCPU. 3061 3089 */ 3062 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)3090 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS HCPhysVMCBHost, RTHCPHYS HCPhysVMCB, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu) 3063 3091 { 3064 3092 uint32_t aParam[4]; 3065 3093 3066 aParam[0] = (uint32_t)( pVMCBHostPhys); /* Param 1: pVMCBHostPhys- Lo. */3067 aParam[1] = (uint32_t)( pVMCBHostPhys >> 32); /* Param 1: pVMCBHostPhys- Hi. */3068 aParam[2] = (uint32_t)( pVMCBPhys); /* Param 2: pVMCBPhys- Lo. */3069 aParam[3] = (uint32_t)( pVMCBPhys >> 32); /* Param 2: pVMCBPhys- Hi. */3094 aParam[0] = (uint32_t)(HCPhysVMCBHost); /* Param 1: HCPhysVMCBHost - Lo. */ 3095 aParam[1] = (uint32_t)(HCPhysVMCBHost >> 32); /* Param 1: HCPhysVMCBHost - Hi. */ 3096 aParam[2] = (uint32_t)(HCPhysVMCB); /* Param 2: HCPhysVMCB - Lo. */ 3097 aParam[3] = (uint32_t)(HCPhysVMCB >> 32); /* Param 2: HCPhysVMCB - Hi. */ 3070 3098 3071 3099 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]);