Changeset 46441 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 7, 2013 1:38:58 PM
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp    (r46420)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp    (r46441)

@@ -29 +29 @@
 *   Defined Constants And Macros                                              *
 *******************************************************************************/
-
-/**
- * MSR-bitmap read permissions.
+/** @name Segment attribute conversion between CPU and AMD-V VMCB format.
+ *
+ * The CPU format of the segment attribute is described in X86DESCATTRBITS
+ * which is 16-bits (i.e. includes 4 bits of the segment limit).
+ *
+ * The AMD-V VMCB format the segment attribute is compact 12-bits (strictly
+ * only the attribute bits and nothing else). Upper 4-bits are unused.
+ *
+ * @{ */
+#define HMSVM_CPU_2_VMCB_SEG_ATTR(a)   (a & 0xff) | ((a & 0xf000) >> 4)
+#define HMSVM_VMCB_2_CPU_SEG_ATTR(a)   (a & 0xff) | ((a & 0x0f00) << 4)
+/** @} */
+
+/** @name Macros for loading, storing segment registers to/from the VMCB.
+ * @{ */
+#define HMSVM_LOAD_SEG_REG(REG, reg) \
+    do \
+    { \
+        Assert(pCtx->reg.fFlags & CPUMSELREG_FLAGS_VALID); \
+        Assert(pCtx->reg.ValidSel == pCtx->reg.Sel); \
+        pVmcb->guest.REG.u16Sel     = pCtx->reg.Sel; \
+        pVmcb->guest.REG.u32Limit   = pCtx->reg.u32Limit; \
+        pVmcb->guest.REG.u64Base    = pCtx->reg.u64Base; \
+        pVmcb->guest.REG.u16Attr    = HMSVM_CPU_2_VMCB_SEG_ATTR(pCtx->reg.Attr.u); \
+    } while (0)
+
+#define HMSVM_SAVE_SEG_REG(REG, reg) \
+    do \
+    { \
+        pCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
+        pCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
+        pCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
+        pCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
+        pCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
+        pCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
+    } while (0)
+/** @} */
+
+/**
+ * MSRPM (MSR permission bitmap) read permissions (for guest RDMSR).
  */
 typedef enum SVMMSREXITREAD

@@ -42 +79 @@
 
 /**
- * MSR-bitmap write permissions.
+ * MSRPM (MSR permission bitmap) write permissions (for guest WRMSR).
  */
 typedef enum SVMMSREXITWRITE

@@ -56 +93 @@
 *   Internal Functions                                                         *
 *******************************************************************************/
-static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
+static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
 
 
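The two conversion macros above only shuffle bits: the descriptor form keeps four limit bits in bits 8-11, while the VMCB form packs the twelve attribute bits contiguously. A minimal standalone check of that round trip (illustrative only, not part of the changeset; the sample value 0xA09B and the extra parentheses are additions for the example):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Parenthesised copies of the r46441 macros: the VMCB keeps the 12 attribute
       bits contiguously, the CPU/descriptor form has 4 limit bits in bits 8-11. */
    #define HMSVM_CPU_2_VMCB_SEG_ATTR(a)   (((a) & 0xff) | (((a) & 0xf000) >> 4))
    #define HMSVM_VMCB_2_CPU_SEG_ATTR(a)   (((a) & 0xff) | (((a) & 0x0f00) << 4))

    int main(void)
    {
        uint16_t uCpuAttr  = 0xA09B;                              /* G=1, L=1, P=1, DPL=0, S=1, type=0xB */
        uint16_t uVmcbAttr = HMSVM_CPU_2_VMCB_SEG_ATTR(uCpuAttr); /* -> 0x0A9B, limit bits dropped */
        assert(uVmcbAttr == 0x0A9B);
        /* Converting back restores the attribute bits; the 4 limit bits are not
           attributes and are rebuilt from the limit field elsewhere. */
        assert(HMSVM_VMCB_2_CPU_SEG_ATTR(uVmcbAttr) == uCpuAttr);
        printf("CPU 0x%04x <-> VMCB 0x%04x\n", uCpuAttr, uVmcbAttr);
        return 0;
    }

Built as an ordinary user-mode program this prints CPU 0xa09b <-> VMCB 0x0a9b; the four limit bits are the only information dropped on the way into the VMCB.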
@@ -203 +240 @@
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
+        AssertPtr(pVCpu);
 
         if (pVCpu->hm.s.svm.hMemObjVmcbHost != NIL_RTR0MEMOBJ)
         {
             RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcbHost, false);
-            pVCpu->hm.s.svm.pvVmcbHost = 0;
-            pVCpu->hm.s.svm.HCPhysVmcbHost = 0;
-            pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
+            pVCpu->hm.s.svm.pvVmcbHost      = 0;
+            pVCpu->hm.s.svm.HCPhysVmcbHost  = 0;
+            pVCpu->hm.s.svm.hMemObjVmcbHost = NIL_RTR0MEMOBJ;
         }
 

@@ -215 +253 @@
         {
             RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjVmcb, false);
-            pVCpu->hm.s.svm.pvVmcb = 0;
-            pVCpu->hm.s.svm.HCPhysVmcb = 0;
-            pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
+            pVCpu->hm.s.svm.pvVmcb      = 0;
+            pVCpu->hm.s.svm.HCPhysVmcb  = 0;
+            pVCpu->hm.s.svm.hMemObjVmcb = NIL_RTR0MEMOBJ;
         }
 

@@ -241 +279 @@
     int rc = VERR_INTERNAL_ERROR_5;
 
-    /* Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch. */
+    /*
+     * Check for an AMD CPU erratum which requires us to flush the TLB before every world-switch.
+     */
     uint32_t u32Family;
     uint32_t u32Model;

@@ -251 +291 @@
     }
 
-    /* Initialize the memory objects up-front so we can cleanup on allocation failures properly. */
-    for (uint32_t i = 0; i < pVM->cCpus; i++)
+    /*
+     * Initialize the R0 memory objects up-front so we can properly cleanup on allocation failures.
+     */
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
         PVMCPU pVCpu = &pVM->aCpus[i];

@@ -260 +302 @@
     }
 
-    /* Allocate a VMCB for each VCPU. */
-    for (uint32_t i = 0; i < pVM->cCpus; i++)
-    {
-        /* Allocate one page for the host context */
+    for (VMCPUID i = 0; i < pVM->cCpus; i++)
+    {
+        /*
+         * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
+         * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
+         */
         rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
         if (RT_FAILURE(rc))

@@ -273 +317 @@
         ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcbHost);
 
-        /* Allocate one page for the VM control block (VMCB). */
+        /*
+         * Allocate one page for the guest-state VMCB.
+         */
         rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
         if (RT_FAILURE(rc))
             goto failure_cleanup;
 
-        pVCpu->hm.s.svm.pvVmcb = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
-        pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
+        pVCpu->hm.s.svm.pvVmcb     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjVmcb);
+        pVCpu->hm.s.svm.HCPhysVmcb = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjVmcb, 0 /* iPage */);
         Assert(pVCpu->hm.s.svm.HCPhysVmcb < _4G);
         ASMMemZeroPage(pVCpu->hm.s.svm.pvVmcb);
 
-        /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
+        /*
+         * Allocate two pages (8 KB) for the MSR permission bitmap. There doesn't seem to be a way to convince
+         * SVM to not require one.
+         */
         rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
         if (RT_FAILURE(rc))

@@ -290 +339 @@
         pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
         pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
-        /* Set all bits to intercept all MSR accesses. */
+        /* Set all bits to intercept all MSR accesses (changed later on). */
         ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
     }

@@ -316 +365 @@
 
 /**
+ * Sets the permission bits for the specified MSR in the MSRPM.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   uMsr        The MSR.
+ * @param   fRead       Whether reading is allowed.
+ * @param   fWrite      Whether writing is allowed.
+ */
+static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
+{
+    unsigned    ulBit;
+    uint8_t    *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
+
+    /*
+     * Layout:
+     * Byte offset       MSR range
+     * 0x000  - 0x7ff    0x00000000 - 0x00001fff
+     * 0x800  - 0xfff    0xc0000000 - 0xc0001fff
+     * 0x1000 - 0x17ff   0xc0010000 - 0xc0011fff
+     * 0x1800 - 0x1fff   Reserved
+     */
+    if (uMsr <= 0x00001FFF)
+    {
+        /* Pentium-compatible MSRs */
+        ulBit = uMsr * 2;
+    }
+    else if (   uMsr >= 0xC0000000
+             && uMsr <= 0xC0001FFF)
+    {
+        /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
+        ulBit = (uMsr - 0xC0000000) * 2;
+        pbMsrBitmap += 0x800;
+    }
+    else if (   uMsr >= 0xC0010000
+             && uMsr <= 0xC0011FFF)
+    {
+        /* AMD Seventh and Eighth Generation Processor MSRs */
+        ulBit = (uMsr - 0xC0001000) * 2;
+        pbMsrBitmap += 0x1000;
+    }
+    else
+    {
+        AssertFailed();
+        return;
+    }
+
+    Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
+    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
+        ASMBitSet(pbMsrBitmap, ulBit);
+    else
+        ASMBitClear(pbMsrBitmap, ulBit);
+
+    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
+        ASMBitSet(pbMsrBitmap, ulBit + 1);
+    else
+        ASMBitClear(pbMsrBitmap, ulBit + 1);
+}
+
+
+/**
  * Sets up AMD-V for the specified VM.
  * This function is only called once per-VM during initalization.

@@ -332 +440 @@
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-        PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcb;
+        PSVMVMCB pVmcb = (PSVMVMCB)pVM->aCpus[i].hm.s.svm.pvVmcbGuest;
 
         AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);

@@ -432 +540 @@
          * Don't intercept guest read/write accesses to these MSRs.
          */
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
-        hmR0SvmSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
+        hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     }
 
     return rc;
-}
-
-
-/**
- * Sets the permission bits for the specified MSR.
- *
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   uMsr        The MSR.
- * @param   fRead       Whether reading is allowed.
- * @param   fWrite      Whether writing is allowed.
- */
-static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
-{
-    unsigned    ulBit;
-    uint8_t    *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;
-
-    /*
-     * Layout:
-     * Byte offset       MSR range
-     * 0x000  - 0x7ff    0x00000000 - 0x00001fff
-     * 0x800  - 0xfff    0xc0000000 - 0xc0001fff
-     * 0x1000 - 0x17ff   0xc0010000 - 0xc0011fff
-     * 0x1800 - 0x1fff   Reserved
-     */
-    if (uMsr <= 0x00001FFF)
-    {
-        /* Pentium-compatible MSRs */
-        ulBit = uMsr * 2;
-    }
-    else if (   uMsr >= 0xC0000000
-             && uMsr <= 0xC0001FFF)
-    {
-        /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
-        ulBit = (uMsr - 0xC0000000) * 2;
-        pbMsrBitmap += 0x800;
-    }
-    else if (   uMsr >= 0xC0010000
-             && uMsr <= 0xC0011FFF)
-    {
-        /* AMD Seventh and Eighth Generation Processor MSRs */
-        ulBit = (uMsr - 0xC0001000) * 2;
-        pbMsrBitmap += 0x1000;
-    }
-    else
-    {
-        AssertFailed();
-        return;
-    }
-
-    Assert(ulBit < 0x3fff /* 16 * 1024 - 1 */);
-    if (enmRead == SVMMSREXIT_INTERCEPT_READ)
-        ASMBitSet(pbMsrBitmap, ulBit);
-    else
-        ASMBitClear(pbMsrBitmap, ulBit);
-
-    if (enmWrite == SVMMSREXIT_INTERCEPT_WRITE)
-        ASMBitSet(pbMsrBitmap, ulBit + 1);
-    else
-        ASMBitClear(pbMsrBitmap, ulBit + 1);
 }
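The MSRPM layout documented in hmR0SvmSetMsrPermission() gives every MSR two consecutive bits (even bit = read intercept, odd bit = write intercept) in one of three 2 KB regions. A small user-mode sketch of the same index arithmetic, limited to the first two ranges, locating MSR_K8_LSTAR (0xC0000082), one of the MSRs passed through above; the helper name msrpmLocate and the hard-coded LSTAR value are illustrative assumptions, not part of the changeset:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the MSRPM indexing of hmR0SvmSetMsrPermission() for the first two
       ranges; each MSR owns two consecutive bits (even = read, odd = write). */
    static int msrpmLocate(uint32_t uMsr, uint32_t *poffByte, uint32_t *piReadBit)
    {
        uint32_t offBase, uBit;
        if (uMsr <= 0x00001FFF)                              /* bytes 0x000..0x7ff */
        {
            offBase = 0x000;
            uBit    = uMsr * 2;
        }
        else if (uMsr >= 0xC0000000 && uMsr <= 0xC0001FFF)   /* bytes 0x800..0xfff */
        {
            offBase = 0x800;
            uBit    = (uMsr - 0xC0000000) * 2;
        }
        else
            return -1;   /* the 0xC0010000 range and everything else is ignored here */

        *poffByte  = offBase + uBit / 8;
        *piReadBit = uBit % 8;           /* write-intercept bit is *piReadBit + 1 */
        return 0;
    }

    int main(void)
    {
        uint32_t offByte, iBit;
        assert(msrpmLocate(0xC0000082 /* MSR_K8_LSTAR */, &offByte, &iBit) == 0);
        assert(offByte == 0x820 && iBit == 4);   /* read bit 4, write bit 5 of byte 0x820 */
        printf("LSTAR: byte 0x%x, read bit %u, write bit %u\n", offByte, iBit, iBit + 1);
        return 0;
    }

So the SVMMSREXIT_PASSTHRU_READ/WRITE call for LSTAR above amounts to clearing bits 4 and 5 of byte 0x820 in the bitmap.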
@@ -516 +565 @@
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcbGuest;
     PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
 

@@ -647 +696 @@
 
 
+/** @name 64-bit guest on 32-bit host OS helper functions.
+ *
+ * The host CPU is still 64-bit capable but the host OS is running in 32-bit
+ * mode (code segment, paging). These wrappers/helpers perform the necessary
+ * bits for the 32->64 switcher.
+ *
+ * @{ */
 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 /**

@@ -712 +767 @@
 
 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) */
 
+/** @} */
+
+
+/**
+ * Saves the host state.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ */
+VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
+{
+    NOREF(pVM);
+    NOREF(pVCpu);
+    /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads the guest segment registers into the VMCB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static int hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
+    {
+        HMSVM_LOAD_SEG_REG(CS, cs);
+        HMSVM_LOAD_SEG_REG(SS, cs);
+        HMSVM_LOAD_SEG_REG(DS, cs);
+        HMSVM_LOAD_SEG_REG(ES, cs);
+        HMSVM_LOAD_SEG_REG(FS, cs);
+        HMSVM_LOAD_SEG_REG(GS, cs);
+
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
+    }
+
+    /* Guest TR. */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
+    {
+        HMSVM_LOAD_SEG_REG(TR, tr);
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
+    }
+
+    /* Guest LDTR. */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
+    {
+        HMSVM_LOAD_SEG_REG(LDTR, ldtr);
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
+    }
+
+    /* Guest GDTR. */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
+    {
+        pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
+        pVmcb->guest.GDTR.u64Base  = pCtx->gdtr.pGdt;
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
+    }
+
+    /* Guest IDTR. */
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
+    {
+        pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
+        pVmcb->guest.IDTR.u64Base  = pCtx->idtr.pIdt;
+        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
+    }
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Loads the guest state.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    AssertPtr(pVM);
+    AssertPtr(pVCpu);
+    AssertPtr(pMixedCtx);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);
+
+    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
+
+    int rc = hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
+    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestSegmentRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
+
+    rc
+    /* -XXX- todo */
+
+    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
+
+}
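The new hmR0SvmLoadGuestSegmentRegs() follows the usual HM "changed flags" pattern: a piece of guest state is copied into the VMCB only when its HM_CHANGED_* bit is set, and the bit is cleared once the VMCB is up to date. A stripped-down, compilable sketch of that pattern follows; the CHANGED_* names and the structures are simplified stand-ins for illustration, not the real HM definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real HM_CHANGED_* bits and VMCB/guest-context types. */
    #define CHANGED_GDTR   UINT32_C(0x01)
    #define CHANGED_IDTR   UINT32_C(0x02)

    typedef struct { uint32_t u32Limit; uint64_t u64Base; } DESCTABLE;
    typedef struct { DESCTABLE GDTR, IDTR; } VMCBSTATE;
    typedef struct { DESCTABLE gdtr, idtr; uint32_t fChangedFlags; } GUESTCTX;

    static void loadDescTables(VMCBSTATE *pVmcb, GUESTCTX *pCtx)
    {
        if (pCtx->fChangedFlags & CHANGED_GDTR)     /* copy only when marked dirty... */
        {
            pVmcb->GDTR = pCtx->gdtr;
            pCtx->fChangedFlags &= ~CHANGED_GDTR;   /* ...then mark it clean again. */
        }
        if (pCtx->fChangedFlags & CHANGED_IDTR)
        {
            pVmcb->IDTR = pCtx->idtr;
            pCtx->fChangedFlags &= ~CHANGED_IDTR;
        }
    }

    int main(void)
    {
        VMCBSTATE Vmcb = {0};
        GUESTCTX  Ctx  = { { 0xffff, 0x1000 }, { 0xfff, 0x2000 }, CHANGED_GDTR | CHANGED_IDTR };
        loadDescTables(&Vmcb, &Ctx);    /* first call copies both descriptor tables */
        loadDescTables(&Vmcb, &Ctx);    /* second call is a no-op: the flags are clear */
        printf("GDTR base %#llx, flags now %#x\n", (unsigned long long)Vmcb.GDTR.u64Base, Ctx.fChangedFlags);
        return 0;
    }

The payoff is that a call with nothing marked dirty copies nothing, which keeps the per-world-switch guest-state load path cheap.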
trunk/src/VBox/VMM/include/HMInternal.h
--- trunk/src/VBox/VMM/include/HMInternal.h    (r46381)
+++ trunk/src/VBox/VMM/include/HMInternal.h    (r46441)

@@ -76 +76 @@
 #define MASK_INJECT_IRQ_STAT                  0xff
 
-/** @name Changed flags
+/** @name HM changed flags.
  * These flags are used to keep track of which important registers that
  * have been changed since last they were reset.

@@ -703 +703 @@
     struct
     {
-        /** R0 memory object for the host VM control block (VMCB). */
+        /** R0 memory object for the host VMCB which holds additional host-state. */
         RTR0MEMOBJ                  hMemObjVmcbHost;
-        /** Physical address of the host VM control block (VMCB). */
+        /** Physical address of the host VMCB which holds additional host-state. */
         RTHCPHYS                    HCPhysVmcbHost;
-        /** Virtual address of the host VM control block (VMCB). */
+        /** Virtual address of the host VMCB which holds additional host-state. */
         R0PTRTYPE(void *)           pvVmcbHost;
 
-        /** R0 memory object for the VM control block (VMCB). */
+        /** R0 memory object for the guest VMCB. */
         RTR0MEMOBJ                  hMemObjVmcb;
-        /** Physical address of the VM control block (VMCB). */
+        /** Physical address of the guest VMCB. */
         RTHCPHYS                    HCPhysVmcb;
-        /** Virtual address of the VM control block (VMCB). */
+        /** Virtual address of the guest VMCB. */
         R0PTRTYPE(void *)           pvVmcb;
 

@@ -720 +720 @@
         PFNHMSVMVMRUN               pfnVMRun;
 
-        /** R0 memory object for the MSR bitmap (8 kb). */
+        /** R0 memory object for the MSR bitmap (8 KB). */
        RTR0MEMOBJ                   hMemObjMsrBitmap;
-        /** Physical address of the MSR bitmap (8 kb). */
+        /** Physical address of the MSR bitmap (8 KB). */
         RTHCPHYS                    HCPhysMsrBitmap;
         /** Virtual address of the MSR bitmap. */
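Each allocation in this SVM union is tracked by the same triplet: the RTR0MEMOBJ handle (needed to free it), the ring-0 virtual address (to write it) and the host-physical address (to hand to the hardware). A hedged sketch of that lifecycle for a single VMCB-sized page, using the IPRT ring-0 calls exactly as HMSVMR0.cpp above does; the VMCBALLOC wrapper is an illustration and, being kernel-context code, it is not buildable as a user-mode program:

    #include <iprt/memobj.h>
    #include <iprt/param.h>
    #include <iprt/asm.h>
    #include <iprt/err.h>

    /* One page-backed object tracked the same way HMInternal.h tracks the VMCB:
       handle for freeing, virtual address for access, physical address for hardware. */
    typedef struct VMCBALLOC
    {
        RTR0MEMOBJ  hMemObj;
        void       *pv;
        RTHCPHYS    HCPhys;
    } VMCBALLOC;

    static int vmcbAlloc(VMCBALLOC *p)
    {
        int rc = RTR0MemObjAllocCont(&p->hMemObj, PAGE_SIZE, false /* fExecutable */);
        if (RT_FAILURE(rc))
            return rc;
        p->pv     = RTR0MemObjAddress(p->hMemObj);
        p->HCPhys = RTR0MemObjGetPagePhysAddr(p->hMemObj, 0 /* iPage */);
        ASMMemZeroPage(p->pv);
        return VINF_SUCCESS;
    }

    static void vmcbFree(VMCBALLOC *p)
    {
        if (p->hMemObj != NIL_RTR0MEMOBJ)
        {
            RTR0MemObjFree(p->hMemObj, false /* fFreeMappings */);
            p->pv      = NULL;
            p->HCPhys  = 0;
            p->hMemObj = NIL_RTR0MEMOBJ;
        }
    }

Resetting all three members on free is what lets the termination path in SVMR0TermVM (see the HMSVMR0.cpp hunks above) be safely re-entered after a partial allocation failure.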