Changeset 41335 in vbox for trunk/src/VBox/VMM

- Timestamp: May 16, 2012 12:36:18 PM (13 years ago)
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r41312 r41335 1 1 /* $Id$ */ 2 2 /** @file 3 * HM SVM (AMD-V) - Host Context Ring 3 * HM SVM (AMD-V) - Host Context Ring-0. 4 4 */ 5 5 6 6 /* 7 * Copyright (C) 2006-201 1Oracle Corporation7 * Copyright (C) 2006-2012 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 58 58 static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite); 59 59 60 60 61 /******************************************************************************* 61 62 * Global Variables * … … 63 64 64 65 /** 65 * Sets up and activates AMD-V on the current CPU 66 * Sets up and activates AMD-V on the current CPU. 66 67 * 67 68 * @returns VBox status code. 68 * @param pCpu CPU info struct69 * @param pVM The VM to operate on. (can be NULL after a resume!!)70 * @param pvCpuPage Pointer to the global cpupage.71 * @param HCPhysCpuPage Physical address of the global cpupage.69 * @param pCpu Pointer to the CPU info struct. 70 * @param pVM Pointer to the VM (can be NULL after a resume!). 71 * @param pvCpuPage Pointer to the global CPU page. 72 * @param HCPhysCpuPage Physical address of the global CPU page. 72 73 */ 73 74 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage) … … 76 77 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER); 77 78 78 /* We must turn on AMD-V and setup the host state physical address, as 79 those MSRs are per-cpu/core. */ 79 /* 80 * We must turn on AMD-V and setup the host state physical address, as those MSRs are per cpu/core. 81 */ 80 82 uint64_t fEfer = ASMRdMsr(MSR_K6_EFER); 81 83 if (fEfer & MSR_K6_EFER_SVME) 82 84 { 83 /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we 84 blindly use AMD-V. */ 85 /* 86 * If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE is active, then we blindly use AMD-V. 87 */ 85 88 if ( pVM 86 89 && pVM->hwaccm.s.svm.fIgnoreInUseError) 90 { 87 91 pCpu->fIgnoreAMDVInUseError = true; 92 } 93 88 94 if (!pCpu->fIgnoreAMDVInUseError) 89 95 return VERR_SVM_IN_USE; … … 93 99 ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME); 94 100 95 /* Write the physical page address where the CPU will store the host state 96 while executing the VM. */ 101 /* Write the physical page address where the CPU will store the host state while executing the VM. */ 97 102 ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage); 98 103 … … 107 112 } 108 113 114 109 115 /** 110 * Deactivates AMD-V on the current CPU 116 * Deactivates AMD-V on the current CPU. 111 117 * 112 118 * @returns VBox status code. 113 * @param pCpu CPU info struct114 * @param pvCpuPage Pointer to the global cpupage.115 * @param HCPhysCpuPage Physical address of the global cpupage.119 * @param pCpu Pointer to the CPU info struct. 120 * @param pvCpuPage Pointer to the global CPU page. 121 * @param HCPhysCpuPage Physical address of the global CPU page. 116 122 */ 117 123 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage) … … 131 137 } 132 138 139 133 140 /** 134 141 * Does Ring-0 per VM AMD-V init. 135 142 * 136 143 * @returns VBox status code. 137 * @param pVM The VM to operate on.144 * @param pVM Pointer to the VM. 
138 145 */ 139 146 VMMR0DECL(int) SVMR0InitVM(PVM pVM) … … 153 160 ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff); 154 161 155 /* Erratum 170 which requires a forced TLB flush for each world switch: 162 /* 163 * Erratum 170 which requires a forced TLB flush for each world switch: 156 164 * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf 157 165 * … … 166 174 * 0x7c 2 167 175 * Turion 64: 0x68 2 168 *169 176 */ 170 177 uint32_t u32Dummy; 171 178 uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily; 172 179 ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy); 173 u32BaseFamily = (u32Version >> 8) & 0xf;174 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);175 u32Model = ((u32Version >> 4) & 0xf);176 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);177 u32Stepping = u32Version & 0xf;180 u32BaseFamily = (u32Version >> 8) & 0xf; 181 u32Family = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0); 182 u32Model = ((u32Version >> 4) & 0xf); 183 u32Model = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4); 184 u32Stepping = u32Version & 0xf; 178 185 if ( u32Family == 0xf 179 186 && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1) … … 221 228 pVCpu->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, 0); 222 229 /* Set all bits to intercept all MSR accesses. */ 223 ASMMemFill32(pVCpu->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE *2, 0xffffffff);230 ASMMemFill32(pVCpu->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE * 2, 0xffffffff); 224 231 } 225 232 226 233 return VINF_SUCCESS; 227 234 } 235 228 236 229 237 /** … … 231 239 * 232 240 * @returns VBox status code. 233 * @param pVM The VM to operate on.241 * @param pVM Pointer to the VM. 234 242 */ 235 243 VMMR0DECL(int) SVMR0TermVM(PVM pVM) … … 272 280 } 273 281 282 274 283 /** 275 * Sets up AMD-V for the specified VM 284 * Sets up AMD-V for the specified VM. 276 285 * 277 286 * @returns VBox status code. 278 * @param pVM The VM to operate on.287 * @param pVM Pointer to the VM. 279 288 */ 280 289 VMMR0DECL(int) SVMR0SetupVM(PVM pVM) … … 283 292 284 293 AssertReturn(pVM, VERR_INVALID_PARAMETER); 285 286 294 Assert(pVM->hwaccm.s.svm.fSupported); 287 295 … … 293 301 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB); 294 302 295 /* Program the control fields. Most of them never have to be changed again. 303 /* 304 * Program the control fields. Most of them never have to be changed again. 296 305 * CR0/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. 297 306 * Note: CR0 & CR4 can be safely read when guest and shadow copies are identical. … … 348 357 | SVM_CTRL2_INTERCEPT_WBINVD 349 358 | SVM_CTRL2_INTERCEPT_MONITOR 350 | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */ 351 ; 359 | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the 360 guest (host thinks the cpu load is high) */ 361 352 362 Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException)); 353 363 Log(("pVMCB->ctrl.u32InterceptCtrl1 = %x\n", pVMCB->ctrl.u32InterceptCtrl1)); … … 356 366 /* Virtualize masking of INTR interrupts. 
(reads/writes from/to CR8 go to the V_TPR register) */ 357 367 pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1; 368 358 369 /* Ignore the priority in the TPR; just deliver it when we tell it to. */ 359 370 pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR = 1; … … 369 380 pVMCB->ctrl.TLBCtrl.n.u32ASID = 1; 370 381 371 /* Setup the PAT msr (nested paging only) */ 372 /* The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB, so choose type 6 for all PAT slots. */ 382 /* 383 * Setup the PAT MSR (nested paging only) 384 * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB, 385 * so choose type 6 for all PAT slots. 386 */ 373 387 pVMCB->guest.u64GPAT = 0x0006060606060606ULL; 374 388 … … 380 394 pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(3); 381 395 382 /* We must also intercept: 396 /* 397 * We must also intercept: 383 398 * - INVLPG (must go through shadow paging) 384 399 * - task switches (may change CR3/EFLAGS/LDT) 385 400 */ 386 401 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG 387 | SVM_CTRL1_INTERCEPT_TASK_SWITCH 388 ; 402 | SVM_CTRL1_INTERCEPT_TASK_SWITCH; 389 403 390 404 /* Page faults must be intercepted to implement shadow paging. */ … … 392 406 } 393 407 394 /* The following MSRs are saved automatically by vmload/vmsave, so we allow the guest 408 /* 409 * The following MSRs are saved automatically by vmload/vmsave, so we allow the guest 395 410 * to modify them directly. 396 411 */ … … 412 427 413 428 /** 414 * Sets the permission bits for the specified MSR 429 * Sets the permission bits for the specified MSR. 415 430 * 416 * @param pVCpu The VMCPU to operate on.417 * @param ulMSR MSR value 418 * @param fRead Reading allowed/disallowed419 * @param fWrite W riting allowed/disallowed431 * @param pVCpu Pointer to the VMCPU. 432 * @param ulMSR MSR value. 433 * @param fRead Whether reading is allowed. 434 * @param fWrite Whether writing is allowed. 420 435 */ 421 436 static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite) … … 429 444 ulBit = ulMSR * 2; 430 445 } 431 else 432 if ( ulMSR >= 0xC0000000 433 && ulMSR <= 0xC0001FFF) 446 else if ( ulMSR >= 0xC0000000 447 && ulMSR <= 0xC0001FFF) 434 448 { 435 449 /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */ … … 437 451 pMSRBitmap += 0x800; 438 452 } 439 else 440 if ( ulMSR >= 0xC0010000 441 && ulMSR <= 0xC0011FFF) 453 else if ( ulMSR >= 0xC0010000 454 && ulMSR <= 0xC0011FFF) 442 455 { 443 456 /* AMD Seventh and Eighth Generation Processor MSRs */ … … 462 475 } 463 476 477 464 478 /** 465 * Injects an event (trap or external interrupt) 479 * Injects an event (trap or external interrupt). 466 480 * 467 * @param pVCpu The VMCPU to operate on.468 * @param pVMCB SVM control block469 * @param pCtx CPU Context470 * @param pIntInfo SVM interrupt info481 * @param pVCpu Pointer to the VMCPU. 482 * @param pVMCB Pointer to the VMCB. 483 * @param pCtx Pointer to the guest CPU context. 484 * @param pIntInfo Pointer to the SVM interrupt info. 
471 485 */ 472 486 DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent) … … 478 492 #ifdef VBOX_STRICT 479 493 if (pEvent->n.u8Vector == 0xE) 480 Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0])); 481 else 482 if (pEvent->n.u8Vector < 0x20) 494 { 495 Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector, 496 (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0])); 497 } 498 else if (pEvent->n.u8Vector < 0x20) 483 499 Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode)); 484 500 else … … 496 512 497 513 /** 498 * Checks for pending guest interrupts and injects them 514 * Checks for pending guest interrupts and injects them. 499 515 * 500 516 * @returns VBox status code. 501 * @param pVM The VM to operate on.517 * @param pVM Pointer to the VM. 502 518 * @param pVCpu The VM CPU to operate on. 503 * @param pVMCB SVM control block504 * @param pCtx CPU Context519 * @param pVMCB Pointer to the VMCB. 520 * @param pCtx Pointer to the guest CPU Context. 505 521 */ 506 522 static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx) … … 509 525 NOREF(pVM); 510 526 511 /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */ 527 /* 528 * Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely). 529 */ 512 530 if (pVCpu->hwaccm.s.Event.fPending) 513 531 { 514 532 SVM_EVENT Event; 515 533 516 Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip)); 534 Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, 535 (RTGCPTR)pCtx->rip)); 517 536 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject); 518 537 Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo; … … 523 542 } 524 543 525 /* If an active trap is already pending, then we must forward it first! */ 544 /* 545 * If an active trap is already pending, we must forward it first! 546 */ 526 547 if (!TRPMHasTrap(pVCpu)) 527 548 { … … 540 561 } 541 562 542 /* @todo SMI interrupts. */ 543 544 /* When external interrupts are pending, we should exit the VM when IF is set. */ 563 /** @todo SMI interrupts. */ 564 565 /* 566 * When external interrupts are pending, we should exit the VM when IF is set. 567 */ 545 568 if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC))) 546 569 { 547 if ( 548 || 570 if ( !(pCtx->eflags.u32 & X86_EFL_IF) 571 || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 549 572 { 550 573 if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid) … … 553 576 LogFlow(("Enable irq window exit!\n")); 554 577 else 555 Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n", (RTGCPTR)pCtx->rip)); 556 557 /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */ 578 { 579 Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n", 580 (RTGCPTR)pCtx->rip)); 581 } 582 583 /** @todo Use virtual interrupt method to inject a pending IRQ; dispatched as 584 * soon as guest.IF is set. 
*/ 558 585 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR; 559 586 pVMCB->ctrl.IntCtrl.n.u1VIrqValid = 1; … … 620 647 if (enmType == TRPM_TRAP) 621 648 { 622 switch (u8Vector) { 623 case X86_XCPT_DF: 624 case X86_XCPT_TS: 625 case X86_XCPT_NP: 626 case X86_XCPT_SS: 627 case X86_XCPT_GP: 628 case X86_XCPT_PF: 629 case X86_XCPT_AC: 630 /* Valid error codes. */ 631 Event.n.u1ErrorCodeValid = 1; 632 break; 633 default: 634 break; 649 switch (u8Vector) 650 { 651 case X86_XCPT_DF: 652 case X86_XCPT_TS: 653 case X86_XCPT_NP: 654 case X86_XCPT_SS: 655 case X86_XCPT_GP: 656 case X86_XCPT_PF: 657 case X86_XCPT_AC: 658 /* Valid error codes. */ 659 Event.n.u1ErrorCodeValid = 1; 660 break; 661 default: 662 break; 635 663 } 636 664 if (u8Vector == X86_XCPT_NMI) … … 649 677 } 650 678 679 651 680 /** 652 * Save the host state 681 * Save the host state. 653 682 * 654 683 * @returns VBox status code. 655 * @param pVM The VM to operate on.684 * @param pVM Pointer to the VM. 656 685 * @param pVCpu The VM CPU to operate on. 657 686 */ … … 664 693 } 665 694 695 666 696 /** 667 * Loads the guest state 697 * Loads the guest state. 668 698 * 669 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!699 * NOTE: Don't do anything here that can cause a jump back to ring-3!!! 670 700 * 671 701 * @returns VBox status code. 672 * @param pVM The VM to operate on.702 * @param pVM Pointer to the VM. 673 703 * @param pVCpu The VM CPU to operate on. 674 * @param pCtx Guest context704 * @param pCtx Pointer to the guest CPU context. 675 705 */ 676 706 VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 760 790 val &= ~(X86_CR0_CD|X86_CR0_NW); 761 791 762 /* Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level. */ 763 /* Note: In nested paging mode the guest is allowed to run with paging disabled; the guest physical to host physical translation will remain active. */ 792 /* 793 * Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level. 794 * Note: In nested paging mode, the guest is allowed to run with paging disabled; the guest-physical to host-physical 795 * translation will remain active. 796 */ 764 797 if (!pVM->hwaccm.s.fNestedPaging) 765 798 { 766 val |= X86_CR0_PG; 767 val |= X86_CR0_WP; 799 val |= X86_CR0_PG; /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */ 800 val |= X86_CR0_WP; /* Must set this as we rely on protecting various pages and supervisor writes must be caught. */ 768 801 } 769 802 pVMCB->guest.u64CR0 = val; … … 937 970 { 938 971 /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */ 939 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, pVMCB->ctrl.u64TSCOffset, u64CurTSC + pVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu))); 972 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, 973 pVMCB->ctrl.u64TSCOffset, u64CurTSC + pVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu))); 940 974 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 941 975 pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; … … 950 984 } 951 985 952 /* Sync the various msrs for 64 bitsmode. 
*/986 /* Sync the various MSRs for 64-bit mode. */ 953 987 pVMCB->guest.u64STAR = pCtx->msrSTAR; /* legacy syscall eip, cs & ss */ 954 pVMCB->guest.u64LSTAR = pCtx->msrLSTAR; /* 64 bitsmode syscall rip */988 pVMCB->guest.u64LSTAR = pCtx->msrLSTAR; /* 64-bit mode syscall rip */ 955 989 pVMCB->guest.u64CSTAR = pCtx->msrCSTAR; /* compatibility mode syscall rip */ 956 990 pVMCB->guest.u64SFMASK = pCtx->msrSFMASK; /* syscall flag mask */ 957 pVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; /* swapgsexchange value */991 pVMCB->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; /* SWAPGS exchange value */ 958 992 959 993 #ifdef DEBUG … … 972 1006 } 973 1007 1008 974 1009 /** 975 1010 * Setup TLB for ASID. 976 1011 * 977 * @param pVM The VM to operate on.1012 * @param pVM Pointer to the VM. 978 1013 * @param pVCpu The VM CPU to operate on. 979 1014 */ … … 1094 1129 pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID; 1095 1130 1096 AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes)); 1097 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 1098 AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID)); 1131 AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, 1132 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes)); 1133 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, 1134 ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 1135 AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, 1136 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID)); 1099 1137 1100 1138 #ifdef VBOX_WITH_STATISTICS … … 1116 1154 * 1117 1155 * @returns VBox status code. 1118 * @param pVM The VM to operate on.1156 * @param pVM Pointer to the VM. 1119 1157 * @param pVCpu The VM CPU to operate on. 1120 * @param pCtx Guest context1158 * @param pCtx Pointer to the guest CPU context. 1121 1159 */ 1122 1160 VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 1128 1166 VBOXSTRICTRC rc = VINF_SUCCESS; 1129 1167 int rc2; 1130 uint64_t exitCode = (uint64_t)SVM_EXIT_INVALID;1131 SVM_VMCB *pVMCB ;1132 bool fSyncTPR = false;1133 unsigned cResume = 0;1134 uint8_t u8LastTPR = 0; /* Initialized for potentially stupid compilers. */1135 PHMGLOBLCPUINFO pCpu = 0;1136 RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;1168 uint64_t exitCode = (uint64_t)SVM_EXIT_INVALID; 1169 SVM_VMCB *pVMCB = NULL; 1170 bool fSyncTPR = false; 1171 unsigned cResume = 0; 1172 uint8_t u8LastTPR = 0; /* Initialized for potentially stupid compilers. */ 1173 PHMGLOBLCPUINFO pCpu = 0; 1174 RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0; 1137 1175 #ifdef VBOX_STRICT 1138 1176 RTCPUID idCpuCheck; … … 1145 1183 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB); 1146 1184 1147 /* We can jump to this point to resume execution after determining that a VM-exit is innocent. 1185 /* 1186 * We can jump to this point to resume execution after determining that a VM-exit is innocent. 
1148 1187 */ 1149 1188 ResumeExecution: … … 1152 1191 Assert(!HWACCMR0SuspendPending()); 1153 1192 1154 /* Safety precaution; looping for too long here can have a very bad effect on the host */ 1193 /* 1194 * Safety precaution; looping for too long here can have a very bad effect on the host. 1195 */ 1155 1196 if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops)) 1156 1197 { … … 1160 1201 } 1161 1202 1162 /* Check for irq inhibition due to instruction fusing (sti, mov ss). */ 1203 /* 1204 * Check for IRQ inhibition due to instruction fusing (sti, mov ss). 1205 */ 1163 1206 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 1164 1207 { … … 1166 1209 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu)) 1167 1210 { 1168 /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here. 1211 /* 1212 * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here. 1169 1213 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might 1170 1214 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could … … 1195 1239 #endif 1196 1240 1197 /* Check for pending actions that force us to go back to ring 3. */ 1241 /* 1242 * Check for pending actions that force us to go back to ring-3. 1243 */ 1198 1244 if ( VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA) 1199 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST)) 1245 || VMCPU_FF_ISPENDING(pVCpu, 1246 VMCPU_FF_HWACCM_TO_R3_MASK 1247 | VMCPU_FF_PGM_SYNC_CR3 1248 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL 1249 | VMCPU_FF_REQUEST)) 1200 1250 { 1201 1251 /* Check if a sync operation is pending. */ … … 1269 1319 #endif 1270 1320 1271 /* When external interrupts are pending, we should exit the VM when IF is set. */ 1272 /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */ 1321 /* 1322 * When external interrupts are pending, we should exit the VM when IF is set. 1323 * Note: *After* VM_FF_INHIBIT_INTERRUPTS check!! 1324 */ 1273 1325 rc = hmR0SvmCheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx); 1274 1326 if (RT_FAILURE(rc)) 1275 1327 goto end; 1276 1328 1277 /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */ 1278 /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! (no longer true) 1329 /* 1330 * TPR caching using CR8 is only available in 64-bit mode or with 32-bit guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is 1331 * supported. 1332 * Note: we can't do this in LoddGuestState as PDMApicGetTPR can jump back to ring 3 (lock)! (no longer true) 1279 1333 */ 1280 1334 /** @todo query and update the TPR only when it could have been changed (mmio access) … … 1298 1352 } 1299 1353 else 1300 /* No interrupts are pending, so we don't need to be explicitely notified. 1354 { 1355 /* 1356 * No interrupts are pending, so we don't need to be explicitely notified. 1301 1357 * There are enough world switches for detecting pending interrupts. 1302 1358 */ 1303 1359 hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true); 1360 } 1304 1361 } 1305 1362 else … … 1313 1370 } 1314 1371 else 1315 /* No interrupts are pending, so we don't need to be explicitely notified. 1372 { 1373 /* 1374 * No interrupts are pending, so we don't need to be explicitely notified. 
1316 1375 * There are enough world switches for detecting pending interrupts. 1317 1376 */ 1318 1377 pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8); 1378 } 1319 1379 } 1320 1380 fSyncTPR = !fPending; … … 1349 1409 VMMR0LogFlushDisable(pVCpu); 1350 1410 1351 /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */ 1411 /* 1412 * Load the guest state; *must* be here as it sets up the shadow CR0 for lazy FPU syncing! 1413 */ 1352 1414 rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx); 1353 1415 if (RT_UNLIKELY(rc != VINF_SUCCESS)) … … 1358 1420 1359 1421 #ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION 1360 /* Disable interrupts to make sure a poke will interrupt execution. 1422 /* 1423 * Disable interrupts to make sure a poke will interrupt execution. 1361 1424 * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this. 1362 1425 */ … … 1366 1429 STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x); 1367 1430 1368 /* 1369 * Setup TLB control and ASID in the VMCB. 1370 */ 1431 /* Setup TLB control and ASID in the VMCB. */ 1371 1432 hmR0SvmSetupTLB(pVM, pVCpu); 1372 1433 … … 1392 1453 ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false); 1393 1454 ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits); 1394 /* Possibly the last TSC value seen by the guest (too high) (only when we're in tscoffset mode). */1455 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 1395 1456 if (!(pVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC)) 1396 1457 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVMCB->ctrl.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */); … … 1536 1597 } 1537 1598 1538 /* Let's first sync back eip, esp, and eflags. */1599 /* Let's first sync back EIP, ESP, and EFLAGS. */ 1539 1600 pCtx->rip = pVMCB->guest.u64RIP; 1540 1601 pCtx->rsp = pVMCB->guest.u64RSP; … … 1543 1604 pCtx->rax = pVMCB->guest.u64RAX; 1544 1605 1545 /* Save all the MSRs that can be changed by the guest without causing a world switch. (fs & gs base are saved with SVM_READ_SELREG) */ 1606 /* 1607 * Save all the MSRs that can be changed by the guest without causing a world switch. 1608 * FS & GS base are saved with SVM_READ_SELREG. 1609 */ 1546 1610 pCtx->msrSTAR = pVMCB->guest.u64STAR; /* legacy syscall eip, cs & ss */ 1547 pCtx->msrLSTAR = pVMCB->guest.u64LSTAR; /* 64 bitsmode syscall rip */1611 pCtx->msrLSTAR = pVMCB->guest.u64LSTAR; /* 64-bit mode syscall rip */ 1548 1612 pCtx->msrCSTAR = pVMCB->guest.u64CSTAR; /* compatibility mode syscall rip */ 1549 1613 pCtx->msrSFMASK = pVMCB->guest.u64SFMASK; /* syscall flag mask */ … … 1564 1628 SVM_READ_SELREG(GS, gs); 1565 1629 1566 /* Correct the hidden CS granularity flag. Haven't seen it being wrong in 1567 any other register (yet). */ 1630 /* 1631 * Correct the hidden CS granularity flag. Haven't seen it being wrong in any other 1632 * register (yet). 1633 */ 1568 1634 if ( !pCtx->csHid.Attr.n.u1Granularity 1569 1635 && pCtx->csHid.Attr.n.u1Present … … 1589 1655 /* 1590 1656 * Correct the hidden SS DPL field. It can be wrong on certain CPUs 1591 * sometimes (seen it on AMD Fusion APUs with 64bit guests). The CPU1657 * sometimes (seen it on AMD Fusion CPUs with 64-bit guests). The CPU 1592 1658 * always uses the CPL field in the VMCB instead of the DPL in the hidden 1593 * SS (chapter 15.5.1 Basic operation).1659 * SS (chapter AMD spec. 15.5.1 Basic operation). 
1594 1660 */ 1595 1661 Assert(!(pVMCB->guest.u8CPL & ~0x3)); 1596 1662 pCtx->ssHid.Attr.n.u2Dpl = pVMCB->guest.u8CPL & 0x3; 1597 1663 1598 /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR; must sync everything otherwise we can get out of sync when jumping to ring 3. */ 1664 /* 1665 * Remaining guest CPU context: TR, IDTR, GDTR, LDTR; 1666 * must sync everything otherwise we can get out of sync when jumping back to ring-3. 1667 */ 1599 1668 SVM_READ_SELREG(LDTR, ldtr); 1600 1669 SVM_READ_SELREG(TR, tr); … … 1606 1675 pCtx->idtr.pIdt = pVMCB->guest.IDTR.u64Base; 1607 1676 1608 /* Note: no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */ 1609 /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */ 1610 if ( pVM->hwaccm.s.fNestedPaging 1611 && pCtx->cr3 != pVMCB->guest.u64CR3) 1677 /* 1678 * No reason to sync back the CRx and DRx registers as they cannot be changed by the guest 1679 * unless in the nested paging case where CR3 & CR3 can be changed by the guest. 1680 */ 1681 if ( pVM->hwaccm.s.fNestedPaging 1682 && pCtx->cr3 != pVMCB->guest.u64CR3) 1612 1683 { 1613 1684 CPUMSetGuestCR3(pVCpu, pVMCB->guest.u64CR3); … … 1618 1689 VMMR0LogFlushEnable(pVCpu); 1619 1690 1620 /* Take care of instruction fusing (sti, mov ss) (see 15.20.5 Interrupt Shadows) */1691 /* Take care of instruction fusing (sti, mov ss) (see AMD spec. 15.20.5 Interrupt Shadows) */ 1621 1692 if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE) 1622 1693 { … … 1637 1708 pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0]; 1638 1709 if ( pVMCB->ctrl.ExitIntInfo.n.u1Valid 1639 && pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */) 1710 /* we don't care about 'int xx' as the instruction will be restarted. */ 1711 && pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT) 1640 1712 { 1641 1713 Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode)); … … 1682 1754 if ((uint8_t)(u8LastTPR >> 4) != pVMCB->ctrl.IntCtrl.n.u8VTPR) 1683 1755 { 1684 rc2 = PDMApicSetTPR(pVCpu, pVMCB->ctrl.IntCtrl.n.u8VTPR << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */ 1756 /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */ 1757 rc2 = PDMApicSetTPR(pVCpu, pVMCB->ctrl.IntCtrl.n.u8VTPR << 4); 1685 1758 AssertRC(rc2); 1686 1759 } … … 1694 1767 #endif 1695 1768 #if ARCH_BITS == 64 /* for the time being */ 1696 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pVMCB->ctrl.u64ExitInfo1, pVMCB->ctrl.u64ExitInfo2, pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX); 1769 VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pVMCB->ctrl.u64ExitInfo1, pVMCB->ctrl.u64ExitInfo2, 1770 pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX); 1697 1771 #endif 1698 1772 STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x); … … 1780 1854 #ifdef VBOX_ALWAYS_TRAP_PF 1781 1855 if (pVM->hwaccm.s.fNestedPaging) 1782 { /* A genuine pagefault. 1783 * Forward the trap to the guest by injecting the exception and resuming execution. 1856 { 1857 /* 1858 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution. 
1784 1859 */ 1785 Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, uFaultAddress, errCode, (RTGCPTR)pCtx->rsp)); 1860 Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, 1861 uFaultAddress, errCode, (RTGCPTR)pCtx->rsp)); 1786 1862 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF); 1787 1863 … … 1812 1888 { 1813 1889 RTGCPHYS GCPhysApicBase, GCPhys; 1814 PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */1890 PDMApicGetBase(pVM, &GCPhysApicBase); /** @todo cache this */ 1815 1891 GCPhysApicBase &= PAGE_BASE_GC_MASK; 1816 1892 … … 1840 1916 Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc))); 1841 1917 if (rc == VINF_SUCCESS) 1842 { /* We've successfully synced our shadow pages, so let's just continue execution. */ 1918 { 1919 /* We've successfully synced our shadow pages, so let's just continue execution. */ 1843 1920 Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode)); 1844 1921 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF); … … 1847 1924 goto ResumeExecution; 1848 1925 } 1849 else 1850 if (rc == VINF_EM_RAW_GUEST_TRAP)1851 { /* A genuine pagefault.1852 * Forward the trap to the guest by injecting the exception and resuming execution.1926 else if (rc == VINF_EM_RAW_GUEST_TRAP) 1927 { 1928 /* 1929 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution. 1853 1930 */ 1854 1931 Log2(("Forward page fault to the guest\n")); … … 1915 1992 Event.n.u8Vector = vector; 1916 1993 1917 switch (vector)1918 { 1919 case X86_XCPT_GP:1920 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);1921 Event.n.u1ErrorCodeValid = 1;1922 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */1923 break;1924 case X86_XCPT_BP:1925 /** Saves the wrong EIP on the stack (pointing to the int3 instead of the next instruction. */1926 break;1927 case X86_XCPT_DE:1928 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);1929 break;1930 case X86_XCPT_UD:1931 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);1932 break;1933 case X86_XCPT_SS:1934 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);1935 Event.n.u1ErrorCodeValid = 1;1936 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */1937 break;1938 case X86_XCPT_NP:1939 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);1940 Event.n.u1ErrorCodeValid = 1;1941 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */1942 break;1994 switch (vector) 1995 { 1996 case X86_XCPT_GP: 1997 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP); 1998 Event.n.u1ErrorCodeValid = 1; 1999 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ 2000 break; 2001 case X86_XCPT_BP: 2002 /** Saves the wrong EIP on the stack (pointing to the int3 instead of the next instruction. 
*/ 2003 break; 2004 case X86_XCPT_DE: 2005 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE); 2006 break; 2007 case X86_XCPT_UD: 2008 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD); 2009 break; 2010 case X86_XCPT_SS: 2011 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS); 2012 Event.n.u1ErrorCodeValid = 1; 2013 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ 2014 break; 2015 case X86_XCPT_NP: 2016 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP); 2017 Event.n.u1ErrorCodeValid = 1; 2018 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ 2019 break; 1943 2020 } 1944 2021 Log(("Trap %x at %04x:%RGv esi=%x\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->esi)); … … 1977 2054 { 1978 2055 RTGCPHYS GCPhysApicBase; 1979 PDMApicGetBase(pVM, &GCPhysApicBase); /* @todo cache this */2056 PDMApicGetBase(pVM, &GCPhysApicBase); /** @todo cache this */ 1980 2057 GCPhysApicBase &= PAGE_BASE_GC_MASK; 1981 2058 … … 2039 2116 || rc == VERR_PAGE_TABLE_NOT_PRESENT 2040 2117 || rc == VERR_PAGE_NOT_PRESENT) 2041 { /* We've successfully synced our shadow pages, so let's just continue execution. */ 2118 { 2119 /* We've successfully synced our shadow pages, so let's just continue execution. */ 2042 2120 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode)); 2043 2121 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF); … … 2170 2248 switch (exitCode - SVM_EXIT_WRITE_CR0) 2171 2249 { 2172 case 0:2173 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;2174 break;2175 case 2:2176 break;2177 case 3:2178 Assert(!pVM->hwaccm.s.fNestedPaging);2179 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;2180 break;2181 case 4:2182 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;2183 break;2184 case 8:2185 break;2186 default:2187 AssertFailed();2250 case 0: 2251 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0; 2252 break; 2253 case 2: 2254 break; 2255 case 3: 2256 Assert(!pVM->hwaccm.s.fNestedPaging); 2257 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3; 2258 break; 2259 case 4: 2260 pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4; 2261 break; 2262 case 8: 2263 break; 2264 default: 2265 AssertFailed(); 2188 2266 } 2189 2267 if (rc == VINF_SUCCESS) 2190 2268 { 2191 2269 /* EIP has been updated already. */ 2192 2193 2270 /* Only resume if successful. */ 2194 2271 goto ResumeExecution; … … 2209 2286 { 2210 2287 /* EIP has been updated already. */ 2211 2212 2288 /* Only resume if successful. */ 2213 2289 goto ResumeExecution; … … 2225 2301 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite); 2226 2302 2227 if ( 2228 && 2303 if ( !DBGFIsStepping(pVCpu) 2304 && !CPUMIsHyperDebugStateActive(pVCpu)) 2229 2305 { 2230 2306 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch); … … 2265 2341 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch); 2266 2342 2267 /* Disable drx move intercepts. */2343 /* Disable DRx move intercepts. */ 2268 2344 pVMCB->ctrl.u16InterceptRdDRx = 0; 2269 2345 pVMCB->ctrl.u16InterceptWrDRx = 0; … … 2279 2355 { 2280 2356 /* EIP has been updated already. */ 2281 2282 2357 /* Only resume if successful. 
*/ 2283 2358 goto ResumeExecution; … … 2301 2376 uAndVal = 0xff; 2302 2377 } 2303 else 2304 if (IoExitInfo.n.u1OP16) 2378 else if (IoExitInfo.n.u1OP16) 2305 2379 { 2306 2380 uIOSize = 2; 2307 2381 uAndVal = 0xffff; 2308 2382 } 2309 else 2310 if (IoExitInfo.n.u1OP32) 2383 else if (IoExitInfo.n.u1OP32) 2311 2384 { 2312 2385 uIOSize = 4; … … 2347 2420 else 2348 2421 { 2349 /* normal in/out */2422 /* Normal in/out */ 2350 2423 Assert(!IoExitInfo.n.u1REP); 2351 2424 2352 2425 if (IoExitInfo.n.u1Type == 0) 2353 2426 { 2354 Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize)); 2427 Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, 2428 uIOSize)); 2355 2429 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite); 2356 2430 rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize); 2357 2431 if (rc == VINF_IOM_R3_IOPORT_WRITE) 2358 HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize); 2432 { 2433 HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, 2434 uAndVal, uIOSize); 2435 } 2359 2436 } 2360 2437 else … … 2368 2445 /* Write back to the EAX register. */ 2369 2446 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal); 2370 Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize)); 2447 Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, 2448 uIOSize)); 2371 2449 } 2372 else 2373 if (rc == VINF_IOM_R3_IOPORT_READ) 2374 HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize); 2375 } 2376 } 2450 else if (rc == VINF_IOM_R3_IOPORT_READ) 2451 { 2452 HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, 2453 uAndVal, uIOSize); 2454 } 2455 } 2456 } 2457 2377 2458 /* 2378 2459 * Handled the I/O return codes. … … 2389 2470 { 2390 2471 /* IO operation lookup arrays. */ 2391 static uint32_t const aIOSize[4] = { 1, 2, 0, 4};2472 static uint32_t const aIOSize[4] = { 1, 2, 0, 4 }; 2392 2473 2393 2474 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck); 2394 for (unsigned i =0;i<4;i++)2475 for (unsigned i = 0; i < 4; i++) 2395 2476 { 2396 2477 unsigned uBPLen = aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)]; … … 2408 2489 pCtx->dr[6] |= (uint64_t)RT_BIT(i); 2409 2490 2410 /* Note: AMD64 Architecture Programmer's Manual 13.1: 2411 * Bits 15:13 of the DR6 register is never cleared by the processor and must be cleared by software after 2412 * the contents have been read. 2491 /* 2492 * Note: AMD64 Architecture Programmer's Manual 13.1: 2493 * Bits 15:13 of the DR6 register is never cleared by the processor and must be cleared 2494 * by software after the contents have been read. 2413 2495 */ 2414 2496 pVMCB->guest.u64DR6 = pCtx->dr[6]; … … 2500 2582 break; 2501 2583 } 2502 2503 2584 2504 2585 case SVM_EXIT_VMMCALL: … … 2532 2613 } 2533 2614 2534 /* Emulate in ring 2615 /* Emulate in ring-3. */ 2535 2616 case SVM_EXIT_MSR: 2536 2617 { … … 2556 2637 } 2557 2638 2558 /* Note: the intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */ 2639 /* 2640 * The Intel spec. claims there's an REX version of RDMSR that's slightly different, 2641 * so we play safe by completely disassembling the instruction. 
2642 */ 2559 2643 STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr); 2560 2644 Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr")); … … 2563 2647 { 2564 2648 /* EIP has been updated already. */ 2565 2566 2649 /* Only resume if successful. */ 2567 2650 goto ResumeExecution; … … 2571 2654 } 2572 2655 2573 case SVM_EXIT_TASK_SWITCH: /* too complicated to emulate, so fall back to the recompiler */2656 case SVM_EXIT_TASK_SWITCH: /* too complicated to emulate, so fall back to the recompiler */ 2574 2657 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVMCB->ctrl.u64ExitInfo2)); 2575 2658 if ( !(pVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP)) … … 2577 2660 { 2578 2661 SVM_EVENT Event; 2579 2580 2662 Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo; 2581 2663 2582 2664 /* Caused by an injected interrupt. */ 2583 2665 pVCpu->hwaccm.s.Event.fPending = false; 2584 2585 2666 switch (Event.n.u3Type) 2586 2667 { 2587 case SVM_EVENT_EXTERNAL_IRQ:2588 case SVM_EVENT_NMI:2589 Log(("SVM_EXIT_TASK_SWITCH: reassert trap %d\n", Event.n.u8Vector));2590 Assert(!Event.n.u1ErrorCodeValid);2591 rc2 = TRPMAssertTrap(pVCpu, Event.n.u8Vector, TRPM_HARDWARE_INT);2592 AssertRC(rc2);2593 break;2594 2595 default:2596 /* Exceptions and software interrupts can just be restarted. */2597 break;2668 case SVM_EVENT_EXTERNAL_IRQ: 2669 case SVM_EVENT_NMI: 2670 Log(("SVM_EXIT_TASK_SWITCH: reassert trap %d\n", Event.n.u8Vector)); 2671 Assert(!Event.n.u1ErrorCodeValid); 2672 rc2 = TRPMAssertTrap(pVCpu, Event.n.u8Vector, TRPM_HARDWARE_INT); 2673 AssertRC(rc2); 2674 break; 2675 2676 default: 2677 /* Exceptions and software interrupts can just be restarted. */ 2678 break; 2598 2679 } 2599 2680 } … … 2628 2709 end: 2629 2710 2630 /* We now going back to ring-3, so clear the action flag. */ 2711 /* 2712 * We are now going back to ring-3, so clear the forced action flag. 2713 */ 2631 2714 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3); 2632 2715 2633 /* Signal changes for the recompiler. */ 2634 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS); 2635 2636 /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */ 2716 /* 2717 * Signal changes to the recompiler. 2718 */ 2719 CPUMSetChangedFlags(pVCpu, 2720 CPUM_CHANGED_SYSENTER_MSR 2721 | CPUM_CHANGED_LDTR 2722 | CPUM_CHANGED_GDTR 2723 | CPUM_CHANGED_IDTR 2724 | CPUM_CHANGED_TR 2725 | CPUM_CHANGED_HIDDEN_SEL_REGS); 2726 2727 /* 2728 * If we executed vmrun and an external IRQ was pending, then we don't have to do a full sync the next time. 2729 */ 2637 2730 if (exitCode == SVM_EXIT_INTR) 2638 2731 { … … 2649 2742 } 2650 2743 2651 /* translate into a less severe return code */2744 /* Translate into a less severe return code */ 2652 2745 if (rc == VERR_EM_INTERPRETER) 2653 2746 rc = VINF_EM_RAW_EMULATE_INSTR; … … 2668 2761 } 2669 2762 2763 2670 2764 /** 2671 * Emulate simple mov tpr instruction 2765 * Emulate simple mov tpr instruction. 2672 2766 * 2673 2767 * @returns VBox status code. 2674 * @param pVM The VM to operate on.2768 * @param pVM Pointer to the VM. 2675 2769 * @param pVCpu The VM CPU to operate on. 2676 * @param pCtx CPU context2770 * @param pCtx Pointer to the guest CPU context. 
2677 2771 */ 2678 2772 static int hmR0SvmEmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 2682 2776 LogFlow(("Emulated VMMCall TPR access replacement at %RGv\n", pCtx->rip)); 2683 2777 2684 while (true)2778 for (;;) 2685 2779 { 2686 2780 bool fPending; … … 2693 2787 switch(pPatch->enmType) 2694 2788 { 2695 case HWACCMTPRINSTR_READ: 2696 /* TPR caching in CR8 */ 2697 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending); 2698 AssertRC(rc); 2699 2700 rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr); 2701 AssertRC(rc); 2702 2703 LogFlow(("Emulated read successfully\n")); 2704 pCtx->rip += pPatch->cbOp; 2705 break; 2706 2707 case HWACCMTPRINSTR_WRITE_REG: 2708 case HWACCMTPRINSTR_WRITE_IMM: 2709 /* Fetch the new TPR value */ 2710 if (pPatch->enmType == HWACCMTPRINSTR_WRITE_REG) 2711 { 2712 uint32_t val; 2713 2714 rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &val); 2789 case HWACCMTPRINSTR_READ: 2790 /* TPR caching in CR8 */ 2791 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending); 2715 2792 AssertRC(rc); 2716 u8Tpr = val; 2717 } 2718 else 2719 u8Tpr = (uint8_t)pPatch->uSrcOperand; 2720 2721 rc = PDMApicSetTPR(pVCpu, u8Tpr); 2722 AssertRC(rc); 2723 LogFlow(("Emulated write successfully\n")); 2724 pCtx->rip += pPatch->cbOp; 2725 break; 2793 2794 rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr); 2795 AssertRC(rc); 2796 2797 LogFlow(("Emulated read successfully\n")); 2798 pCtx->rip += pPatch->cbOp; 2799 break; 2800 2801 case HWACCMTPRINSTR_WRITE_REG: 2802 case HWACCMTPRINSTR_WRITE_IMM: 2803 /* Fetch the new TPR value */ 2804 if (pPatch->enmType == HWACCMTPRINSTR_WRITE_REG) 2805 { 2806 uint32_t val; 2807 2808 rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &val); 2809 AssertRC(rc); 2810 u8Tpr = val; 2811 } 2812 else 2813 u8Tpr = (uint8_t)pPatch->uSrcOperand; 2814 2815 rc = PDMApicSetTPR(pVCpu, u8Tpr); 2816 AssertRC(rc); 2817 LogFlow(("Emulated write successfully\n")); 2818 pCtx->rip += pPatch->cbOp; 2819 break; 2820 2726 2821 default: 2727 AssertMsgFailedReturn(("Unexpected type %d\n", pPatch->enmType), VERR_HMSVM_UNEXPECTED_PATCH_TYPE);2822 AssertMsgFailedReturn(("Unexpected type %d\n", pPatch->enmType), VERR_HMSVM_UNEXPECTED_PATCH_TYPE); 2728 2823 } 2729 2824 } … … 2733 2828 2734 2829 /** 2735 * Enters the AMD-V session 2830 * Enters the AMD-V session. 2736 2831 * 2737 2832 * @returns VBox status code. 2738 * @param pVM The VM to operate on.2833 * @param pVM Pointer to the VM. 2739 2834 * @param pVCpu The VM CPU to operate on. 2740 * @param pCpu CPU info struct2835 * @param pCpu Pointer to the CPU info struct. 2741 2836 */ 2742 2837 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu) … … 2755 2850 2756 2851 /** 2757 * Leaves the AMD-V session 2852 * Leaves the AMD-V session. 2758 2853 * 2759 2854 * @returns VBox status code. 2760 * @param pVM The VM to operate on.2855 * @param pVM Pointer to the VM. 2761 2856 * @param pVCpu The VM CPU to operate on. 2762 * @param pCtx CPU context2857 * @param pCtx Pointer to the guest CPU context. 2763 2858 */ 2764 2859 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) … … 2794 2889 2795 2890 2891 /** 2892 * Interprets INVLPG. 2893 * 2894 * @return VBox status code. 2895 * @param pVCpu Pointer to the VMCPU. 2896 * @param pCpu Pointer to the CPU info struct. 2897 * @param pRegFrame Pointer to the register frame. 2898 * @param ASID Tagged TLB id for the guest. 
2899 */ 2796 2900 static int hmR0svmInterpretInvlPg(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID) 2797 2901 { … … 2806 2910 switch(param1.type) 2807 2911 { 2808 case PARMTYPE_IMMEDIATE: 2809 case PARMTYPE_ADDRESS: 2810 if(!(param1.flags & (PARAM_VAL32|PARAM_VAL64))) 2912 case PARMTYPE_IMMEDIATE: 2913 case PARMTYPE_ADDRESS: 2914 if(!(param1.flags & (PARAM_VAL32|PARAM_VAL64))) 2915 return VERR_EM_INTERPRETER; 2916 addr = param1.val.val64; 2917 break; 2918 2919 default: 2811 2920 return VERR_EM_INTERPRETER; 2812 addr = param1.val.val64;2813 break;2814 2815 default:2816 return VERR_EM_INTERPRETER;2817 2921 } 2818 2922 … … 2828 2932 } 2829 2933 2934 2830 2935 /** 2831 * Interprets INVLPG 2936 * Interprets INVLPG. 2832 2937 * 2833 2938 * @returns VBox status code. … … 2836 2941 * @retval VERR_* Fatal errors. 2837 2942 * 2838 * @param pVM The VM handle.2839 * @param pRegFrame The register frame.2840 * @param ASID Tagged TLB id for the guest 2943 * @param pVM Pointer to the VM. 2944 * @param pRegFrame Pointer to the register frame. 2945 * @param ASID Tagged TLB id for the guest. 2841 2946 * 2842 * 2947 * @remarks Updates the EIP if an instruction was executed successfully. 2843 2948 */ 2844 2949 static int hmR0SvmInterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID) 2845 2950 { 2846 2951 /* 2847 * Only allow 32 & 64 bit scode.2952 * Only allow 32 & 64 bit code. 2848 2953 */ 2849 2954 DISCPUMODE enmMode = SELMGetCpuModeFromSelector(pVCpu, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid); … … 2877 2982 2878 2983 /** 2879 * Invalidates a guest page 2984 * Invalidates a guest page by guest virtual address. 2880 2985 * 2881 2986 * @returns VBox status code. 2882 * @param pVM The VM to operate on.2987 * @param pVM Pointer to the VM. 2883 2988 * @param pVCpu The VM CPU to operate on. 2884 * @param GCVirt Page to invalidate2989 * @param GCVirt Guest virtual address of the page to invalidate. 2885 2990 */ 2886 2991 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt) … … 2914 3019 #if 0 /* obsolete, but left here for clarification. */ 2915 3020 /** 2916 * Invalidates a guest page by physical address 3021 * Invalidates a guest page by physical address. 2917 3022 * 2918 3023 * @returns VBox status code. 2919 * @param pVM The VM to operate on.3024 * @param pVM Pointer to the VM. 2920 3025 * @param pVCpu The VM CPU to operate on. 2921 * @param GCPhys Page to invalidate3026 * @param GCPhys Guest physical address of the page to invalidate. 2922 3027 */ 2923 3028 VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys) … … 2931 3036 #endif 2932 3037 3038 2933 3039 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 2934 3040 /** 2935 * Prepares for and executes VMRUN (64 bits guests from a 32 bits hosts).3041 * Prepares for and executes VMRUN (64-bit guests from a 32-bit host). 2936 3042 * 2937 3043 * @returns VBox status code. 2938 3044 * @param pVMCBHostPhys Physical address of host VMCB. 2939 3045 * @param pVMCBPhys Physical address of the VMCB. 2940 * @param pCtx Guestcontext.2941 * @param pVM The VM to operate on.2942 * @param pVCpu The VMCPU to operate on.3046 * @param pCtx Pointer to the guest CPU context. 3047 * @param pVM Pointer to the VM. 3048 * @param pVCpu Pointer to the VMCPU. 
2943 3049 */ 2944 3050 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu) … … 2954 3060 } 2955 3061 3062 2956 3063 /** 2957 * Executes the specified handler in 64 mode3064 * Executes the specified handler in 64-bit mode. 2958 3065 * 2959 3066 * @returns VBox status code. 2960 * @param pVM The VM to operate on.2961 * @param pVCpu The VMCPU to operate on.2962 * @param pCtx Guest context2963 * @param pfnHandler RC handler2964 * @param cbParam Number of parameters 2965 * @param paParam Array of 32 bits parameters3067 * @param pVM Pointer to the VM. 3068 * @param pVCpu Pointer to the VMCPU. 3069 * @param pCtx Pointer to the guest CPU context. 3070 * @param pfnHandler Pointer to the RC handler function. 3071 * @param cbParam Number of parameters. 3072 * @param paParam Array of 32-bit parameters. 2966 3073 */ 2967 3074 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam) -
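
The hunks above repeatedly touch hmR0SvmSetMSRPermission, which flips per-MSR read/write intercept bits in the AMD-V MSR permission map. As a rough, self-contained sketch of the addressing that function performs (the helper name and output format are illustrative assumptions, not VirtualBox code; the three MSR ranges, region offsets and two-bits-per-MSR layout follow the hunks above):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only: locate the read/write intercept bits for an MSR
 * inside an AMD-V MSR permission map (MSRPM).  Each MSR owns two consecutive
 * bits (bit 0 = read intercept, bit 1 = write intercept); the map is split
 * into 2 KB regions for the three MSR ranges handled by hmR0SvmSetMSRPermission.
 */
static bool svmMsrpmLocateBits(uint32_t uMsr, uint32_t *poffByte, uint32_t *piBit)
{
    uint32_t offRegion, uRelMsr;

    if (uMsr <= 0x1FFF)                                 /* Pentium-compatible MSRs */
    {
        offRegion = 0x000;
        uRelMsr   = uMsr;
    }
    else if (uMsr >= 0xC0000000 && uMsr <= 0xC0001FFF)  /* AMD 6th gen & SYSCALL MSRs */
    {
        offRegion = 0x800;
        uRelMsr   = uMsr - 0xC0000000;
    }
    else if (uMsr >= 0xC0010000 && uMsr <= 0xC0011FFF)  /* AMD 7th/8th gen MSRs */
    {
        offRegion = 0x1000;
        uRelMsr   = uMsr - 0xC0010000;
    }
    else
        return false;                                   /* outside the map: always intercepted */

    uint32_t ulBit = uRelMsr * 2;                       /* two permission bits per MSR */
    *poffByte = offRegion + ulBit / 8;                  /* byte offset into the bitmap pages */
    *piBit    = ulBit % 8;                              /* bit position of the read bit */
    return true;
}

int main(void)
{
    uint32_t offByte, iBit;
    if (svmMsrpmLocateBits(0xC0000082 /* MSR_K8_LSTAR */, &offByte, &iBit))
        printf("LSTAR read intercept: byte 0x%x, bit %u (write bit follows it)\n",
               (unsigned)offByte, (unsigned)iBit);
    return 0;
}
```
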
trunk/src/VBox/VMM/VMMR0/HWSVMR0.h
r38685 r41335 5 5 6 6 /* 7 * Copyright (C) 2006-201 1Oracle Corporation7 * Copyright (C) 2006-2012 Oracle Corporation 8 8 * 9 9 * This file is part of VirtualBox Open Source Edition (OSE), as … … 42 42 * 43 43 * @returns VBox status code. 44 * @param pVM The VM to operate on.45 * @param pVCpu The VMCPU to operate on.46 * @param pCpu CPU info struct44 * @param pVM Pointer to the VM. 45 * @param pVCpu Pointer to the VMCPU. 46 * @param pCpu Pointer to the CPU info struct. 47 47 */ 48 48 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu); … … 52 52 * 53 53 * @returns VBox status code. 54 * @param pVM The VM to operate on.55 * @param pVCpu The VMCPU to operate on.56 * @param pCtx CPU context54 * @param pVM Pointer to the VM. 55 * @param pVCpu Pointer to the VMCPU. 56 * @param pCtx Pointer to the guest CPU context. 57 57 */ 58 58 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); … … 62 62 * 63 63 * @returns VBox status code. 64 * @param pCpu CPU info struct65 * @param pVM The VM to operate on. (can be NULL after a resume)66 * @param pvPageCpu Pointer to the global cpu page67 * @param pPageCpuPhys Physical address of the global cpu page64 * @param pCpu Pointer to the CPU info struct. 65 * @param pVM Pointer to the VM (can be NULL after a resume!). 66 * @param pvPageCpu Pointer to the global CPU page. 67 * @param pPageCpuPhys Physical address of the global CPU page. 68 68 */ 69 69 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage); … … 73 73 * 74 74 * @returns VBox status code. 75 * @param pCpu CPU info struct76 * @param pvPageCpu Pointer to the global cpu page77 * @param pPageCpuPhys Physical address of the global cpu page75 * @param pCpu Pointer to the CPU info struct. 76 * @param pvPageCpu Pointer to the global CPU page. 77 * @param pPageCpuPhys Physical address of the global CPU page. 78 78 */ 79 79 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); … … 83 83 * 84 84 * @returns VBox status code. 85 * @param pVM The VM to operate on.85 * @param pVM Pointer to the VM. 86 86 */ 87 87 VMMR0DECL(int) SVMR0InitVM(PVM pVM); … … 91 91 * 92 92 * @returns VBox status code. 93 * @param pVM The VM to operate on.93 * @param pVM Pointer to the VM. 94 94 */ 95 95 VMMR0DECL(int) SVMR0TermVM(PVM pVM); … … 99 99 * 100 100 * @returns VBox status code. 101 * @param pVM The VM to operate on.101 * @param pVM Pointer to the VM. 102 102 */ 103 103 VMMR0DECL(int) SVMR0SetupVM(PVM pVM); … … 108 108 * 109 109 * @returns VBox status code. 110 * @param pVM The VM to operate on.111 * @param pVCpu The VMCPU to operate on.112 * @param pCtx Guest context110 * @param pVM Pointer to the VM. 111 * @param pVCpu Pointer to the VMCPU. 112 * @param pCtx Pointer to the guest CPU context. 113 113 */ 114 114 VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); … … 116 116 117 117 /** 118 * Save the host state 119 * 120 * @returns VBox status code. 121 * @param pVM The VM to operate on.122 * @param pVCpu The VMCPU to operate on.118 * Save the host state. 119 * 120 * @returns VBox status code. 121 * @param pVM Pointer to the VM. 122 * @param pVCpu Pointer to the VMCPU. 123 123 */ 124 124 VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu); 125 125 126 126 /** 127 * Loads the guest state 128 * 129 * @returns VBox status code. 130 * @param pVM The VM to operate on.131 * @param pVCpu The VMCPU to operate on.132 * @param pCtx Guest context127 * Loads the guest state. 
128 * 129 * @returns VBox status code. 130 * @param pVM Pointer to the VM. 131 * @param pVCpu Pointer to the VMCPU. 132 * @param pCtx Pointer to the guest CPU context. 133 133 */ 134 134 VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 135 135 136 136 137 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 137 138 /** 139 * Prepares for and executes VMRUN (64 bits guests from a 32 bits hosts). 138 /** 139 * Prepares for and executes VMRUN (64-bit guests from a 32-bit host). 140 140 * 141 141 * @returns VBox status code. 142 142 * @param pVMCBHostPhys Physical address of host VMCB. 143 143 * @param pVMCBPhys Physical address of the VMCB. 144 * @param pCtx Guestcontext.145 * @param pVM The VM to operate on.146 * @param pVCpu The VMCPU to operate on. (not used)144 * @param pCtx Pointer to the guest CPU context. 145 * @param pVM Pointer to the VM. 146 * @param pVCpu Pointer to the VMCPU. (not used) 147 147 */ 148 148 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); 149 149 150 150 /** 151 * Executes the specified handler in 64 mode152 * 153 * @returns VBox status code. 154 * @param pVM The VM to operate on.155 * @param pVCpu The VMCPU to operate on.156 * @param pCtx Guest context157 * @param pfnHandler RC handler158 * @param cbParam Number of parameters 159 * @param paParam Array of 32 bits parameters160 */ 161 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam);162 151 * Executes the specified handler in 64-bit mode. 152 * 153 * @returns VBox status code. 154 * @param pVM Pointer to the VM. 155 * @param pVCpu Pointer to the VMCPU. 156 * @param pCtx Pointer to the guest CPU context. 157 * @param pfnHandler Pointer to the RC handler function. 158 * @param cbParam Number of parameters. 159 * @param paParam Array of 32-bit parameters. 160 */ 161 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, 162 uint32_t *paParam); 163 163 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */ 164 164 165 165 /** 166 * Prepares for and executes VMRUN (32 bitsguests).166 * Prepares for and executes VMRUN (32-bit guests). 167 167 * 168 168 * @returns VBox status code. 169 169 * @param pVMCBHostPhys Physical address of host VMCB. 170 170 * @param pVMCBPhys Physical address of the VMCB. 171 * @param pCtx Guestcontext.172 * @param pVM The VM to operate on. (not used)173 * @param pVCpu The VMCPU to operate on. (not used)171 * @param pCtx Pointer to the guest CPU context. 172 * @param pVM Pointer to the VM. (not used) 173 * @param pVCpu Pointer to the VMCPU. (not used) 174 174 */ 175 175 DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); … … 177 177 178 178 /** 179 * Prepares for and executes VMRUN (64 bitsguests).179 * Prepares for and executes VMRUN (64-bit guests). 180 180 * 181 181 * @returns VBox status code. 182 182 * @param pVMCBHostPhys Physical address of host VMCB. 183 183 * @param pVMCBPhys Physical address of the VMCB. 184 * @param pCtx Guestcontext.185 * @param pVM The VM to operate on. (not used)186 * @param pVCpu The VMCPU to operate on. (not used)184 * @param pCtx Pointer to the guest CPU context. 185 * @param pVM Pointer to the VM. (not used) 186 * @param pVCpu Pointer to the VMCPU. 
(not used) 187 187 */ 188 188 DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); … … 200 200 #define SVM_HIDSEGATTR_SVM2VMX(a) (a & 0xFF) | ((a & 0x0F00) << 4) 201 201 202 #define SVM_WRITE_SELREG(REG, reg) \203 { \204 pVMCB->guest.REG.u16Sel = pCtx->reg; \205 pVMCB->guest.REG.u32Limit = pCtx->reg##Hid.u32Limit; \206 pVMCB->guest.REG.u64Base = pCtx->reg##Hid.u64Base; \202 #define SVM_WRITE_SELREG(REG, reg) \ 203 { \ 204 pVMCB->guest.REG.u16Sel = pCtx->reg; \ 205 pVMCB->guest.REG.u32Limit = pCtx->reg##Hid.u32Limit; \ 206 pVMCB->guest.REG.u64Base = pCtx->reg##Hid.u64Base; \ 207 207 pVMCB->guest.REG.u16Attr = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg##Hid.Attr.u); \ 208 208 } 209 209 210 #define SVM_READ_SELREG(REG, reg) \211 { \212 pCtx->reg = pVMCB->guest.REG.u16Sel; \213 pCtx->reg##Hid.u32Limit = pVMCB->guest.REG.u32Limit; \214 pCtx->reg##Hid.u64Base = pVMCB->guest.REG.u64Base; \210 #define SVM_READ_SELREG(REG, reg) \ 211 { \ 212 pCtx->reg = pVMCB->guest.REG.u16Sel; \ 213 pCtx->reg##Hid.u32Limit = pVMCB->guest.REG.u32Limit; \ 214 pCtx->reg##Hid.u64Base = pVMCB->guest.REG.u64Base; \ 215 215 pCtx->reg##Hid.Attr.u = SVM_HIDSEGATTR_SVM2VMX(pVMCB->guest.REG.u16Attr); \ 216 216 } … … 222 222 RT_C_DECLS_END 223 223 224 #endif 225 224 #endif /* ___VMMR0_HWSVMR0_h */ 225
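
The header also defines the hidden-segment-attribute conversions used by the SVM_WRITE_SELREG/SVM_READ_SELREG macros. A tiny round-trip check of SVM_HIDSEGATTR_SVM2VMX (the VMX2SVM counterpart is not visible in this hunk, so defining it as the plain inverse is an assumption) might look like this:

```c
#include <assert.h>
#include <stdint.h>

/* The VMCB packs the segment attribute bits into 12 contiguous bits, while the
 * VMX-style layout used elsewhere leaves a 4-bit gap so the G/D/L/AVL nibble
 * sits at bits 12-15.  SVM2VMX below is taken from HWSVMR0.h; VMX2SVM is assumed
 * to be its inverse for the purpose of this sketch. */
#define SVM_HIDSEGATTR_SVM2VMX(a)   (((a) & 0xFF) | (((a) & 0x0F00) << 4))
#define SVM_HIDSEGATTR_VMX2SVM(a)   (((a) & 0xFF) | (((a) & 0xF000) >> 4))   /* assumption */

int main(void)
{
    uint32_t uVmxAttr = 0xC09B;                             /* flat 32-bit code segment: G=1, D=1, P=1, type=0xB */
    uint32_t uSvmAttr = SVM_HIDSEGATTR_VMX2SVM(uVmxAttr);   /* packed VMCB encoding */

    assert(uSvmAttr == 0xC9B);
    assert(SVM_HIDSEGATTR_SVM2VMX(uSvmAttr) == uVmxAttr);   /* round-trips while bits 8-11 are zero */
    return 0;
}
```
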