VirtualBox

Changeset 41335 in vbox for trunk/src/VBox/VMM


Timestamp:
May 16, 2012 12:36:18 PM
Author:
vboxsync
Message:

VMM/VMMR0/HWSVMR0: style fixes, cleanup, wrap to 130 columns.

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp

    r41312 r41335  
    11/* $Id$ */
    22/** @file
    3  * HM SVM (AMD-V) - Host Context Ring 0.
     3 * HM SVM (AMD-V) - Host Context Ring-0.
    44 */
    55
    66/*
    7  * Copyright (C) 2006-2011 Oracle Corporation
     7 * Copyright (C) 2006-2012 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    5858static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
    5959
     60
    6061/*******************************************************************************
    6162*   Global Variables                                                           *
     
    6364
    6465/**
    65  * Sets up and activates AMD-V on the current CPU
     66 * Sets up and activates AMD-V on the current CPU.
    6667 *
    6768 * @returns VBox status code.
    68  * @param   pCpu            CPU info struct
    69  * @param   pVM             The VM to operate on. (can be NULL after a resume!!)
    70  * @param   pvCpuPage       Pointer to the global cpu page.
    71  * @param   HCPhysCpuPage   Physical address of the global cpu page.
     69 * @param   pCpu            Pointer to the CPU info struct.
     70 * @param   pVM             Pointer to the VM (can be NULL after a resume!).
     71 * @param   pvCpuPage       Pointer to the global CPU page.
     72 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    7273 */
    7374VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     
    7677    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    7778
    78     /* We must turn on AMD-V and setup the host state physical address, as
    79        those MSRs are per-cpu/core. */
     79    /*
     80     * We must turn on AMD-V and setup the host state physical address, as those MSRs are per cpu/core.
     81     */
    8082    uint64_t fEfer = ASMRdMsr(MSR_K6_EFER);
    8183    if (fEfer & MSR_K6_EFER_SVME)
    8284    {
    83         /* If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we
    84            blindly use AMD-V. */
     85        /*
      86         * If the VBOX_HWVIRTEX_IGNORE_SVM_IN_USE hack is active, then we blindly use AMD-V.
     87         */
    8588        if (    pVM
    8689            &&  pVM->hwaccm.s.svm.fIgnoreInUseError)
     90        {
    8791            pCpu->fIgnoreAMDVInUseError = true;
     92        }
     93
    8894        if (!pCpu->fIgnoreAMDVInUseError)
    8995            return VERR_SVM_IN_USE;
     
    9399    ASMWrMsr(MSR_K6_EFER, fEfer | MSR_K6_EFER_SVME);
    94100
    95     /* Write the physical page address where the CPU will store the host state
    96        while executing the VM. */
     101    /* Write the physical page address where the CPU will store the host state while executing the VM. */
    97102    ASMWrMsr(MSR_K8_VM_HSAVE_PA, HCPhysCpuPage);
    98103
     
    107112}
    108113
     114
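
For reference, the per-CPU enable sequence that SVMR0EnableCpu implements boils down to two MSR writes. Below is a minimal standalone sketch, assuming hypothetical RdMsr/WrMsr helpers; the MSR indices and the SVME bit come from the AMD manual, not from this changeset:

    #include <stdint.h>

    #define MSR_EFER          0xc0000080   /* extended feature enable register */
    #define MSR_VM_HSAVE_PA   0xc0010117   /* host state save area, one per CPU */
    #define EFER_SVME         (1u << 12)   /* SVM enable bit in EFER */

    extern uint64_t RdMsr(uint32_t idMsr);                /* hypothetical helper */
    extern void     WrMsr(uint32_t idMsr, uint64_t uVal); /* hypothetical helper */

    /* Enable AMD-V on the current CPU; both MSRs are per cpu/core. */
    static int EnableSvmOnCpu(uint64_t HCPhysHostSaveArea)
    {
        uint64_t fEfer = RdMsr(MSR_EFER);
        if (fEfer & EFER_SVME)
            return -1;                              /* AMD-V already in use */
        WrMsr(MSR_EFER, fEfer | EFER_SVME);         /* turn on AMD-V */
        WrMsr(MSR_VM_HSAVE_PA, HCPhysHostSaveArea); /* where VMRUN saves host state */
        return 0;
    }
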
    109115/**
    110  * Deactivates AMD-V on the current CPU
     116 * Deactivates AMD-V on the current CPU.
    111117 *
    112118 * @returns VBox status code.
    113  * @param   pCpu            CPU info struct
    114  * @param   pvCpuPage       Pointer to the global cpu page.
    115  * @param   HCPhysCpuPage   Physical address of the global cpu page.
     119 * @param   pCpu            Pointer to the CPU info struct.
     120 * @param   pvCpuPage       Pointer to the global CPU page.
     121 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    116122 */
    117123VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
     
    131137}
    132138
     139
    133140/**
    134141 * Does Ring-0 per VM AMD-V init.
    135142 *
    136143 * @returns VBox status code.
    137  * @param   pVM         The VM to operate on.
     144 * @param   pVM         Pointer to the VM.
    138145 */
    139146VMMR0DECL(int) SVMR0InitVM(PVM pVM)
     
    153160    ASMMemFill32(pVM->hwaccm.s.svm.pIOBitmap, PAGE_SIZE*3, 0xffffffff);
    154161
    155     /* Erratum 170 which requires a forced TLB flush for each world switch:
     162    /*
     163     * Erratum 170 which requires a forced TLB flush for each world switch:
    156164     * See http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/33610.pdf
    157165     *
     
    166174     *              0x7c 2
    167175     * Turion 64:   0x68 2
    168      *
    169176     */
    170177    uint32_t u32Dummy;
    171178    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    172179    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    173     u32BaseFamily= (u32Version >> 8) & 0xf;
    174     u32Family    = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    175     u32Model     = ((u32Version >> 4) & 0xf);
    176     u32Model     = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    177     u32Stepping  = u32Version & 0xf;
     180    u32BaseFamily = (u32Version >> 8) & 0xf;
     181    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
     182    u32Model      = ((u32Version >> 4) & 0xf);
     183    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
     184    u32Stepping   = u32Version & 0xf;
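
The decode above follows the standard CPUID leaf-1 EAX encoding: the extended family and extended model bits only count when the base family is 0xf, which is exactly the family the erratum check below is interested in. A self-contained sketch of the same arithmetic, with a made-up EAX value:

    #include <stdint.h>
    #include <stdio.h>

    static void DecodeCpuidLeaf1(uint32_t uEax)
    {
        uint32_t uBaseFamily = (uEax >> 8) & 0xf;
        uint32_t uFamily     = uBaseFamily + (uBaseFamily == 0xf ? (uEax >> 20) & 0x7f : 0);
        uint32_t uModel      = ((uEax >> 4) & 0xf)
                             | ((uBaseFamily == 0xf ? (uEax >> 16) & 0x0f : 0) << 4);
        uint32_t uStepping   = uEax & 0xf;
        printf("family=%#x model=%#x stepping=%#x\n", uFamily, uModel, uStepping);
    }

    int main(void)
    {
        DecodeCpuidLeaf1(0x00060f82); /* family 0xf, model 0x68, stepping 2: a Turion 64 from the erratum list */
        return 0;
    }
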
    178185    if (    u32Family == 0xf
    179186        &&  !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) &&  u32Stepping >= 1)
     
    221228        pVCpu->hwaccm.s.svm.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.svm.pMemObjMSRBitmap, 0);
    222229        /* Set all bits to intercept all MSR accesses. */
    223         ASMMemFill32(pVCpu->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE*2, 0xffffffff);
     230        ASMMemFill32(pVCpu->hwaccm.s.svm.pMSRBitmap, PAGE_SIZE * 2, 0xffffffff);
    224231    }
    225232
    226233    return VINF_SUCCESS;
    227234}
     235
    228236
    229237/**
     
    231239 *
    232240 * @returns VBox status code.
    233  * @param   pVM         The VM to operate on.
     241 * @param   pVM         Pointer to the VM.
    234242 */
    235243VMMR0DECL(int) SVMR0TermVM(PVM pVM)
     
    272280}
    273281
     282
    274283/**
    275  * Sets up AMD-V for the specified VM
     284 * Sets up AMD-V for the specified VM.
    276285 *
    277286 * @returns VBox status code.
    278  * @param   pVM         The VM to operate on.
     287 * @param   pVM         Pointer to the VM.
    279288 */
    280289VMMR0DECL(int) SVMR0SetupVM(PVM pVM)
     
    283292
    284293    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    285 
    286294    Assert(pVM->hwaccm.s.svm.fSupported);
    287295
     
    293301        AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    294302
    295         /* Program the control fields. Most of them never have to be changed again.
     303        /*
     304         * Program the control fields. Most of them never have to be changed again.
    296305         * CR0/4 reads must be intercepted, our shadow values are not necessarily the same as the guest's.
    297306         * Note: CR0 & CR4 can be safely read when guest and shadow copies are identical.
     
    348357                                        | SVM_CTRL2_INTERCEPT_WBINVD
    349358                                        | SVM_CTRL2_INTERCEPT_MONITOR
    350                                         | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
    351                                         ;
     359                                        | SVM_CTRL2_INTERCEPT_MWAIT_UNCOND; /* don't execute mwait or else we'll idle inside the
     360                                                                               guest (host thinks the cpu load is high) */
     361
    352362        Log(("pVMCB->ctrl.u32InterceptException = %x\n", pVMCB->ctrl.u32InterceptException));
    353363        Log(("pVMCB->ctrl.u32InterceptCtrl1 = %x\n", pVMCB->ctrl.u32InterceptCtrl1));
     
    356366        /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
    357367        pVMCB->ctrl.IntCtrl.n.u1VIrqMasking = 1;
     368
    358369        /* Ignore the priority in the TPR; just deliver it when we tell it to. */
    359370        pVMCB->ctrl.IntCtrl.n.u1IgnoreTPR   = 1;
     
    369380        pVMCB->ctrl.TLBCtrl.n.u32ASID = 1;
    370381
    371         /* Setup the PAT msr (nested paging only) */
    372         /* The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB, so choose type 6 for all PAT slots. */
     382        /*
      383         * Setup the PAT MSR (nested paging only).
     384         * The default value should be 0x0007040600070406ULL, but we want to treat all guest memory as WB,
     385         * so choose type 6 for all PAT slots.
     386         */
    373387        pVMCB->guest.u64GPAT = 0x0006060606060606ULL;
    374388
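
For reference, the PAT MSR packs eight memory types, one byte per PA0..PA7 slot (PA0 in the low byte), and type 6 is write-back (WB) per the x86 PAT definition. A small sketch of that packing; note the value used above leaves the top slot at 0:

    #include <stdint.h>

    /* Pack eight PAT memory types, one byte per slot, PA0 in the low byte. */
    static uint64_t BuildPat(const uint8_t aTypes[8])
    {
        uint64_t uPat = 0;
        for (unsigned iSlot = 0; iSlot < 8; iSlot++)
            uPat |= (uint64_t)aTypes[iSlot] << (iSlot * 8);
        return uPat;
    }

    /* BuildPat(g_aPatWB) == UINT64_C(0x0006060606060606), the value set above. */
    static const uint8_t g_aPatWB[8] = { 6, 6, 6, 6, 6, 6, 6, 0 };
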
     
    380394            pVMCB->ctrl.u16InterceptWrCRx |= RT_BIT(3);
    381395
    382             /* We must also intercept:
     396            /*
     397             * We must also intercept:
    383398             * - INVLPG (must go through shadow paging)
    384399             * - task switches (may change CR3/EFLAGS/LDT)
    385400             */
    386401            pVMCB->ctrl.u32InterceptCtrl1 |=   SVM_CTRL1_INTERCEPT_INVLPG
    387                                              | SVM_CTRL1_INTERCEPT_TASK_SWITCH
    388                                              ;
     402                                             | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
    389403
    390404            /* Page faults must be intercepted to implement shadow paging. */
     
    392406        }
    393407
    394         /* The following MSRs are saved automatically by vmload/vmsave, so we allow the guest
     408        /*
     409         * The following MSRs are saved automatically by vmload/vmsave, so we allow the guest
    395410         * to modify them directly.
    396411         */
     
    412427
    413428/**
    414  * Sets the permission bits for the specified MSR
     429 * Sets the permission bits for the specified MSR.
    415430 *
    416  * @param   pVCpu       The VMCPU to operate on.
    417  * @param   ulMSR       MSR value
    418  * @param   fRead       Reading allowed/disallowed
    419  * @param   fWrite      Writing allowed/disallowed
     431 * @param   pVCpu       Pointer to the VMCPU.
     432 * @param   ulMSR       MSR value.
     433 * @param   fRead       Whether reading is allowed.
     434 * @param   fWrite      Whether writing is allowed.
    420435 */
    421436static void hmR0SvmSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
     
    429444        ulBit    = ulMSR * 2;
    430445    }
    431     else
    432     if (    ulMSR >= 0xC0000000
    433         &&  ulMSR <= 0xC0001FFF)
     446    else if (   ulMSR >= 0xC0000000
     447             && ulMSR <= 0xC0001FFF)
    434448    {
    435449        /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
     
    437451        pMSRBitmap += 0x800;
    438452    }
    439     else
    440     if (    ulMSR >= 0xC0010000
    441         &&  ulMSR <= 0xC0011FFF)
     453    else if (   ulMSR >= 0xC0010000
     454             && ulMSR <= 0xC0011FFF)
    442455    {
    443456        /* AMD Seventh and Eighth Generation Processor MSRs */
     
    462475}
    463476
     477
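
The three ranges handled by hmR0SvmSetMSRPermission mirror the MSR permission bitmap (MSRPM) layout in the AMD manual: two bits per MSR (read intercept, then write intercept; a set bit means intercept) in three 0x800-byte vectors. A hedged standalone sketch of that indexing, not the function itself:

    #include <stdbool.h>
    #include <stdint.h>

    static void SetMsrPermission(uint8_t *pbMsrpm, uint32_t uMsr, bool fRead, bool fWrite)
    {
        uint32_t offBase;
        if (uMsr <= 0x1fff)
            offBase = 0;        /* Pentium-compatible MSRs */
        else if (uMsr - 0xc0000000 <= 0x1fff)
            offBase = 0x800;    /* AMD sixth generation and SYSCALL MSRs */
        else if (uMsr - 0xc0010000 <= 0x1fff)
            offBase = 0x1000;   /* AMD seventh and eighth generation MSRs */
        else
            return;             /* not covered by the bitmap: always intercepted */

        uint32_t iBit = (uMsr & 0x1fff) * 2;          /* two bits per MSR: read, then write */
        uint8_t *pb   = pbMsrpm + offBase + iBit / 8; /* iBit is even, so both bits share one byte */

        /* A set bit means "intercept this access"; clear it to allow direct access. */
        if (fRead)  *pb &= ~(uint8_t)(1u << (iBit % 8));
        else        *pb |=  (uint8_t)(1u << (iBit % 8));
        if (fWrite) *pb &= ~(uint8_t)(1u << (iBit % 8 + 1));
        else        *pb |=  (uint8_t)(1u << (iBit % 8 + 1));
    }
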
    464478/**
    465  * Injects an event (trap or external interrupt)
     479 * Injects an event (trap or external interrupt).
    466480 *
    467  * @param   pVCpu       The VMCPU to operate on.
    468  * @param   pVMCB       SVM control block
    469  * @param   pCtx        CPU Context
    470  * @param   pIntInfo    SVM interrupt info
     481 * @param   pVCpu       Pointer to the VMCPU.
     482 * @param   pVMCB       Pointer to the VMCB.
     483 * @param   pCtx        Pointer to the guest CPU context.
     484 * @param   pIntInfo    Pointer to the SVM interrupt info.
    471485 */
    472486DECLINLINE(void) hmR0SvmInjectEvent(PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx, SVM_EVENT *pEvent)
     
    478492#ifdef VBOX_STRICT
    479493    if (pEvent->n.u8Vector == 0xE)
    480         Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
    481     else
    482     if (pEvent->n.u8Vector < 0x20)
     494    {
     495        Log(("SVM: Inject int %d at %RGv error code=%02x CR2=%RGv intInfo=%08x\n", pEvent->n.u8Vector,
     496             (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode, (RTGCPTR)pCtx->cr2, pEvent->au64[0]));
     497    }
     498    else if (pEvent->n.u8Vector < 0x20)
    483499        Log(("SVM: Inject int %d at %RGv error code=%08x\n", pEvent->n.u8Vector, (RTGCPTR)pCtx->rip, pEvent->n.u32ErrorCode));
    484500    else
     
    496512
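
hmR0SvmInjectEvent ultimately writes the VMCB EVENTINJ field, whose bit layout (per the AMD manual: vector in 7:0, type in 10:8, error-code-valid in 11, valid in 31, error code in 63:32) can be sketched as:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t BuildEventInj(uint8_t uVector, uint8_t uType, bool fErrValid, uint32_t uErrCode)
    {
        return (uint64_t)uVector               /* bits  7:0  - vector */
             | ((uint64_t)(uType & 0x7) << 8)  /* bits 10:8  - 0=ext irq, 2=NMI, 3=exception, 4=soft int */
             | ((uint64_t)fErrValid << 11)     /* bit  11    - push an error code */
             | (UINT64_C(1) << 31)             /* bit  31    - event is valid */
             | ((uint64_t)uErrCode << 32);     /* bits 63:32 - error code */
    }
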
    497513/**
    498  * Checks for pending guest interrupts and injects them
     514 * Checks for pending guest interrupts and injects them.
    499515 *
    500516 * @returns VBox status code.
    501  * @param   pVM         The VM to operate on.
     517 * @param   pVM         Pointer to the VM.
    502518 * @param   pVCpu       The VM CPU to operate on.
    503  * @param   pVMCB       SVM control block
    504  * @param   pCtx        CPU Context
     519 * @param   pVMCB       Pointer to the VMCB.
      520 * @param   pCtx        Pointer to the guest CPU context.
    505521 */
    506522static int hmR0SvmCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
     
    509525    NOREF(pVM);
    510526
    511     /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
     527    /*
     528     * Dispatch any pending interrupts (injected before, but a VM-exit occurred prematurely).
     529     */
    512530    if (pVCpu->hwaccm.s.Event.fPending)
    513531    {
    514532        SVM_EVENT Event;
    515533
    516         Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
     534        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode,
     535             (RTGCPTR)pCtx->rip));
    517536        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatIntReinject);
    518537        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
     
    523542    }
    524543
    525     /* If an active trap is already pending, then we must forward it first! */
     544    /*
     545     * If an active trap is already pending, we must forward it first!
     546     */
    526547    if (!TRPMHasTrap(pVCpu))
    527548    {
     
    540561        }
    541562
    542         /* @todo SMI interrupts. */
    543 
    544         /* When external interrupts are pending, we should exit the VM when IF is set. */
     563        /** @todo SMI interrupts. */
     564
     565        /*
     566         * When external interrupts are pending, we should exit the VM when IF is set.
     567         */
    545568        if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
    546569        {
    547             if (    !(pCtx->eflags.u32 & X86_EFL_IF)
    548                 ||  VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     570            if (  !(pCtx->eflags.u32 & X86_EFL_IF)
     571                || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    549572            {
    550573                if (!pVMCB->ctrl.IntCtrl.n.u1VIrqValid)
     
    553576                        LogFlow(("Enable irq window exit!\n"));
    554577                    else
    555                         Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n", (RTGCPTR)pCtx->rip));
    556 
    557                     /** @todo use virtual interrupt method to inject a pending irq; dispatched as soon as guest.IF is set. */
     578                    {
     579                        Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS -> irq window exit\n",
     580                             (RTGCPTR)pCtx->rip));
     581                    }
     582
     583                    /** @todo Use virtual interrupt method to inject a pending IRQ; dispatched as
     584                     *        soon as guest.IF is set. */
    558585                    pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
    559586                    pVMCB->ctrl.IntCtrl.n.u1VIrqValid    = 1;
     
    620647        if (enmType == TRPM_TRAP)
    621648        {
    622             switch (u8Vector) {
    623             case X86_XCPT_DF:
    624             case X86_XCPT_TS:
    625             case X86_XCPT_NP:
    626             case X86_XCPT_SS:
    627             case X86_XCPT_GP:
    628             case X86_XCPT_PF:
    629             case X86_XCPT_AC:
    630                 /* Valid error codes. */
    631                 Event.n.u1ErrorCodeValid = 1;
    632                 break;
    633             default:
    634                 break;
     649            switch (u8Vector)
     650            {
     651                case X86_XCPT_DF:
     652                case X86_XCPT_TS:
     653                case X86_XCPT_NP:
     654                case X86_XCPT_SS:
     655                case X86_XCPT_GP:
     656                case X86_XCPT_PF:
     657                case X86_XCPT_AC:
     658                    /* Valid error codes. */
     659                    Event.n.u1ErrorCodeValid = 1;
     660                    break;
     661                default:
     662                    break;
    635663            }
    636664            if (u8Vector == X86_XCPT_NMI)
     
    649677}
    650678
     679
    651680/**
    652  * Save the host state
     681 * Save the host state.
    653682 *
    654683 * @returns VBox status code.
    655  * @param   pVM         The VM to operate on.
     684 * @param   pVM         Pointer to the VM.
    656685 * @param   pVCpu       The VM CPU to operate on.
    657686 */
     
    664693}
    665694
     695
    666696/**
    667  * Loads the guest state
     697 * Loads the guest state.
    668698 *
    669  * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
     699 * NOTE: Don't do anything here that can cause a jump back to ring-3!!!
    670700 *
    671701 * @returns VBox status code.
    672  * @param   pVM         The VM to operate on.
     702 * @param   pVM         Pointer to the VM.
    673703 * @param   pVCpu       The VM CPU to operate on.
    674  * @param   pCtx        Guest context
     704 * @param   pCtx        Pointer to the guest CPU context.
    675705 */
    676706VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    760790        val &= ~(X86_CR0_CD|X86_CR0_NW);
    761791
    762         /* Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level. */
    763         /* Note: In nested paging mode the guest is allowed to run with paging disabled; the guest physical to host physical translation will remain active. */
     792        /*
     793         * Note: WP is not relevant in nested paging mode as we catch accesses on the (guest) physical level.
     794         * Note: In nested paging mode, the guest is allowed to run with paging disabled; the guest-physical to host-physical
     795         * translation will remain active.
     796         */
    764797        if (!pVM->hwaccm.s.fNestedPaging)
    765798        {
    766             val |= X86_CR0_PG;          /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
    767             val |= X86_CR0_WP;          /* Must set this as we rely on protecting various pages and supervisor writes must be caught. */
     799            val |= X86_CR0_PG;  /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */
     800            val |= X86_CR0_WP;  /* Must set this as we rely on protecting various pages and supervisor writes must be caught. */
    768801        }
    769802        pVMCB->guest.u64CR0 = val;
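
A condensed sketch of the shadow CR0 fixups applied above (CR0 bit positions per the x86 spec; fNestedPaging stands in for pVM->hwaccm.s.fNestedPaging):

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR0_WP  (1u << 16)
    #define X86_CR0_NW  (1u << 29)
    #define X86_CR0_CD  (1u << 30)
    #define X86_CR0_PG  (1u << 31)

    static uint32_t ComputeShadowCr0(uint32_t uGuestCr0, bool fNestedPaging)
    {
        uint32_t val = uGuestCr0 & ~(X86_CR0_CD | X86_CR0_NW); /* always run with caching enabled */
        if (!fNestedPaging)
        {
            val |= X86_CR0_PG; /* shadow paging: paging stays on, even for real mode / PE without paging */
            val |= X86_CR0_WP; /* catch supervisor writes to protected pages */
        }
        return val;
    }
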
     
    937970        {
    938971            /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */
    939             LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, pVMCB->ctrl.u64TSCOffset, u64CurTSC + pVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu)));
     972            LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
     973                     pVMCB->ctrl.u64TSCOffset, u64CurTSC + pVMCB->ctrl.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVMCB->ctrl.u64TSCOffset, TMCpuTickGet(pVCpu)));
    940974            pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    941975            pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
     
    950984    }
    951985
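
The fallback above is a monotonicity guarantee: with TSC offsetting the guest reads host TSC plus offset, and that sum must never be lower than the last value the guest has already seen. As a one-line sketch (hypothetical helper):

    #include <stdbool.h>
    #include <stdint.h>

    static bool CanUseTscOffsetting(uint64_t uHostTsc, uint64_t uTscOffset, uint64_t uLastSeenByGuest)
    {
        return uHostTsc + uTscOffset >= uLastSeenByGuest; /* otherwise intercept RDTSC/RDTSCP instead */
    }
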
    952     /* Sync the various msrs for 64 bits mode. */
     986    /* Sync the various MSRs for 64-bit mode. */
    953987    pVMCB->guest.u64STAR            = pCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
    954     pVMCB->guest.u64LSTAR           = pCtx->msrLSTAR;           /* 64 bits mode syscall rip */
     988    pVMCB->guest.u64LSTAR           = pCtx->msrLSTAR;           /* 64-bit mode syscall rip */
    955989    pVMCB->guest.u64CSTAR           = pCtx->msrCSTAR;           /* compatibility mode syscall rip */
    956990    pVMCB->guest.u64SFMASK          = pCtx->msrSFMASK;          /* syscall flag mask */
    957     pVMCB->guest.u64KernelGSBase    = pCtx->msrKERNELGSBASE;    /* swapgs exchange value */
     991    pVMCB->guest.u64KernelGSBase    = pCtx->msrKERNELGSBASE;    /* SWAPGS exchange value */
    958992
    959993#ifdef DEBUG
     
    9721006}
    9731007
     1008
    9741009/**
    9751010 * Setup TLB for ASID.
    9761011 *
    977  * @param    pVM        The VM to operate on.
     1012 * @param    pVM        Pointer to the VM.
    9781013 * @param    pVCpu      The VM CPU to operate on.
    9791014 */
     
    10941129    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
    10951130
    1096     AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
    1097     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
    1098     AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
     1131    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes,
     1132              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     1133    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID,
     1134              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
     1135    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,
     1136              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
    10991137
    11001138#ifdef VBOX_WITH_STATISTICS
     
    11161154 *
    11171155 * @returns VBox status code.
    1118  * @param   pVM         The VM to operate on.
     1156 * @param   pVM         Pointer to the VM.
    11191157 * @param   pVCpu       The VM CPU to operate on.
    1120  * @param   pCtx        Guest context
     1158 * @param   pCtx        Pointer to the guest CPU context.
    11211159 */
    11221160VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    11281166    VBOXSTRICTRC rc = VINF_SUCCESS;
    11291167    int         rc2;
    1130     uint64_t    exitCode = (uint64_t)SVM_EXIT_INVALID;
    1131     SVM_VMCB   *pVMCB;
    1132     bool        fSyncTPR  = false;
    1133     unsigned    cResume = 0;
    1134     uint8_t     u8LastTPR = 0; /* Initialized for potentially stupid compilers. */
    1135     PHMGLOBLCPUINFO pCpu = 0;
    1136     RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
     1168    uint64_t    exitCode    = (uint64_t)SVM_EXIT_INVALID;
     1169    SVM_VMCB   *pVMCB       = NULL;
     1170    bool        fSyncTPR    = false;
     1171    unsigned    cResume     = 0;
     1172    uint8_t     u8LastTPR   = 0; /* Initialized for potentially stupid compilers. */
     1173    PHMGLOBLCPUINFO pCpu    = 0;
     1174    RTCCUINTREG uOldEFlags  = ~(RTCCUINTREG)0;
    11371175#ifdef VBOX_STRICT
    11381176    RTCPUID     idCpuCheck;
     
    11451183    AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB);
    11461184
    1147     /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
     1185    /*
     1186     * We can jump to this point to resume execution after determining that a VM-exit is innocent.
    11481187     */
    11491188ResumeExecution:
     
    11521191    Assert(!HWACCMR0SuspendPending());
    11531192
    1154     /* Safety precaution; looping for too long here can have a very bad effect on the host */
     1193    /*
     1194     * Safety precaution; looping for too long here can have a very bad effect on the host.
     1195     */
    11551196    if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
    11561197    {
     
    11601201    }
    11611202
    1162     /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
     1203    /*
     1204     * Check for IRQ inhibition due to instruction fusing (sti, mov ss).
     1205     */
    11631206    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    11641207    {
     
    11661209        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
    11671210        {
    1168             /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
     1211            /*
     1212             * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
    11691213             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
    11701214             * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
     
    11951239#endif
    11961240
    1197     /* Check for pending actions that force us to go back to ring 3. */
     1241    /*
     1242     * Check for pending actions that force us to go back to ring-3.
     1243     */
    11981244    if (    VM_FF_ISPENDING(pVM, VM_FF_HWACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
    1199         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HWACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
     1245        ||  VMCPU_FF_ISPENDING(pVCpu,
     1246                                 VMCPU_FF_HWACCM_TO_R3_MASK
     1247                               | VMCPU_FF_PGM_SYNC_CR3
     1248                               | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
     1249                               | VMCPU_FF_REQUEST))
    12001250    {
    12011251        /* Check if a sync operation is pending. */
     
    12691319#endif
    12701320
    1271     /* When external interrupts are pending, we should exit the VM when IF is set. */
    1272     /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
     1321    /*
     1322     * When external interrupts are pending, we should exit the VM when IF is set.
     1323     * Note: *After* VM_FF_INHIBIT_INTERRUPTS check!!
     1324     */
    12731325    rc = hmR0SvmCheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
    12741326    if (RT_FAILURE(rc))
    12751327        goto end;
    12761328
    1277     /* TPR caching using CR8 is only available in 64 bits mode or with 32 bits guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is supported. */
    1278     /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!!!!! (no longer true)
     1329    /*
     1330     * TPR caching using CR8 is only available in 64-bit mode or with 32-bit guests when X86_CPUID_AMD_FEATURE_ECX_CR8L is
     1331     * supported.
      1332     * Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring-3 (lock)! (no longer true)
    12791333     */
    12801334    /** @todo query and update the TPR only when it could have been changed (mmio access)
     
    12981352            }
    12991353            else
    1300                 /* No interrupts are pending, so we don't need to be explicitely notified.
     1354            {
     1355                /*
      1356                 * No interrupts are pending, so we don't need to be explicitly notified.
    13011357                 * There are enough world switches for detecting pending interrupts.
    13021358                 */
    13031359                hmR0SvmSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
     1360            }
    13041361        }
    13051362        else
     
    13131370            }
    13141371            else
    1315                 /* No interrupts are pending, so we don't need to be explicitely notified.
     1372            {
     1373                /*
      1374                 * No interrupts are pending, so we don't need to be explicitly notified.
    13161375                 * There are enough world switches for detecting pending interrupts.
    13171376                 */
    13181377                pVMCB->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
     1378            }
    13191379        }
    13201380        fSyncTPR = !fPending;
     
    13491409    VMMR0LogFlushDisable(pVCpu);
    13501410
    1351     /* Load the guest state; *must* be here as it sets up the shadow cr0 for lazy fpu syncing! */
     1411    /*
     1412     * Load the guest state; *must* be here as it sets up the shadow CR0 for lazy FPU syncing!
     1413     */
    13521414    rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
    13531415    if (RT_UNLIKELY(rc != VINF_SUCCESS))
     
    13581420
    13591421#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
    1360     /* Disable interrupts to make sure a poke will interrupt execution.
     1422    /*
     1423     * Disable interrupts to make sure a poke will interrupt execution.
    13611424     * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
    13621425     */
     
    13661429    STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);
    13671430
    1368     /*
    1369      * Setup TLB control and ASID in the VMCB.
    1370      */
     1431    /* Setup TLB control and ASID in the VMCB. */
    13711432    hmR0SvmSetupTLB(pVM, pVCpu);
    13721433
     
    13921453    ASMAtomicWriteBool(&pVCpu->hwaccm.s.fCheckedTLBFlush, false);
    13931454    ASMAtomicIncU32(&pVCpu->hwaccm.s.cWorldSwitchExits);
    1394     /* Possibly the last TSC value seen by the guest (too high) (only when we're in tsc offset mode). */
     1455    /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */
    13951456    if (!(pVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
    13961457        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVMCB->ctrl.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
     
    15361597    }
    15371598
    1538     /* Let's first sync back eip, esp, and eflags. */
     1599    /* Let's first sync back EIP, ESP, and EFLAGS. */
    15391600    pCtx->rip        = pVMCB->guest.u64RIP;
    15401601    pCtx->rsp        = pVMCB->guest.u64RSP;
     
    15431604    pCtx->rax        = pVMCB->guest.u64RAX;
    15441605
    1545     /* Save all the MSRs that can be changed by the guest without causing a world switch. (fs & gs base are saved with SVM_READ_SELREG) */
     1606    /*
     1607     * Save all the MSRs that can be changed by the guest without causing a world switch.
     1608     * FS & GS base are saved with SVM_READ_SELREG.
     1609     */
    15461610    pCtx->msrSTAR         = pVMCB->guest.u64STAR;            /* legacy syscall eip, cs & ss */
    1547     pCtx->msrLSTAR        = pVMCB->guest.u64LSTAR;           /* 64 bits mode syscall rip */
     1611    pCtx->msrLSTAR        = pVMCB->guest.u64LSTAR;           /* 64-bit mode syscall rip */
    15481612    pCtx->msrCSTAR        = pVMCB->guest.u64CSTAR;           /* compatibility mode syscall rip */
    15491613    pCtx->msrSFMASK       = pVMCB->guest.u64SFMASK;          /* syscall flag mask */
     
    15641628    SVM_READ_SELREG(GS, gs);
    15651629
    1566     /* Correct the hidden CS granularity flag.  Haven't seen it being wrong in
    1567        any other register (yet). */
     1630    /*
     1631     * Correct the hidden CS granularity flag. Haven't seen it being wrong in any other
     1632     * register (yet).
     1633     */
    15681634    if (   !pCtx->csHid.Attr.n.u1Granularity
    15691635        &&  pCtx->csHid.Attr.n.u1Present
     
    15891655    /*
    15901656     * Correct the hidden SS DPL field. It can be wrong on certain CPUs
    1591      * sometimes (seen it on AMD Fusion APUs with 64bit guests). The CPU
     1657     * sometimes (seen it on AMD Fusion CPUs with 64-bit guests). The CPU
    15921658     * always uses the CPL field in the VMCB instead of the DPL in the hidden
    1593      * SS (chapter 15.5.1 Basic operation).
      1659     * SS (AMD spec. chapter 15.5.1, "Basic Operation").
    15941660     */
    15951661    Assert(!(pVMCB->guest.u8CPL & ~0x3));
    15961662    pCtx->ssHid.Attr.n.u2Dpl = pVMCB->guest.u8CPL & 0x3;
    15971663
    1598     /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR; must sync everything otherwise we can get out of sync when jumping to ring 3. */
     1664    /*
     1665     * Remaining guest CPU context: TR, IDTR, GDTR, LDTR;
     1666     * must sync everything otherwise we can get out of sync when jumping back to ring-3.
     1667     */
    15991668    SVM_READ_SELREG(LDTR, ldtr);
    16001669    SVM_READ_SELREG(TR, tr);
     
    16061675    pCtx->idtr.pIdt         = pVMCB->guest.IDTR.u64Base;
    16071676
    1608     /* Note: no reason to sync back the CRx and DRx registers. They can't be changed by the guest. */
    1609     /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
    1610     if (    pVM->hwaccm.s.fNestedPaging
    1611         &&  pCtx->cr3 != pVMCB->guest.u64CR3)
     1677    /*
     1678     * No reason to sync back the CRx and DRx registers as they cannot be changed by the guest
      1679     * unless in the nested paging case where CR3 & CR4 can be changed by the guest.
     1680     */
     1681    if (   pVM->hwaccm.s.fNestedPaging
     1682        && pCtx->cr3 != pVMCB->guest.u64CR3)
    16121683    {
    16131684        CPUMSetGuestCR3(pVCpu, pVMCB->guest.u64CR3);
     
    16181689    VMMR0LogFlushEnable(pVCpu);
    16191690
    1620     /* Take care of instruction fusing (sti, mov ss) (see 15.20.5 Interrupt Shadows) */
     1691    /* Take care of instruction fusing (sti, mov ss) (see AMD spec. 15.20.5 Interrupt Shadows) */
    16211692    if (pVMCB->ctrl.u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)
    16221693    {
     
    16371708    pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
    16381709    if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
    1639         &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
     1710            /* we don't care about 'int xx' as the instruction will be restarted. */
     1711        &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT)
    16401712    {
    16411713        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
     
    16821754            if ((uint8_t)(u8LastTPR >> 4) != pVMCB->ctrl.IntCtrl.n.u8VTPR)
    16831755            {
    1684                 rc2 = PDMApicSetTPR(pVCpu, pVMCB->ctrl.IntCtrl.n.u8VTPR << 4);   /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
     1756                /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
     1757                rc2 = PDMApicSetTPR(pVCpu, pVMCB->ctrl.IntCtrl.n.u8VTPR << 4);
    16851758                AssertRC(rc2);
    16861759            }
     
    16941767#endif
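
The << 4 above reflects that CR8 only holds the task-priority class, i.e. bits 7:4 of the APIC TPR MMIO register; as a sketch:

    #include <stdint.h>

    static uint8_t ApicTprFromCr8(uint8_t uCr8) { return (uint8_t)(uCr8 << 4); } /* class into TPR bits 7:4 */
    static uint8_t Cr8FromApicTpr(uint8_t uTpr) { return (uint8_t)(uTpr >> 4); } /* TPR bits 7:4 down to CR8 bits 3:0 */
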
    16951768#if ARCH_BITS == 64 /* for the time being */
    1696     VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pVMCB->ctrl.u64ExitInfo1, pVMCB->ctrl.u64ExitInfo2, pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
     1769    VBOXVMM_R0_HMSVM_VMEXIT(pVCpu, pCtx, exitCode, pVMCB->ctrl.u64ExitInfo1, pVMCB->ctrl.u64ExitInfo2,
     1770                            pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX);
    16971771#endif
    16981772    STAM_PROFILE_ADV_STOP_START(&pVCpu->hwaccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);
     
    17801854#ifdef VBOX_ALWAYS_TRAP_PF
    17811855            if (pVM->hwaccm.s.fNestedPaging)
    1782             {   /* A genuine pagefault.
    1783                  * Forward the trap to the guest by injecting the exception and resuming execution.
     1856            {
     1857                /*
     1858                 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
    17841859                 */
    1785                 Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
     1860                Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip,
     1861                     uFaultAddress, errCode, (RTGCPTR)pCtx->rsp));
    17861862                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestPF);
    17871863
     
    18121888            {
    18131889                RTGCPHYS GCPhysApicBase, GCPhys;
    1814                 PDMApicGetBase(pVM, &GCPhysApicBase);   /* @todo cache this */
     1890                PDMApicGetBase(pVM, &GCPhysApicBase);   /** @todo cache this */
    18151891                GCPhysApicBase &= PAGE_BASE_GC_MASK;
    18161892
     
    18401916            Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
    18411917            if (rc == VINF_SUCCESS)
    1842             {   /* We've successfully synced our shadow pages, so let's just continue execution. */
     1918            {
     1919                /* We've successfully synced our shadow pages, so let's just continue execution. */
    18431920                Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode));
    18441921                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     
    18471924                goto ResumeExecution;
    18481925            }
    1849             else
    1850             if (rc == VINF_EM_RAW_GUEST_TRAP)
    1851             {   /* A genuine pagefault.
    1852                  * Forward the trap to the guest by injecting the exception and resuming execution.
     1926            else if (rc == VINF_EM_RAW_GUEST_TRAP)
     1927            {
     1928                /*
     1929                 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
    18531930                 */
    18541931                Log2(("Forward page fault to the guest\n"));
     
    19151992            Event.n.u8Vector = vector;
    19161993
    1917             switch(vector)
    1918             {
    1919             case X86_XCPT_GP:
    1920                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
    1921                 Event.n.u1ErrorCodeValid    = 1;
    1922                 Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
    1923                 break;
    1924             case X86_XCPT_BP:
    1925                 /** Saves the wrong EIP on the stack (pointing to the int3 instead of the next instruction. */
    1926                 break;
    1927             case X86_XCPT_DE:
    1928                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
    1929                 break;
    1930             case X86_XCPT_UD:
    1931                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
    1932                 break;
    1933             case X86_XCPT_SS:
    1934                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
    1935                 Event.n.u1ErrorCodeValid    = 1;
    1936                 Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
    1937                 break;
    1938             case X86_XCPT_NP:
    1939                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
    1940                 Event.n.u1ErrorCodeValid    = 1;
    1941                 Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
    1942                 break;
     1994            switch (vector)
     1995            {
     1996                case X86_XCPT_GP:
     1997                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestGP);
     1998                    Event.n.u1ErrorCodeValid    = 1;
     1999                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     2000                    break;
     2001                case X86_XCPT_BP:
      2002                    /** Saves the wrong EIP on the stack (pointing to the int3 instead of the next instruction). */
     2003                    break;
     2004                case X86_XCPT_DE:
     2005                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestDE);
     2006                    break;
     2007                case X86_XCPT_UD:
     2008                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestUD);
     2009                    break;
     2010                case X86_XCPT_SS:
     2011                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestSS);
     2012                    Event.n.u1ErrorCodeValid    = 1;
     2013                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     2014                    break;
     2015                case X86_XCPT_NP:
     2016                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitGuestNP);
     2017                    Event.n.u1ErrorCodeValid    = 1;
     2018                    Event.n.u32ErrorCode        = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */
     2019                    break;
    19432020            }
    19442021            Log(("Trap %x at %04x:%RGv esi=%x\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, pCtx->esi));
     
    19772054        {
    19782055            RTGCPHYS GCPhysApicBase;
    1979             PDMApicGetBase(pVM, &GCPhysApicBase);   /* @todo cache this */
     2056            PDMApicGetBase(pVM, &GCPhysApicBase);   /** @todo cache this */
    19802057            GCPhysApicBase &= PAGE_BASE_GC_MASK;
    19812058
     
    20392116            || rc == VERR_PAGE_TABLE_NOT_PRESENT
    20402117            || rc == VERR_PAGE_NOT_PRESENT)
    2041         {   /* We've successfully synced our shadow pages, so let's just continue execution. */
     2118        {
     2119            /* We've successfully synced our shadow pages, so let's just continue execution. */
    20422120            Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode));
    20432121            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitShadowPF);
     
    21702248        switch (exitCode - SVM_EXIT_WRITE_CR0)
    21712249        {
    2172         case 0:
    2173             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
    2174             break;
    2175         case 2:
    2176             break;
    2177         case 3:
    2178             Assert(!pVM->hwaccm.s.fNestedPaging);
    2179             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
    2180             break;
    2181         case 4:
    2182             pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
    2183             break;
    2184         case 8:
    2185             break;
    2186         default:
    2187             AssertFailed();
     2250            case 0:
     2251                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     2252                break;
     2253            case 2:
     2254                break;
     2255            case 3:
     2256                Assert(!pVM->hwaccm.s.fNestedPaging);
     2257                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
     2258                break;
     2259            case 4:
     2260                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
     2261                break;
     2262            case 8:
     2263                break;
     2264            default:
     2265                AssertFailed();
    21882266        }
    21892267        if (rc == VINF_SUCCESS)
    21902268        {
    21912269            /* EIP has been updated already. */
    2192 
    21932270            /* Only resume if successful. */
    21942271            goto ResumeExecution;
     
    22092286        {
    22102287            /* EIP has been updated already. */
    2211 
    22122288            /* Only resume if successful. */
    22132289            goto ResumeExecution;
     
    22252301        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitDRxWrite);
    22262302
    2227         if (    !DBGFIsStepping(pVCpu)
    2228             &&  !CPUMIsHyperDebugStateActive(pVCpu))
     2303        if (   !DBGFIsStepping(pVCpu)
     2304            && !CPUMIsHyperDebugStateActive(pVCpu))
    22292305        {
    22302306            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
     
    22652341            STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxContextSwitch);
    22662342
    2267             /* Disable drx move intercepts. */
     2343            /* Disable DRx move intercepts. */
    22682344            pVMCB->ctrl.u16InterceptRdDRx = 0;
    22692345            pVMCB->ctrl.u16InterceptWrDRx = 0;
     
    22792355        {
    22802356            /* EIP has been updated already. */
    2281 
    22822357            /* Only resume if successful. */
    22832358            goto ResumeExecution;
     
    23012376            uAndVal = 0xff;
    23022377        }
    2303         else
    2304         if (IoExitInfo.n.u1OP16)
     2378        else if (IoExitInfo.n.u1OP16)
    23052379        {
    23062380            uIOSize = 2;
    23072381            uAndVal = 0xffff;
    23082382        }
    2309         else
    2310         if (IoExitInfo.n.u1OP32)
     2383        else if (IoExitInfo.n.u1OP32)
    23112384        {
    23122385            uIOSize = 4;
     
    23472420        else
    23482421        {
    2349             /* normal in/out */
     2422            /* Normal in/out */
    23502423            Assert(!IoExitInfo.n.u1REP);
    23512424
    23522425            if (IoExitInfo.n.u1Type == 0)
    23532426            {
    2354                 Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize));
     2427                Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal,
     2428                      uIOSize));
    23552429                STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
    23562430                rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
    23572431                if (rc == VINF_IOM_R3_IOPORT_WRITE)
    2358                     HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
     2432                {
     2433                    HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     2434                                                   uAndVal, uIOSize);
     2435                }
    23592436            }
    23602437            else
     
    23682445                    /* Write back to the EAX register. */
    23692446                    pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
    2370                     Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal, uIOSize));
     2447                    Log2(("IOMIOPortRead %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, u32Val & uAndVal,
     2448                          uIOSize));
    23712449                }
    2372                 else
    2373                 if (rc == VINF_IOM_R3_IOPORT_READ)
    2374                     HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
    2375             }
    2376         }
     2450                else if (rc == VINF_IOM_R3_IOPORT_READ)
     2451                {
     2452                    HWACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,
     2453                                                  uAndVal, uIOSize);
     2454                }
     2455            }
     2456        }
     2457
    23772458        /*
    23782459         * Handled the I/O return codes.
     
    23892470                {
    23902471                    /* IO operation lookup arrays. */
    2391                     static uint32_t const aIOSize[4] = {1, 2, 0, 4};
     2472                    static uint32_t const aIOSize[4] = { 1, 2, 0, 4 };
    23922473
    23932474                    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatDRxIOCheck);
    2394                     for (unsigned i=0;i<4;i++)
     2475                    for (unsigned i = 0; i < 4; i++)
    23952476                    {
    23962477                        unsigned uBPLen = aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)];
     
    24082489                            pCtx->dr[6] |= (uint64_t)RT_BIT(i);
    24092490
    2410                             /* Note: AMD64 Architecture Programmer's Manual 13.1:
    2411                              * Bits 15:13 of the DR6 register is never cleared by the processor and must be cleared by software after
    2412                              * the contents have been read.
     2491                            /*
     2492                             * Note: AMD64 Architecture Programmer's Manual 13.1:
      2493                             * Bits 15:13 of the DR6 register are never cleared by the processor and must be cleared
     2494                             * by software after the contents have been read.
    24132495                             */
    24142496                            pVMCB->guest.u64DR6 = pCtx->dr[6];
     
    25002582        break;
    25012583    }
    2502 
    25032584
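
Per the AMD64 APM 13.1 note above, DR6 bits 15:13 are sticky: the processor never clears them, so software must after reading DR6. A sketch of the hit-recording step the handler performs (only the OR is taken from the hunk above; the rest is commentary):

    #include <stdint.h>

    /* Record a hit on I/O breakpoint iBp (0..3) in DR6. */
    static uint64_t RecordIoBreakpointHit(uint64_t uDr6, unsigned iBp)
    {
        uDr6 |= UINT64_C(1) << iBp; /* set the matching B0-B3 bit */
        /* Bits 15:13 remain set until software clears them, which is why the
           handler copies the updated DR6 back into the VMCB explicitly. */
        return uDr6;
    }
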
    25042585    case SVM_EXIT_VMMCALL:
     
    25322613    }
    25332614
    2534     /* Emulate in ring 3. */
     2615    /* Emulate in ring-3. */
    25352616    case SVM_EXIT_MSR:
    25362617    {
     
    25562637        }
    25572638
    2558         /* Note: the intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */
     2639        /*
      2640         * The Intel spec. claims there's a REX version of RDMSR that's slightly different,
     2641         * so we play safe by completely disassembling the instruction.
     2642         */
    25592643        STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hwaccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);
    25602644        Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
     
    25632647        {
    25642648            /* EIP has been updated already. */
    2565 
    25662649            /* Only resume if successful. */
    25672650            goto ResumeExecution;
     
    25712654    }
    25722655
    2573     case SVM_EXIT_TASK_SWITCH:          /* too complicated to emulate, so fall back to the recompiler*/
     2656    case SVM_EXIT_TASK_SWITCH:          /* too complicated to emulate, so fall back to the recompiler */
    25742657        Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVMCB->ctrl.u64ExitInfo2));
    25752658        if (    !(pVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP))
     
    25772660        {
    25782661            SVM_EVENT Event;
    2579 
    25802662            Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
    25812663
    25822664            /* Caused by an injected interrupt. */
    25832665            pVCpu->hwaccm.s.Event.fPending = false;
    2584 
    25852666            switch (Event.n.u3Type)
    25862667            {
    2587             case SVM_EVENT_EXTERNAL_IRQ:
    2588             case SVM_EVENT_NMI:
    2589                 Log(("SVM_EXIT_TASK_SWITCH: reassert trap %d\n", Event.n.u8Vector));
    2590                 Assert(!Event.n.u1ErrorCodeValid);
    2591                 rc2 = TRPMAssertTrap(pVCpu, Event.n.u8Vector, TRPM_HARDWARE_INT);
    2592                 AssertRC(rc2);
    2593                 break;
    2594 
    2595             default:
    2596                 /* Exceptions and software interrupts can just be restarted. */
    2597                 break;
     2668                case SVM_EVENT_EXTERNAL_IRQ:
     2669                case SVM_EVENT_NMI:
     2670                    Log(("SVM_EXIT_TASK_SWITCH: reassert trap %d\n", Event.n.u8Vector));
     2671                    Assert(!Event.n.u1ErrorCodeValid);
     2672                    rc2 = TRPMAssertTrap(pVCpu, Event.n.u8Vector, TRPM_HARDWARE_INT);
     2673                    AssertRC(rc2);
     2674                    break;
     2675
     2676                default:
     2677                    /* Exceptions and software interrupts can just be restarted. */
     2678                    break;
    25982679            }
    25992680        }
     
    26282709end:
    26292710
    2630     /* We now going back to ring-3, so clear the action flag. */
     2711    /*
     2712     * We are now going back to ring-3, so clear the forced action flag.
     2713     */
    26312714    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
    26322715
    2633     /* Signal changes for the recompiler. */
    2634     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
    2635 
    2636     /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */
     2716    /*
     2717     * Signal changes to the recompiler.
     2718     */
     2719    CPUMSetChangedFlags(pVCpu,
     2720                          CPUM_CHANGED_SYSENTER_MSR
     2721                        | CPUM_CHANGED_LDTR
     2722                        | CPUM_CHANGED_GDTR
     2723                        | CPUM_CHANGED_IDTR
     2724                        | CPUM_CHANGED_TR
     2725                        | CPUM_CHANGED_HIDDEN_SEL_REGS);
     2726
     2727    /*
     2728     * If we executed vmrun and an external IRQ was pending, then we don't have to do a full sync the next time.
     2729     */
    26372730    if (exitCode == SVM_EXIT_INTR)
    26382731    {
     
    26492742    }
    26502743
    2651     /* translate into a less severe return code */
      2744    /* Translate into a less severe return code. */
    26522745    if (rc == VERR_EM_INTERPRETER)
    26532746        rc = VINF_EM_RAW_EMULATE_INSTR;
     
    26682761}
    26692762
     2763
    26702764/**
    2671  * Emulate simple mov tpr instruction
     2765 * Emulate simple mov tpr instruction.
    26722766 *
    26732767 * @returns VBox status code.
    2674  * @param   pVM         The VM to operate on.
     2768 * @param   pVM         Pointer to the VM.
    26752769 * @param   pVCpu       The VM CPU to operate on.
    2676  * @param   pCtx        CPU context
     2770 * @param   pCtx        Pointer to the guest CPU context.
    26772771 */
    26782772static int hmR0SvmEmulateTprVMMCall(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    26822776    LogFlow(("Emulated VMMCall TPR access replacement at %RGv\n", pCtx->rip));
    26832777
    2684     while (true)
     2778    for (;;)
    26852779    {
    26862780        bool    fPending;
     
    26932787        switch(pPatch->enmType)
    26942788        {
    2695         case HWACCMTPRINSTR_READ:
    2696             /* TPR caching in CR8 */
    2697             rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending);
    2698             AssertRC(rc);
    2699 
    2700             rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
    2701             AssertRC(rc);
    2702 
    2703             LogFlow(("Emulated read successfully\n"));
    2704             pCtx->rip += pPatch->cbOp;
    2705             break;
    2706 
    2707         case HWACCMTPRINSTR_WRITE_REG:
    2708         case HWACCMTPRINSTR_WRITE_IMM:
    2709             /* Fetch the new TPR value */
    2710             if (pPatch->enmType == HWACCMTPRINSTR_WRITE_REG)
    2711             {
    2712                 uint32_t val;
    2713 
    2714                 rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &val);
     2789            case HWACCMTPRINSTR_READ:
     2790                /* TPR caching in CR8 */
     2791                rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending);
    27152792                AssertRC(rc);
    2716                 u8Tpr = val;
    2717             }
    2718             else
    2719                 u8Tpr = (uint8_t)pPatch->uSrcOperand;
    2720 
    2721             rc = PDMApicSetTPR(pVCpu, u8Tpr);
    2722             AssertRC(rc);
    2723             LogFlow(("Emulated write successfully\n"));
    2724             pCtx->rip += pPatch->cbOp;
    2725             break;
     2793
     2794                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
     2795                AssertRC(rc);
     2796
     2797                LogFlow(("Emulated read successfully\n"));
     2798                pCtx->rip += pPatch->cbOp;
     2799                break;
     2800
     2801            case HWACCMTPRINSTR_WRITE_REG:
     2802            case HWACCMTPRINSTR_WRITE_IMM:
     2803                /* Fetch the new TPR value */
     2804                if (pPatch->enmType == HWACCMTPRINSTR_WRITE_REG)
     2805                {
     2806                    uint32_t val;
     2807
     2808                    rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &val);
     2809                    AssertRC(rc);
     2810                    u8Tpr = val;
     2811                }
     2812                else
     2813                    u8Tpr = (uint8_t)pPatch->uSrcOperand;
     2814
     2815                rc = PDMApicSetTPR(pVCpu, u8Tpr);
     2816                AssertRC(rc);
     2817                LogFlow(("Emulated write successfully\n"));
     2818                pCtx->rip += pPatch->cbOp;
     2819                break;
     2820
    27262821        default:
    2727             AssertMsgFailedReturn(("Unexpected type %d\n", pPatch->enmType), VERR_HMSVM_UNEXPECTED_PATCH_TYPE);
     2822                AssertMsgFailedReturn(("Unexpected type %d\n", pPatch->enmType), VERR_HMSVM_UNEXPECTED_PATCH_TYPE);
    27282823        }
    27292824    }
     
    27332828
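To make the dispatch above concrete: for a hypothetical patch record describing a TPR write with an immediate operand (enmType = HWACCMTPRINSTR_WRITE_IMM, uSrcOperand = 8), one loop iteration reduces to roughly:

    uint8_t u8Tpr = (uint8_t)pPatch->uSrcOperand;   /* immediate TPR value, here 8 */
    rc = PDMApicSetTPR(pVCpu, u8Tpr);               /* update the virtual APIC TPR */
    AssertRC(rc);
    pCtx->rip += pPatch->cbOp;                      /* skip the patched instruction */

The surrounding for (;;) presumably re-fetches the patch record for the updated RIP and stops once no patch matches.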
    27342829/**
    2735  * Enters the AMD-V session
     2830 * Enters the AMD-V session.
    27362831 *
    27372832 * @returns VBox status code.
    2738  * @param   pVM         The VM to operate on.
     2833 * @param   pVM         Pointer to the VM.
    27392834 * @param   pVCpu       The VM CPU to operate on.
    2740  * @param   pCpu        CPU info struct
     2835 * @param   pCpu        Pointer to the CPU info struct.
    27412836 */
    27422837VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
     
    27552850
    27562851/**
    2757  * Leaves the AMD-V session
     2852 * Leaves the AMD-V session.
    27582853 *
    27592854 * @returns VBox status code.
    2760  * @param   pVM         The VM to operate on.
     2855 * @param   pVM         Pointer to the VM.
    27612856 * @param   pVCpu       The VM CPU to operate on.
    2762  * @param   pCtx        CPU context
     2857 * @param   pCtx        Pointer to the guest CPU context.
    27632858 */
    27642859VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     
    27942889
    27952890
     2891/**
     2892 * Interprets INVLPG.
     2893 *
     2894 * @returns VBox status code.
     2895 * @param   pVCpu           Pointer to the VMCPU.
     2896 * @param   pCpu            Pointer to the disassembler CPU state.
     2897 * @param   pRegFrame       Pointer to the register frame.
     2898 * @param   uASID           Tagged TLB id for the guest.
     2899 */
    27962900static int hmR0svmInterpretInvlPg(PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
    27972901{
     
    28062910    switch(param1.type)
    28072911    {
    2808     case PARMTYPE_IMMEDIATE:
    2809     case PARMTYPE_ADDRESS:
    2810         if(!(param1.flags & (PARAM_VAL32|PARAM_VAL64)))
     2912        case PARMTYPE_IMMEDIATE:
     2913        case PARMTYPE_ADDRESS:
     2914            if (!(param1.flags & (PARAM_VAL32 | PARAM_VAL64)))
     2915                return VERR_EM_INTERPRETER;
     2916            addr = param1.val.val64;
     2917            break;
     2918
     2919        default:
    28112920            return VERR_EM_INTERPRETER;
    2812         addr = param1.val.val64;
    2813         break;
    2814 
    2815     default:
    2816         return VERR_EM_INTERPRETER;
    28172921    }
    28182922
     
    28282932}
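The elided tail of this worker presumably hands the decoded linear address to PGM; a minimal sketch under that assumption (PGMInvalidatePage is the general VMM service for flushing a single page mapping):

    /* Sketch only -- the actual elided code may differ in details. */
    rc = PGMInvalidatePage(pVCpu, addr);
    if (RT_SUCCESS(rc))
        return VINF_SUCCESS;
    return VERR_EM_INTERPRETER;    /* let the interpreter/recompiler retry */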
    28292933
     2934
    28302935/**
    2831  * Interprets INVLPG
     2936 * Interprets INVLPG.
    28322937 *
    28332938 * @returns VBox status code.
     
    28362941 * @retval  VERR_*                  Fatal errors.
    28372942 *
    2838  * @param   pVM         The VM handle.
    2839  * @param   pRegFrame   The register frame.
    2840  * @param   ASID        Tagged TLB id for the guest
     2943 * @param   pVM         Pointer to the VM.
     2944 * @param   pRegFrame   Pointer to the register frame.
     2945 * @param   uASID       Tagged TLB id for the guest.
    28412946 *
    2842  *                     Updates the EIP if an instruction was executed successfully.
     2947 * @remarks Updates the EIP if an instruction was executed successfully.
    28432948 */
    28442949static int hmR0SvmInterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
    28452950{
    28462951    /*
    2847      * Only allow 32 & 64 bits code.
     2952     * Only allow 32 & 64 bit code.
    28482953     */
    28492954    DISCPUMODE enmMode = SELMGetCpuModeFromSelector(pVCpu, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid);
     
    28772982
    28782983/**
    2879  * Invalidates a guest page
     2984 * Invalidates a guest page by guest virtual address.
    28802985 *
    28812986 * @returns VBox status code.
    2882  * @param   pVM         The VM to operate on.
     2987 * @param   pVM         Pointer to the VM.
    28832988 * @param   pVCpu       The VM CPU to operate on.
    2884  * @param   GCVirt      Page to invalidate
     2989 * @param   GCVirt      Guest virtual address of the page to invalidate.
    28852990 */
    28862991VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
     
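AMD-V flushes a single guest-linear translation with the INVLPGA instruction, tagged by ASID so entries belonging to other address spaces survive. A minimal sketch of the core of the elided body, assuming the SVMR0InvlpgA wrapper (declared in HWSVMR0.h outside the hunks shown below) and the usual VMCB field path for the current ASID:

    /* Sketch: invalidate just this page for the guest's ASID. */
    SVMR0InvlpgA(GCVirt, pVMCB->ctrl.TLBCtrl.n.u32ASID);
    return VINF_SUCCESS;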
    29143019#if 0 /* obsolete, but left here for clarification. */
    29153020/**
    2916  * Invalidates a guest page by physical address
     3021 * Invalidates a guest page by physical address.
    29173022 *
    29183023 * @returns VBox status code.
    2919  * @param   pVM         The VM to operate on.
     3024 * @param   pVM         Pointer to the VM.
    29203025 * @param   pVCpu       The VM CPU to operate on.
    2921  * @param   GCPhys      Page to invalidate
     3026 * @param   GCPhys      Guest physical address of the page to invalidate.
    29223027 */
    29233028VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
     
    29313036#endif
    29323037
     3038
    29333039#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    29343040/**
    2935  * Prepares for and executes VMRUN (64 bits guests from a 32 bits hosts).
     3041 * Prepares for and executes VMRUN (64-bit guests from a 32-bit host).
    29363042 *
    29373043 * @returns VBox status code.
    29383044 * @param   pVMCBHostPhys   Physical address of host VMCB.
    29393045 * @param   pVMCBPhys       Physical address of the VMCB.
    2940  * @param   pCtx            Guest context.
    2941  * @param   pVM             The VM to operate on.
    2942  * @param   pVCpu           The VMCPU to operate on.
     3046 * @param   pCtx            Pointer to the guest CPU context.
     3047 * @param   pVM             Pointer to the VM.
     3048 * @param   pVCpu           Pointer to the VMCPU.
    29433049 */
    29443050DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu)
     
    29543060}
    29553061
     3062
    29563063/**
    2957  * Executes the specified handler in 64 mode
     3064 * Executes the specified handler in 64-bit mode.
    29583065 *
    29593066 * @returns VBox status code.
    2960  * @param   pVM         The VM to operate on.
    2961  * @param   pVCpu       The VMCPU to operate on.
    2962  * @param   pCtx        Guest context
    2963  * @param   pfnHandler  RC handler
    2964  * @param   cbParam     Number of parameters
    2965  * @param   paParam     Array of 32 bits parameters
     3067 * @param   pVM         Pointer to the VM.
     3068 * @param   pVCpu       Pointer to the VMCPU.
     3069 * @param   pCtx        Pointer to the guest CPU context.
     3070 * @param   pfnHandler  Pointer to the RC handler function.
     3071 * @param   cbParam     Number of parameters.
     3072 * @param   paParam     Array of 32-bit parameters.
    29663073 */
    29673074VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam)
  • trunk/src/VBox/VMM/VMMR0/HWSVMR0.h

    r38685 r41335  
    55
    66/*
    7  * Copyright (C) 2006-2011 Oracle Corporation
     7 * Copyright (C) 2006-2012 Oracle Corporation
    88 *
    99 * This file is part of VirtualBox Open Source Edition (OSE), as
     
    4242 *
    4343 * @returns VBox status code.
    44  * @param   pVM         The VM to operate on.
    45  * @param   pVCpu       The VMCPU to operate on.
    46  * @param   pCpu        CPU info struct
     44 * @param   pVM         Pointer to the VM.
     45 * @param   pVCpu       Pointer to the VMCPU.
     46 * @param   pCpu        Pointer to the CPU info struct.
    4747 */
    4848VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu);
     
    5252 *
    5353 * @returns VBox status code.
    54  * @param   pVM         The VM to operate on.
    55  * @param   pVCpu       The VMCPU to operate on.
    56  * @param   pCtx        CPU context
     54 * @param   pVM         Pointer to the VM.
     55 * @param   pVCpu       Pointer to the VMCPU.
     56 * @param   pCtx        Pointer to the guest CPU context.
    5757 */
    5858VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     
    6262 *
    6363 * @returns VBox status code.
    64  * @param   pCpu            CPU info struct
    65  * @param   pVM             The VM to operate on. (can be NULL after a resume)
    66  * @param   pvPageCpu       Pointer to the global cpu page
    67  * @param   pPageCpuPhys    Physical address of the global cpu page
     64 * @param   pCpu            Pointer to the CPU info struct.
     65 * @param   pVM             Pointer to the VM (can be NULL after a resume!).
     66 * @param   pvPageCpu       Pointer to the global CPU page.
      67 * @param   HCPhysCpuPage   Physical address of the global CPU page.
    6868 */
    6969VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage);
     
    7373 *
    7474 * @returns VBox status code.
    75  * @param   pCpu            CPU info struct
    76  * @param   pvPageCpu       Pointer to the global cpu page
    77  * @param   pPageCpuPhys    Physical address of the global cpu page
     75 * @param   pCpu            Pointer to the CPU info struct.
     76 * @param   pvPageCpu       Pointer to the global CPU page.
     77 * @param   pPageCpuPhys    Physical address of the global CPU page.
    7878 */
    7979VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
     
    8383 *
    8484 * @returns VBox status code.
    85  * @param   pVM         The VM to operate on.
     85 * @param   pVM         Pointer to the VM.
    8686 */
    8787VMMR0DECL(int) SVMR0InitVM(PVM pVM);
     
    9191 *
    9292 * @returns VBox status code.
    93  * @param   pVM         The VM to operate on.
     93 * @param   pVM         Pointer to the VM.
    9494 */
    9595VMMR0DECL(int) SVMR0TermVM(PVM pVM);
     
    9999 *
    100100 * @returns VBox status code.
    101  * @param   pVM         The VM to operate on.
     101 * @param   pVM         Pointer to the VM.
    102102 */
    103103VMMR0DECL(int) SVMR0SetupVM(PVM pVM);
     
    108108 *
    109109 * @returns VBox status code.
    110  * @param   pVM         The VM to operate on.
    111  * @param   pVCpu       The VMCPU to operate on.
    112  * @param   pCtx        Guest context
     110 * @param   pVM         Pointer to the VM.
     111 * @param   pVCpu       Pointer to the VMCPU.
     112 * @param   pCtx        Pointer to the guest CPU context.
    113113 */
    114114VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
     
    116116
    117117/**
    118  * Save the host state
    119  *
    120  * @returns VBox status code.
    121  * @param   pVM         The VM to operate on.
    122  * @param   pVCpu       The VMCPU to operate on.
      118 * Saves the host state.
     119 *
     120 * @returns VBox status code.
     121 * @param   pVM         Pointer to the VM.
     122 * @param   pVCpu       Pointer to the VMCPU.
    123123 */
    124124VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu);
    125125
    126126/**
    127  * Loads the guest state
    128  *
    129  * @returns VBox status code.
    130  * @param   pVM         The VM to operate on.
    131  * @param   pVCpu       The VMCPU to operate on.
    132  * @param   pCtx        Guest context
     127 * Loads the guest state.
     128 *
     129 * @returns VBox status code.
     130 * @param   pVM         Pointer to the VM.
     131 * @param   pVCpu       Pointer to the VMCPU.
     132 * @param   pCtx        Pointer to the guest CPU context.
    133133 */
    134134VMMR0DECL(int) SVMR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    135135
     136
    136137#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    137 
    138 /**
    139  * Prepares for and executes VMRUN (64 bits guests from a 32 bits hosts).
     138/**
     139 * Prepares for and executes VMRUN (64-bit guests from a 32-bit host).
    140140 *
    141141 * @returns VBox status code.
    142142 * @param   pVMCBHostPhys   Physical address of host VMCB.
    143143 * @param   pVMCBPhys       Physical address of the VMCB.
    144  * @param   pCtx            Guest context.
    145  * @param   pVM             The VM to operate on.
    146  * @param   pVCpu           The VMCPU to operate on. (not used)
     144 * @param   pCtx            Pointer to the guest CPU context.
     145 * @param   pVM             Pointer to the VM.
     146 * @param   pVCpu           Pointer to the VMCPU. (not used)
    147147 */
    148148DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
    149149
    150150/**
    151  * Executes the specified handler in 64 mode
    152  *
    153  * @returns VBox status code.
    154  * @param   pVM         The VM to operate on.
    155  * @param   pVCpu       The VMCPU to operate on.
    156  * @param   pCtx        Guest context
    157  * @param   pfnHandler  RC handler
    158  * @param   cbParam     Number of parameters
    159  * @param   paParam     Array of 32 bits parameters
    160  */
    161 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam, uint32_t *paParam);
    162 
     151 * Executes the specified handler in 64-bit mode.
     152 *
     153 * @returns VBox status code.
     154 * @param   pVM         Pointer to the VM.
     155 * @param   pVCpu       Pointer to the VMCPU.
     156 * @param   pCtx        Pointer to the guest CPU context.
     157 * @param   pfnHandler  Pointer to the RC handler function.
     158 * @param   cbParam     Number of parameters.
     159 * @param   paParam     Array of 32-bit parameters.
     160 */
     161VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
     162                                         uint32_t *paParam);
    163163#endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
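A hypothetical call-site sketch for the 64-bit handler trampoline (handler address and parameter contents are illustrative only, not taken from the sources):

    uint32_t aParam[2];
    aParam[0] = 0;   /* illustrative 32-bit arguments for the handler */
    aParam[1] = 0;
    int rc = SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pfnHandler,
                                       RT_ELEMENTS(aParam), &aParam[0]);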
    164164
    165165/**
    166  * Prepares for and executes VMRUN (32 bits guests).
     166 * Prepares for and executes VMRUN (32-bit guests).
    167167 *
    168168 * @returns VBox status code.
    169169 * @param   pVMCBHostPhys   Physical address of host VMCB.
    170170 * @param   pVMCBPhys       Physical address of the VMCB.
    171  * @param   pCtx            Guest context.
    172  * @param   pVM             The VM to operate on. (not used)
    173  * @param   pVCpu           The VMCPU to operate on. (not used)
     171 * @param   pCtx            Pointer to the guest CPU context.
     172 * @param   pVM             Pointer to the VM. (not used)
     173 * @param   pVCpu           Pointer to the VMCPU. (not used)
    174174 */
    175175DECLASM(int) SVMR0VMRun(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
     
    177177
    178178/**
    179  * Prepares for and executes VMRUN (64 bits guests).
     179 * Prepares for and executes VMRUN (64-bit guests).
    180180 *
    181181 * @returns VBox status code.
    182182 * @param   pVMCBHostPhys   Physical address of host VMCB.
    183183 * @param   pVMCBPhys       Physical address of the VMCB.
    184  * @param   pCtx            Guest context.
    185  * @param   pVM             The VM to operate on. (not used)
    186  * @param   pVCpu           The VMCPU to operate on. (not used)
     184 * @param   pCtx            Pointer to the guest CPU context.
     185 * @param   pVM             Pointer to the VM. (not used)
     186 * @param   pVCpu           Pointer to the VMCPU. (not used)
    187187 */
    188188DECLASM(int) SVMR0VMRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);
     
    200200#define SVM_HIDSEGATTR_SVM2VMX(a)     (a & 0xFF) | ((a & 0x0F00) << 4)
    201201
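The SVM VMCB packs segment attributes contiguously in bits 0-11, while the VMX/CPUM hidden-register layout leaves bits 8-11 unused and keeps AVL/L/D/G in bits 12-15; the macro above simply moves the top nibble up (note the expansion is unparenthesised, which is safe in the assignment contexts below). A worked example:

    /* A flat ring-3 code segment (type=0xB, S=1, DPL=3, P=1, D=1, G=1)
       packs to 0xCFB in SVM format.  Converting:
       (0xCFB & 0xFF) | ((0xCFB & 0x0F00) << 4) = 0xFB | 0xC000 = 0xC0FB. */
    uint32_t uVmxAttr = SVM_HIDSEGATTR_SVM2VMX(0xCFB);   /* == 0xC0FB */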
    202 #define SVM_WRITE_SELREG(REG, reg)                                      \
    203 {                                                                       \
    204         pVMCB->guest.REG.u16Sel   = pCtx->reg;                          \
    205         pVMCB->guest.REG.u32Limit = pCtx->reg##Hid.u32Limit;            \
    206         pVMCB->guest.REG.u64Base  = pCtx->reg##Hid.u64Base;             \
     202#define SVM_WRITE_SELREG(REG, reg)                                                 \
     203{                                                                                  \
     204        pVMCB->guest.REG.u16Sel   = pCtx->reg;                                     \
     205        pVMCB->guest.REG.u32Limit = pCtx->reg##Hid.u32Limit;                       \
     206        pVMCB->guest.REG.u64Base  = pCtx->reg##Hid.u64Base;                        \
    207207        pVMCB->guest.REG.u16Attr  = SVM_HIDSEGATTR_VMX2SVM(pCtx->reg##Hid.Attr.u); \
    208208}
    209209
    210 #define SVM_READ_SELREG(REG, reg)                                       \
    211 {                                                                       \
    212         pCtx->reg                = pVMCB->guest.REG.u16Sel;             \
    213         pCtx->reg##Hid.u32Limit  = pVMCB->guest.REG.u32Limit;           \
    214         pCtx->reg##Hid.u64Base   = pVMCB->guest.REG.u64Base;            \
     210#define SVM_READ_SELREG(REG, reg)                                                    \
     211{                                                                                    \
     212        pCtx->reg                = pVMCB->guest.REG.u16Sel;                          \
     213        pCtx->reg##Hid.u32Limit  = pVMCB->guest.REG.u32Limit;                        \
     214        pCtx->reg##Hid.u64Base   = pVMCB->guest.REG.u64Base;                         \
    215215        pCtx->reg##Hid.Attr.u    = SVM_HIDSEGATTR_SVM2VMX(pVMCB->guest.REG.u16Attr); \
    216216}
     
    222222RT_C_DECLS_END
    223223
    224 #endif
    225 
     224#endif /* ___VMMR0_HWSVMR0_h */
     225