VirtualBox

Changeset 65989 in vbox for trunk/src/VBox/VMM


Timestamp:
Mar 7, 2017 9:36:03 PM
Author:
vboxsync
Message:

VMM: Nested Hw.virt: Implement AMD-V VMMCALL in IEM. Cleanup the code in HMAll and segregate SVM all-context code.

Location:
trunk/src/VBox/VMM
Files:
1 added
7 edited

Legend:

Lines prefixed with '+' were added in r65989, lines prefixed with '-' were removed, and unprefixed lines are unchanged context.
  • trunk/src/VBox/VMM/Makefile.kmk

r65952 → r65989

@@ -229 +229 @@
         VMMAll/DBGFAll.cpp \
         VMMAll/HMAll.cpp \
+        VMMAll/HMSVMAll.cpp \
         VMMAll/IEMAll.cpp \
         VMMAll/IEMAllAImpl.asm \
@@ -579 +580 @@
         VMMAll/GIMAllHv.cpp \
         VMMAll/GIMAllKvm.cpp \
+        VMMAll/HMAll.cpp \
+        VMMAll/HMSVMAll.cpp \
         VMMAll/MMAll.cpp \
         VMMAll/MMAllHyper.cpp \
@@ -724 +727 @@
         VMMAll/GIMAllKvm.cpp \
         VMMAll/HMAll.cpp \
+        VMMAll/HMSVMAll.cpp \
         VMMAll/IEMAll.cpp \
         VMMAll/IEMAllAImpl.asm \
  • trunk/src/VBox/VMM/VMMAll/GIMAll.cpp

r62478 → r65989

@@ -92 +92 @@
  * @retval  VINF_SUCCESS if the hypercall succeeded (even if its operation
  *          failed).
+ * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
+ *          RIP.
  * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
  * @retval  VERR_GIM_HYPERCALL_ACCESS_DENIED CPL is insufficient.
@@ -104 +106 @@
  * @param   pCtx        Pointer to the guest-CPU context.
  *
+ * @remarks The caller of this function needs to advance RIP as required.
  * @thread  EMT.
  */
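
The new VINF_GIM_HYPERCALL_CONTINUING status and the @remarks above formalize the convention that GIMHypercall() no longer advances the guest RIP itself; every caller must do so. A minimal caller-side sketch of that convention (a hypothetical exit-handler fragment; the real call sites in this changeset use hmR0SvmAdvanceRipHwAssist() and iemRegAddToRipAndClearRF() rather than the raw RIP update shown here):

    /* Hypothetical VMMCALL/VMCALL exit-handler fragment -- not part of this changeset. */
    VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, pCtx);
    if (RT_SUCCESS(VBOXSTRICTRC_VAL(rcStrict)))
    {
        if (rcStrict == VINF_SUCCESS)
            pCtx->rip += cbInstr;           /* Hypercall done: skip the VMMCALL/VMCALL instruction. */
        /* VINF_GIM_HYPERCALL_CONTINUING and VINF_GIM_R3_HYPERCALL deliberately leave RIP alone so
           the hypercall can be continued or restarted from ring-3. */
    }
    else
    {
        /* e.g. VERR_GIM_HYPERCALL_ACCESS_DENIED: the caller typically injects #UD or #GP here. */
    }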
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

r65908 → r65989

@@ -96 +96 @@
 
 
-/**
- * Flushes the guest TLB.
- *
- * @returns VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure.
- */
-VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
-{
-    LogFlow(("HMFlushTLB\n"));
-
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
-    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
-    return VINF_SUCCESS;
-}
-
 #ifdef IN_RING0
 
@@ -170 +155 @@
 #endif /* IN_RING0 */
 #ifndef IN_RC
+/**
+ * Flushes the guest TLB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(int) HMFlushTLB(PVMCPU pVCpu)
+{
+    LogFlow(("HMFlushTLB\n"));
+
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbManual);
+    return VINF_SUCCESS;
+}
 
 /**
@@ -271 +270 @@
 
     return VINF_SUCCESS;
-}
-
-#endif /* !IN_RC */
-
-/**
- * Checks if nested paging is enabled.
- *
- * @returns true if nested paging is active, false otherwise.
- * @param   pVM         The cross context VM structure.
- *
- * @remarks Works before hmR3InitFinalizeR0.
- */
-VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
-{
-    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
-}
-
-
-/**
- * Checks if both nested paging and unhampered guest execution are enabled.
- *
- * The almost complete guest execution in hardware is only applicable to VT-x.
- *
- * @returns true if we have both enabled, otherwise false.
- * @param   pVM         The cross context VM structure.
- *
- * @remarks Works before hmR3InitFinalizeR0.
- */
-VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
-{
-    return HMIsEnabled(pVM)
-        && pVM->hm.s.fNestedPaging
-        && (   pVM->hm.s.vmx.fUnrestrictedGuest
-            || pVM->hm.s.svm.fSupported);
-}
-
-
-/**
- * Checks if this VM is long-mode capable.
- *
- * @returns true if long mode is allowed, false otherwise.
- * @param   pVM         The cross context VM structure.
- */
-VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
-{
-    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
-}
-
-
-/**
- * Checks if MSR bitmaps are available. It is assumed that when it's available
- * it will be used as well.
- *
- * @returns true if MSR bitmaps are available, false otherwise.
- * @param   pVM         The cross context VM structure.
- */
-VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
-{
-    if (HMIsEnabled(pVM))
-    {
-        if (pVM->hm.s.svm.fSupported)
-            return true;
-
-        if (   pVM->hm.s.vmx.fSupported
-            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
-        {
-            return true;
-        }
-    }
-    return false;
-}
-
-
-/**
- * Return the shadow paging mode for nested paging/ept
- *
- * @returns shadow paging mode
- * @param   pVM         The cross context VM structure.
- */
-VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
-{
-    Assert(HMIsNestedPagingActive(pVM));
-    if (pVM->hm.s.svm.fSupported)
-        return PGMMODE_NESTED;
-
-    Assert(pVM->hm.s.vmx.fSupported);
-    return PGMMODE_EPT;
 }
 
@@ -410 +322 @@
     return VINF_SUCCESS;
 }
+
+
+/**
+ * Checks if nested paging is enabled.
+ *
+ * @returns true if nested paging is active, false otherwise.
+ * @param   pVM         The cross context VM structure.
+ *
+ * @remarks Works before hmR3InitFinalizeR0.
+ */
+VMM_INT_DECL(bool) HMIsNestedPagingActive(PVM pVM)
+{
+    return HMIsEnabled(pVM) && pVM->hm.s.fNestedPaging;
+}
+
+
+/**
+ * Checks if both nested paging and unhampered guest execution are enabled.
+ *
+ * The almost complete guest execution in hardware is only applicable to VT-x.
+ *
+ * @returns true if we have both enabled, otherwise false.
+ * @param   pVM         The cross context VM structure.
+ *
+ * @remarks Works before hmR3InitFinalizeR0.
+ */
+VMM_INT_DECL(bool) HMAreNestedPagingAndFullGuestExecEnabled(PVM pVM)
+{
+    return HMIsEnabled(pVM)
+        && pVM->hm.s.fNestedPaging
+        && (   pVM->hm.s.vmx.fUnrestrictedGuest
+            || pVM->hm.s.svm.fSupported);
+}
+
+
+/**
+ * Checks if this VM is long-mode capable.
+ *
+ * @returns true if long mode is allowed, false otherwise.
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(bool) HMIsLongModeAllowed(PVM pVM)
+{
+    return HMIsEnabled(pVM) && pVM->hm.s.fAllow64BitGuests;
+}
+
+
+/**
+ * Checks if MSR bitmaps are available. It is assumed that when it's available
+ * it will be used as well.
+ *
+ * @returns true if MSR bitmaps are available, false otherwise.
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(bool) HMAreMsrBitmapsAvailable(PVM pVM)
+{
+    if (HMIsEnabled(pVM))
+    {
+        if (pVM->hm.s.svm.fSupported)
+            return true;
+
+        if (   pVM->hm.s.vmx.fSupported
+            && (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+
+/**
+ * Return the shadow paging mode for nested paging/ept
+ *
+ * @returns shadow paging mode
+ * @param   pVM         The cross context VM structure.
+ */
+VMM_INT_DECL(PGMMODE) HMGetShwPagingMode(PVM pVM)
+{
+    Assert(HMIsNestedPagingActive(pVM));
+    if (pVM->hm.s.svm.fSupported)
+        return PGMMODE_NESTED;
+
+    Assert(pVM->hm.s.vmx.fSupported);
+    return PGMMODE_EPT;
+}
+#endif /* !IN_RC */
+
 
 /**
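
The hunks above are pure code motion: HMFlushTLB() and the HMIs*/HMAre*/HMGetShwPagingMode() query helpers move from the unconditionally compiled part of HMAll.cpp into the existing #ifndef IN_RC section, so raw-mode context code can no longer call them. A small sketch of a ring-3/ring-0 call site for the relocated queries (the wrapper function is hypothetical; the HM APIs are the ones shown above):

    #ifndef IN_RC
    /* Hypothetical helper: pick the shadow paging mode to use when nested paging is active. */
    static PGMMODE exampleGetShadowModeForNestedPaging(PVM pVM)
    {
        if (HMIsNestedPagingActive(pVM))
            return HMGetShwPagingMode(pVM);   /* PGMMODE_NESTED on AMD-V, PGMMODE_EPT on VT-x. */
        return PGMMODE_INVALID;               /* Caller falls back to ordinary shadow paging. */
    }
    #endif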
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

r65933 → r65989

@@ -5877 +5877 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Implements 'VMMCALL'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
+{
+    /*
+     * We do not check for presence of SVM/AMD-V here as the KVM GIM provider
+     * might patch in an invalid vmmcall instruction with an Intel vmcall
+     * instruction.
+     */
+    bool fUpdatedRipAndRF;
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
+    if (RT_SUCCESS(rcStrict))
+    {
+        if (!fUpdatedRipAndRF)
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+        return rcStrict;
+    }
+
+    return iemRaiseUndefinedOpcode(pVCpu);
+}
+
+
+/**
  * Implements 'VMLOAD'.
  */
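
Note that cbInstr is never declared in the new function body: IEM "C implementation" (CIMPL) functions receive the decoded instruction length as an implicit parameter supplied by the IEM_CIMPL_DEF_0 machinery, so iemRegAddToRipAndClearRF(pVCpu, cbInstr) skips exactly the 3-byte VMMCALL when HMSvmVmmcall() has not already updated RIP. Roughly (a simplified sketch of the macro's effect, not the verbatim definition from IEMInternal.h):

    /* Approximate effect of the macro used above; the real definition lives in IEMInternal.h. */
    #define IEM_CIMPL_DEF_0(a_Name) \
        VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint8_t cbInstr)

    /* So the function above effectively has the signature:
         VBOXSTRICTRC iemCImpl_vmmcall(PVMCPU pVCpu, uint8_t cbInstr);  */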
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

r65933 → r65989

@@ -442 +442 @@
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
 
+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** Opcode 0x0f 0x01 0xd9. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
+{
+    IEMOP_MNEMONIC(vmmcall, "vmmcall");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
+}
+
+
+/** Opcode 0x0f 0x01 0xda. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
+{
+    IEMOP_MNEMONIC(vmload, "vmload");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
+}
+
+
+/** Opcode 0x0f 0x01 0xdb. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
+{
+    IEMOP_MNEMONIC(vmsave, "vmsave");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
+}
+
+
+/** Opcode 0x0f 0x01 0xdc. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
+{
+    IEMOP_MNEMONIC(stgi, "stgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdd. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
+{
+    IEMOP_MNEMONIC(clgi, "clgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdf. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
+{
+    IEMOP_MNEMONIC(invlpga, "invlpga");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
+}
+#else
 /** Opcode 0x0f 0x01 0xd9. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/** Opcode 0x0f 0x01 0xda. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
-{
-    IEMOP_MNEMONIC(vmload, "vmload");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
-}
-
-
-/** Opcode 0x0f 0x01 0xdb. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
-{
-    IEMOP_MNEMONIC(vmsave, "vmsave");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
-}
-
-
-/** Opcode 0x0f 0x01 0xdc. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
-{
-    IEMOP_MNEMONIC(stgi, "stgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
-}
-
-
-/** Opcode 0x0f 0x01 0xdd. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
-{
-    IEMOP_MNEMONIC(clgi, "clgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
-}
-
-
-/** Opcode 0x0f 0x01 0xdf. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
-{
-    IEMOP_MNEMONIC(invlpga, "invlpga");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
-}
-#else
 /** Opcode 0x0f 0x01 0xda. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

r65938 → r65989

@@ -3966 +3966 @@
     Event.n.u32ErrorCode     = 0;
     hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-}
-
-
-/**
- * Emulates a simple MOV TPR (CR8) instruction, used for TPR patching on 32-bit
- * guests. This simply looks up the patch record at EIP and does the required.
- *
- * This VMMCALL is used a fallback mechanism when mov to/from cr8 isn't exactly
- * like how we want it to be (e.g. not followed by shr 4 as is usually done for
- * TPR). See hmR3ReplaceTprInstr() for the details.
- *
- * @returns VBox status code.
- * @retval VINF_SUCCESS if the access was handled successfully.
- * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
- * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
- *
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pCtx        Pointer to the guest-CPU context.
- */
-static int hmR0SvmEmulateMovTpr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
-{
-    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));
-
-    /*
-     * We do this in a loop as we increment the RIP after a successful emulation
-     * and the new RIP may be a patched instruction which needs emulation as well.
-     */
-    bool fPatchFound = false;
-    for (;;)
-    {
-        bool    fPending;
-        uint8_t u8Tpr;
-
-        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
-        if (!pPatch)
-            break;
-
-        fPatchFound = true;
-        switch (pPatch->enmType)
-        {
-            case HMTPRINSTR_READ:
-            {
-                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
-                AssertRC(rc);
-
-                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
-                AssertRC(rc);
-                pCtx->rip += pPatch->cbOp;
-                break;
-            }
-
-            case HMTPRINSTR_WRITE_REG:
-            case HMTPRINSTR_WRITE_IMM:
-            {
-                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
-                {
-                    uint32_t u32Val;
-                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
-                    AssertRC(rc);
-                    u8Tpr = u32Val;
-                }
-                else
-                    u8Tpr = (uint8_t)pPatch->uSrcOperand;
-
-                int rc2 = APICSetTpr(pVCpu, u8Tpr);
-                AssertRC(rc2);
-                HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
-
-                pCtx->rip += pPatch->cbOp;
-                break;
-            }
-
-            default:
-                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
-                pVCpu->hm.s.u32HMError = pPatch->enmType;
-                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
-        }
-    }
-
-    if (fPatchFound)
-        return VINF_SUCCESS;
-    return VERR_NOT_FOUND;
 }
 
     
@@ -5260 +5177 @@
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);
 
-    /* First check if this is a patched VMMCALL for mov TPR */
-    int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
-    if (rc == VINF_SUCCESS)
-    {
-        HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-        return VINF_SUCCESS;
-    }
-
-    if (rc == VERR_NOT_FOUND)
-    {
-        if (pVCpu->hm.s.fHypercallsEnabled)
-        {
-            VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, pCtx);
-            if (RT_SUCCESS(VBOXSTRICTRC_VAL(rcStrict)))
-            {
-                if (rcStrict == VINF_SUCCESS)
-                    hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3 /* cbInstr */);
-                else
-                    Assert(   rcStrict == VINF_GIM_HYPERCALL_CONTINUING
-                           || rcStrict == VINF_GIM_R3_HYPERCALL);
-
-                /* If the hypercall changes anything other than guest's general-purpose registers,
-                   we would need to reload the guest changed bits here before VM-entry. */
-            }
-            rc = VBOXSTRICTRC_VAL(rcStrict);
-            HMSVM_CHECK_SINGLE_STEP(pVCpu, rc);
-        }
-        else
-            Log4(("hmR0SvmExitVmmCall: Hypercalls not enabled\n"));
-    }
-
-    /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */
-    if (RT_FAILURE(rc))
-    {
-        hmR0SvmSetPendingXcptUD(pVCpu);
-        rc = VINF_SUCCESS;
-    }
-
-    return rc;
+    bool fRipUpdated;
+    VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fRipUpdated);
+    if (RT_SUCCESS(rcStrict))
+    {
+        if (!fRipUpdated)
+            hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 3 /* cbInstr */);
+
+        /* If the hypercall or TPR patching changes anything other than guest's general-purpose registers,
+           we would need to reload the guest changed bits here before VM-entry. */
+        return VBOXSTRICTRC_VAL(rcStrict);
+    }
+
+    hmR0SvmSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
 }
 
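
With this change the ring-0 VMMCALL #VMEXIT handler no longer emulates mov-TPR patch records or dispatches GIM hypercalls itself; both paths are consolidated behind HMSvmVmmcall(), which lives in the newly added all-context file VMMAll/HMSVMAll.cpp (not shown in this changeset view) and is shared with the IEM implementation above. Pieced together from the code removed here, a rough, hypothetical reconstruction of that helper could look like the sketch below; apart from the HMSvmVmmcall name, its three parameters and GIMHypercall(), everything in it is an assumption:

    /* Hypothetical reconstruction -- see VMMAll/HMSVMAll.cpp in this changeset for the real code. */
    VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated)
    {
        /* 1. Presumably the mov-TPR patch emulation loop removed from HMSVMR0.cpp above now runs
              here first for 32-bit guests; it advances RIP per patched instruction itself, in
              which case *pfRipUpdated would be set to true and VINF_SUCCESS returned. */

        /* 2. Otherwise hand the call to the GIM provider; RIP handling is left to the caller. */
        if (pVCpu->hm.s.fHypercallsEnabled)
        {
            *pfRipUpdated = false;
            return GIMHypercall(pVCpu, pCtx);
        }

        /* 3. No TPR patch and no hypercall provider: fail so the caller raises #UD. */
        *pfRipUpdated = false;
        return VERR_NOT_AVAILABLE;   /* Assumption: callers only test RT_SUCCESS(). */
    }

Either caller, hmR0SvmExitVmmCall() here or iemCImpl_vmmcall() above, then advances RIP only when the call succeeded without already updating RIP, and injects or raises #UD on failure.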
  • trunk/src/VBox/VMM/include/EMHandleRCTmpl.h

r62478 → r65989

@@ -237 +237 @@
         case VINF_GIM_R3_HYPERCALL:
         {
+            /* Currently hypercall instruction (vmcall/vmmcall) emulation is compiled
+               only when Nested Hw. virt feature is enabled in IEM (for easier IEM backports). */
+#ifdef VBOX_WITH_NESTED_HWVIRT
+            rc = emR3ExecuteInstruction(pVM, pVCpu, "Hypercall");
+            break;
+#else
             /** @todo IEM/REM need to handle VMCALL/VMMCALL, see
              *        @bugref{7270#c168}. */
@@ -259 +265 @@
             }
             break;
+#endif
         }
 