VirtualBox

Changeset 66000 in vbox for trunk/src/VBox/VMM


Timestamp: Mar 8, 2017 8:29:40 PM (8 years ago)
Author:    vboxsync
Message:   VMM: Nested Hw.virt: Preps for SVM vmrun/#VMEXIT impl.
Location:  trunk/src/VBox/VMM
Files:     6 edited

Legend:

    ' '  Unmodified (context)
    '+'  Added
    '-'  Removed
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r65989 r66000  
     
     
    -
    -/**
    - * SVM nested-guest \#VMEXIT handler.
    - *
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   uExitCode   The exit reason.
    - */
    -VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
    -{
    -    RT_NOREF2(pVCpu, uExitCode);
    -}
    -
    -
     /**
      * VMX nested-guest VM-exit handler.
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r65989 r66000  
     /**
      * Performs the operations necessary that are part of the vmmcall instruction
    - * execution for AMD-V.
    + * execution in the guest.
      *
      * @returns Strict VBox status code (i.e. informational status codes too).
    - *
      * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
      *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and

     }
     
    +
    +/**
    + * Performs the operations necessary that are part of the vmrun instruction
    + * execution in the guest.
    + *
    + * @returns Strict VBox status code (i.e. informational status codes too).
    + *
    + * @param   pVCpu               The cross context virtual CPU structure.
    + * @param   pCtx                Pointer to the guest-CPU context.
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    RT_NOREF2(pVCpu, pCtx);
    +
    +    return VERR_NOT_IMPLEMENTED;
    +}
    +
    +
    +/**
    + * SVM nested-guest \#VMEXIT handler.
    + *
    + * @returns Strict VBox status code.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   pCtx        The guest-CPU context.
    + * @param   uExitCode   The exit reason.
    + * @param   uExitInfo1  The exit info. 1 field.
    + * @param   uExitInfo1  The exit info. 2 field.
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
    +                                             uint64_t uExitInfo2)
    +{
    +    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
    +        || iExitCode == SVM_EXIT_INVALID)
    +    {
    +        RT_NOREF(pVCpu);
    +
    +        pCtx->hwvirt.svm.fGif = 0;
    +
    +        /** @todo implement #VMEXIT. */
    +
    +        return VINF_SUCCESS;
    +    }
    +    else
    +        Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%RI64 uExitInfo1=%RU64 uExitInfo2=%RU64\n", iExitCode,
    +             uExitInfo1, uExitInfo2));
    +
    +    return VERR_SVM_IPE_5;
    +}
    +
    +
    +/**
    + * Peforms the functions of a VMRUN instruction.
    + *
    + * @returns Strict VBox status code.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   pCtx        The guest-CPU context.
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmRun(PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    RT_NOREF2(pVCpu, pCtx);
    +    return VERR_NOT_IMPLEMENTED;
    +}
    +
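
HMSvmNstGstVmExit is still a stub at this revision: it only clears the nested-guest GIF and carries a @todo for the real #VMEXIT work. For orientation, the following is a minimal, hypothetical sketch of the world switch a full #VMEXIT performs according to the AMD64 SVM architecture: record the exit code and exit-info fields in the VMCB control area, write the nested-guest state back to the VMCB save area, then restore the outer guest from the host-save state. None of the type or function names below (NESTEDVMCB, sketchSvmVmExit, etc.) are VirtualBox APIs; they are invented for illustration only.

    #include <stdint.h>

    /* Hypothetical, simplified stand-ins for the VMCB control/save areas and the
       host-save state; field roles follow the AMD SVM spec, not VirtualBox types. */
    typedef struct NESTEDVMCBCTRL { uint64_t u64ExitCode, u64ExitInfo1, u64ExitInfo2; } NESTEDVMCBCTRL;
    typedef struct GUESTSTATE     { uint64_t rip, rsp, rax, rflags, cr0, cr3, cr4, efer; } GUESTSTATE;
    typedef struct NESTEDVMCB     { NESTEDVMCBCTRL ctrl; GUESTSTATE save; } NESTEDVMCB;

    /* Sketch of the #VMEXIT world switch: write the exit reason into the VMCB,
       save the nested-guest state into the VMCB save area, then restore the
       outer guest (the "host" from the nested guest's point of view).         */
    static void sketchSvmVmExit(NESTEDVMCB *pVmcb, GUESTSTATE *pCur,
                                const GUESTSTATE *pHostSave,
                                uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
    {
        pVmcb->ctrl.u64ExitCode  = uExitCode;   /* e.g. SVM_EXIT_VMMCALL            */
        pVmcb->ctrl.u64ExitInfo1 = uExitInfo1;
        pVmcb->ctrl.u64ExitInfo2 = uExitInfo2;
        pVmcb->save = *pCur;                    /* guest state -> VMCB save area    */
        *pCur = *pHostSave;                     /* host-save area -> current state  */
        /* GIF is cleared on #VMEXIT, exactly as the stub above already does.       */
    }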
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r65934 r66000  
     #ifdef VBOX_WITH_NESTED_HWVIRT
     /**
    + * Checks if IEM is in the process of delivering an event (interrupt or
    + * exception).
    + *
    + * @returns true if it's raising an interrupt or exception, false otherwise.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + */
    +VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
    +{
    +    return pVCpu->iem.s.cXcptRecursions > 0;
    +}
    +
    +
    +/**
      * Interface for HM and EM to emulate the STGI instruction.
      *
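
IEMIsRaisingIntOrXcpt simply reports whether IEM's exception recursion counter is non-zero, i.e. whether an interrupt or exception is currently being delivered. A caller in HM or EM could use it to postpone injecting further nested-guest events until delivery has unwound; the sketch below models just that check with a cut-down, hypothetical per-VCPU structure (MINIVCPU and miniCanInjectEvent are not VirtualBox symbols).

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the per-VCPU IEM state; only the field that
       IEMIsRaisingIntOrXcpt inspects is modelled here.                        */
    typedef struct MINIVCPU { uint32_t cXcptRecursions; } MINIVCPU;

    static bool miniIemIsRaisingIntOrXcpt(const MINIVCPU *pVCpu)
    {
        return pVCpu->cXcptRecursions > 0;      /* non-zero while delivering an event */
    }

    /* Hypothetical caller: only inject a new nested-guest event once the
       current interrupt/exception delivery (if any) has unwound.             */
    static bool miniCanInjectEvent(const MINIVCPU *pVCpu)
    {
        return !miniIemIsRaisingIntOrXcpt(pVCpu);
    }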
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r65989 r66000  
     #ifdef VBOX_WITH_NESTED_HWVIRT
     /**
    + * Implements 'VMRUN'.
    + */
    +IEM_CIMPL_DEF_0(iemCImpl_vmrun)
    +{
    +    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    +    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
    +
    +    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    +    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    +        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    +    {
    +        Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
    +        return iemRaiseGeneralProtectionFault0(pVCpu);
    +    }
    +
    +#ifndef IN_RC
    +    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
    +    {
    +        Log(("vmrun: Guest intercept -> VMexit\n"));
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    +    }
    +#endif
    +
    +    /** @todo think - I probably need to map both the HSAVE area page and the
    +     *        guest VMCB via iemMemPageMap here and do the copying? */
    +    pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
    +    void *pvVmcb;
    +    PGMPAGEMAPLOCK PgLockVmcb;
    +    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
    +    if (rcStrict == VINF_SUCCESS)
    +        return HMSvmVmrun(pVCpu, pCtx);
    +    RT_NOREF(cbInstr);
    +    return rcStrict;
    +}
    +
    +
    +/**
      * Implements 'VMMCALL'.
      */
     IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
     {
    -    /*
    -     * We do not check for presence of SVM/AMD-V here as the KVM GIM provider
    -     * might patch in an invalid vmmcall instruction with an Intel vmcall
    -     * instruction.
    -     */
    +    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    +#ifndef IN_RC
    +    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    +    {
    +        Log(("vmrun: Guest intercept -> VMexit\n"));
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    +    }
    +#endif
    +
         bool fUpdatedRipAndRF;
    -    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
         VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
         if (RT_SUCCESS(rcStrict))

         {
             Log(("vmload: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMLOAD);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
     
         RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    -    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    -    {
    -        Log(("vmload: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
    +    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    +        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    +    {
    +        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }

         {
             Log(("vmsave: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMSAVE);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
     
         RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    -    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    -    {
    -        Log(("vmsave: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
    +    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    +        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    +    {
    +        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }

         {
             Log(("clgi: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif

         {
             Log2(("stgi: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_STGI);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif

         {
             Log2(("invlpga: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_INVLPGA);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
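
The VMRUN, VMLOAD and VMSAVE paths above now share the same VMCB address validation: the guest-physical address taken from RAX/EAX must be 4K aligned and must refer to normal RAM, otherwise #GP(0) is raised. The standalone sketch below restates that check; PAGE_4K_OFFSET_MASK and isNormalRamPage are simplified stand-ins for X86_PAGE_4K_OFFSET_MASK and PGMPhysIsGCPhysNormal respectively, not the real VirtualBox symbols.

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_4K_OFFSET_MASK 0xfffu   /* same role as X86_PAGE_4K_OFFSET_MASK */

    /* Hypothetical stand-in for PGMPhysIsGCPhysNormal(): whether the address is
       backed by normal RAM (not MMIO, not unassigned).                          */
    static bool isNormalRamPage(uint64_t GCPhys)
    {
        (void)GCPhys;
        return true; /* placeholder; the real check queries the PGM physical memory info */
    }

    /* The validity test shared by the instruction handlers in this changeset:
       the VMCB physical address must be 4K aligned and point at normal RAM,
       otherwise the caller raises #GP(0).                                       */
    static bool isValidVmcbAddr(uint64_t GCPhysVmcb)
    {
        if (GCPhysVmcb & PAGE_4K_OFFSET_MASK)
            return false;                   /* not 4K aligned      */
        if (!isNormalRamPage(GCPhysVmcb))
            return false;                   /* MMIO/special memory */
        return true;
    }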
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r65989 r66000  
     
     
    +#ifdef VBOX_WITH_NESTED_HWVIRT
    +/** Opcode 0x0f 0x01 0xd8. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmrun)
    +{
    +    IEMOP_MNEMONIC(vmrun, "vmrun");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmrun);
    +}
    +
    +/** Opcode 0x0f 0x01 0xd9. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
    +{
    +    IEMOP_MNEMONIC(vmmcall, "vmmcall");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xda. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
    +{
    +    IEMOP_MNEMONIC(vmload, "vmload");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdb. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
    +{
    +    IEMOP_MNEMONIC(vmsave, "vmsave");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdc. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
    +{
    +    IEMOP_MNEMONIC(stgi, "stgi");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdd. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
    +{
    +    IEMOP_MNEMONIC(clgi, "clgi");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdf. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
    +{
    +    IEMOP_MNEMONIC(invlpga, "invlpga");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    +}
    +#else
     /** Opcode 0x0f 0x01 0xd8. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
     
    -#ifdef VBOX_WITH_NESTED_HWVIRT
    -/** Opcode 0x0f 0x01 0xd9. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
    -{
    -    IEMOP_MNEMONIC(vmmcall, "vmmcall");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xda. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
    -{
    -    IEMOP_MNEMONIC(vmload, "vmload");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdb. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
    -{
    -    IEMOP_MNEMONIC(vmsave, "vmsave");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdc. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
    -{
    -    IEMOP_MNEMONIC(stgi, "stgi");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdd. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
    -{
    -    IEMOP_MNEMONIC(clgi, "clgi");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdf. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
    -{
    -    IEMOP_MNEMONIC(invlpga, "invlpga");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    -}
    -#else
     /** Opcode 0x0f 0x01 0xd9. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
    -
     /** Opcode 0x0f 0x01 0xda. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
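
The new decoders are thin: each one emits the mnemonic and defers to the corresponding C-level implementation via IEM_MC_DEFER_TO_CIMPL_0. The sketch below is a simplified, hypothetical model of that dispatch for the 0F 01 D8..DF (AMD-V) opcode group; the function and table names are invented for illustration and do not exist in IEM.

    #include <stdint.h>

    /* Hypothetical model of the decode step: each 0F 01 /modrm byte in the
       0xd8..0xdf range maps to one C-level instruction implementation, which
       is what IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_xxx) expresses in IEM.        */
    typedef int (*PFNCIMPL)(void);

    static int cimplVmrun(void)   { return 0; }   /* stands in for iemCImpl_vmrun   */
    static int cimplVmmcall(void) { return 0; }   /* stands in for iemCImpl_vmmcall */
    static int cimplVmload(void)  { return 0; }
    static int cimplVmsave(void)  { return 0; }
    static int cimplStgi(void)    { return 0; }
    static int cimplClgi(void)    { return 0; }
    static int cimplSkinit(void)  { return 0; }   /* 0xde; not touched by this changeset */
    static int cimplInvlpga(void) { return 0; }

    /* Dispatch table for 0F 01 D8..DF; index = modrm byte minus 0xd8. */
    static PFNCIMPL const g_apfnGrp7Amd[8] =
    {
        cimplVmrun, cimplVmmcall, cimplVmload, cimplVmsave,
        cimplStgi,  cimplClgi,    cimplSkinit, cimplInvlpga
    };

    static int decodeGrp7Amd(uint8_t bModRm)
    {
        if (bModRm >= 0xd8 && bModRm <= 0xdf)
            return g_apfnGrp7Amd[bModRm - 0xd8]();
        return -1; /* the real decoder raises #UD here */
    }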
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r65909 r66000  
     .Guest.hwvirt.svm.u16InterceptWrDRx   resw    1
     .Guest.hwvirt.svm.fGif                resb    1
    +.Guest.hwvirt.svm.abPadding           resb    3
    +.Guest.hwvirt.svm.GCPhysNstGstVmcb    resq    1
     alignb 64
     

     .Hyper.hwvirt.svm.u16InterceptWrDRx   resw    1
     .Hyper.hwvirt.svm.fGif                resb    1
    +.Hyper.hwvirt.svm.abPadding           resb    3
    +.Hyper.hwvirt.svm.GCPhysNstGstVmcb    resq    1
     alignb 64
     
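
CPUMInternal.mac mirrors the C context structure for use from assembly, so the two new entries presumably correspond to an abPadding[3] member and a 64-bit GCPhysNstGstVmcb member on the C side (the VMCB guest-physical address stashed by iemCImpl_vmrun above). The sketch below shows an assumed, cut-down C counterpart of just these fields; MINISVMSTATE is not a VirtualBox type and all surrounding members are omitted.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Assumed C-side counterpart of the fields mirrored in CPUMInternal.mac. */
    typedef struct MINISVMSTATE
    {
        uint16_t    u16InterceptWrDRx;
        uint8_t     fGif;
        uint8_t     abPadding[3];          /* mirrors ".Guest.hwvirt.svm.abPadding  resb 3" */
        uint64_t    GCPhysNstGstVmcb;      /* mirrors the new "resq 1": nested-guest VMCB address */
    } MINISVMSTATE;

    /* The assembly mirror only stays correct if the C layout matches what the
       .mac file reserves; a compile-time check of this sort is the usual guard
       (VirtualBox has its own offset assertions elsewhere).                     */
    static_assert(sizeof(((MINISVMSTATE *)0)->abPadding) == 3, "padding mirrors resb 3");
    static_assert(offsetof(MINISVMSTATE, GCPhysNstGstVmcb) % 8 == 0, "quadword field stays naturally aligned");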