VirtualBox

Changeset 72451 in vbox for trunk


Timestamp: Jun 5, 2018 2:35:15 PM
Author: vboxsync
Message:

IEM: Quick vmcall (VT-x) implementation, made vmmcall available when nested svm is disabled. Need this for NEM/win. bugref:9044
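
The vmcall implementation itself lands in the newly added IEMAllCImplVmxInstr.cpp.h, which the viewer counts under "1 added" but does not show inline. As a rough, standalone sketch of the idea the message describes, assuming only that both the Intel VMCALL and the AMD VMMCALL spellings end up funnelled into one guest-hypercall path (all names below are illustrative, not VirtualBox APIs):

    /* Simplified model: both hypercall spellings reach the same handler, and the
     * AMD spelling stays decodable even when nested-SVM emulation is compiled out. */
    #include <stdio.h>

    typedef enum { HYPERCALL_VMCALL, HYPERCALL_VMMCALL } HYPERCALLSRC;

    /* Stand-in for the paravirt provider's hypercall dispatcher (GIM in VirtualBox). */
    static int handleHypercall(HYPERCALLSRC enmSrc, unsigned long uFunction)
    {
        printf("hypercall via %s, function %#lx\n",
               enmSrc == HYPERCALL_VMCALL ? "vmcall" : "vmmcall", uFunction);
        return 0;
    }

    /* Intel spelling: the decoder now defers here instead of raising #UD. */
    static int emulateVmcall(unsigned long uRax)
    {
        return handleHypercall(HYPERCALL_VMCALL, uRax);
    }

    /* AMD spelling: wired up even without nested-SVM support. */
    static int emulateVmmcall(unsigned long uRax)
    {
        return handleHypercall(HYPERCALL_VMMCALL, uRax);
    }

    int main(void)
    {
        emulateVmcall(0x11);   /* example hypercall number; the register convention depends on the provider */
        emulateVmmcall(0x11);
        return 0;
    }

The "Need this for NEM/win" note suggests the immediate consumer is the Hyper-V execution backend, where IEM has to service these instructions when the guest talks to a paravirtualized interface.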

Location: trunk/src/VBox/VMM
Files: 1 added, 5 edited

  • trunk/src/VBox/VMM/Makefile.kmk

    r72286 → r72451

     IEMAllInstructions3DNow.cpp.o      IEMAllInstructions3DNow.cpp.obj \
     IEMAllCImpl.cpp.o                  IEMAllCImpl.cpp.obj \
    -IEMAllCImplStrInstr.cpp.o          IEMAllCImplStrInstr.cpp.obj: IEMAll.o
    +IEMAllCImplStrInstr.cpp.o          IEMAllCImplStrInstr.cpp.obj \
    +IEMAllCImplSvmInstr.cpp.o          IEMAllCImplSvmInstr.cpp.obj \
    +IEMAllCImplVmxInstr.cpp.o          IEMAllCImplVmxInstr.cpp.obj: IEMAll.o

     # Alias the NEM template to the objects where it is used:
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r72448 → r72451

     #include <VBox/vmm/em.h>
     #include <VBox/vmm/hm.h>
    +#include <VBox/vmm/gim.h>
     #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     # include <VBox/vmm/em.h>
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r72209 → r72451

      */

    -#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    -# include "IEMAllCImplSvmInstr.cpp.h"
    -#endif
    +#include "IEMAllCImplSvmInstr.cpp.h"
    +#include "IEMAllCImplVmxInstr.cpp.h"
    +

     /** @name Misc Helpers
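
The VBOX_WITH_NESTED_HWVIRT_SVM guard that used to sit around this include moves into IEMAllCImplSvmInstr.cpp.h itself (next file below), so both instruction-implementation files can be included unconditionally while only their nested-hardware-virtualization parts stay conditional. A minimal, self-contained illustration of that guard-relocation pattern, with generic names rather than the VirtualBox sources:

    /* Compile with or without -DWITH_FEATURE: the guard now lives inside the
     * implementation, so code shared with feature-less builds stays available. */
    #include <stdio.h>

    /* What used to be guarded at the include site is now self-guarding. */
    #ifdef WITH_FEATURE
    static void featureOnlyOp(void)  { puts("feature-specific instruction emulated"); }
    #else
    static void featureOnlyOp(void)  { puts("#UD: feature not compiled in"); }
    #endif

    /* Shared piece living in the same file but outside the guard; this is the
     * role the hypercall instructions play in this changeset. */
    static void sharedHypercallOp(void) { puts("hypercall forwarded"); }

    int main(void)
    {
        featureOnlyOp();
        sharedHypercallOp();
        return 0;
    }
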
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h

    r72421 → r72451



    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    +
     /**
      * Converts an IEM exception event type to an SVM event type.
    …
          */
         int rc = PGMChangeMode(pVCpu, pCtx->cr0 | X86_CR0_PE, pCtx->cr4, pCtx->msrEFER);
    -#ifdef IN_RING3
    +# ifdef IN_RING3
         Assert(rc != VINF_PGM_CHANGE_MODE);
    -#endif
    +# endif
         AssertRCReturn(rc, rc);

    …
              * in iemSvmWorldSwitch() below.
              */
    -#if 0
    +# if 0
             /** @todo @bugref{7243}: ASID based PGM TLB flushes. */
             if (   pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE
    …
                 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)
                 PGMFlushTLB(pVCpu, pVmcbNstGst->u64CR3, true /* fGlobal */);
    -#endif
    +# endif

             /*
    …
             {
                 PSVMVMCBCTRL  pVmcbCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
    -#ifdef IEM_WITH_CODE_TLB
    +# ifdef IEM_WITH_CODE_TLB
                 uint8_t const *pbInstrBuf = pVCpu->iem.s.pbInstrBuf;
                 uint8_t const  cbInstrBuf = pVCpu->iem.s.cbInstrBuf;
    …
                     && cbInstrBuf > 0)
                     memcpy(&pVmcbCtrl->abInstr[0], pbInstrBuf, pVmcbCtrl->cbInstrFetched);
    -#else
    +# else
                 uint8_t const cbOpcode    = pVCpu->iem.s.cbOpcode;
                 pVmcbCtrl->cbInstrFetched = RT_MIN(cbOpcode, SVM_CTRL_GUEST_INSTR_BYTES_MAX);
                 if (cbOpcode > 0)
                     memcpy(&pVmcbCtrl->abInstr[0], &pVCpu->iem.s.abOpcode[0], pVmcbCtrl->cbInstrFetched);
    -#endif
    +# endif
             }
             if (u8Vector == X86_XCPT_BR)
    …
     IEM_CIMPL_DEF_0(iemCImpl_vmrun)
     {
    -#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    +# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
         RT_NOREF2(pVCpu, cbInstr);
         return VINF_EM_RAW_EMULATE_INSTR;
    -#else
    +# else
         LogFlow(("iemCImpl_vmrun\n"));
         PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    …
         }
         return rcStrict;
    -#endif
    -}
    -
    +# endif
    +}
    +
    +#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */

     /**
    …
     }

    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM

     /**
    …
     IEM_CIMPL_DEF_0(iemCImpl_vmload)
     {
    -#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    +# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
         RT_NOREF2(pVCpu, cbInstr);
         return VINF_EM_RAW_EMULATE_INSTR;
    -#else
    +# else
         LogFlow(("iemCImpl_vmload\n"));
         PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    …
         }
         return rcStrict;
    -#endif
    +# endif
     }

    …
     IEM_CIMPL_DEF_0(iemCImpl_vmsave)
     {
    -#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    +# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
         RT_NOREF2(pVCpu, cbInstr);
         return VINF_EM_RAW_EMULATE_INSTR;
    -#else
    +# else
         LogFlow(("iemCImpl_vmsave\n"));
         PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    …
         }
         return rcStrict;
    -#endif
    +# endif
     }

    …
     IEM_CIMPL_DEF_0(iemCImpl_clgi)
     {
    -#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    +# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
         RT_NOREF2(pVCpu, cbInstr);
         return VINF_EM_RAW_EMULATE_INSTR;
    -#else
    +# else
         LogFlow(("iemCImpl_clgi\n"));
         PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    …
         iemRegAddToRipAndClearRF(pVCpu, cbInstr);

    -# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    +#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
         return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
    -# else
    +#  else
         return VINF_SUCCESS;
    +#  endif
     # endif
    -#endif
     }

    …
     IEM_CIMPL_DEF_0(iemCImpl_stgi)
     {
    -#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    +# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
         RT_NOREF2(pVCpu, cbInstr);
         return VINF_EM_RAW_EMULATE_INSTR;
    -#else
    +# else
         LogFlow(("iemCImpl_stgi\n"));
         PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    …
         iemRegAddToRipAndClearRF(pVCpu, cbInstr);

    -# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    +#  if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
         return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
    -# else
    +#  else
         return VINF_SUCCESS;
    +#  endif
     # endif
    -#endif
     }

    …
         RTGCPTR  const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
         /** @todo PGM needs virtual ASID support. */
    -#if 0
    +# if 0
         uint32_t const uAsid     = pCtx->ecx;
    -#endif
    +# endif

         IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, invlpga);
    …
     }

    +#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
    +
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r72209 → r72451

     FNIEMOP_DEF(iemOp_Grp7_vmcall)
     {
    -    IEMOP_BITCH_ABOUT_STUB();
    -    return IEMOP_RAISE_INVALID_OPCODE();
    +    IEMOP_MNEMONIC(vmcall, "vmcall");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmcall);
     }

    …


    +/** Opcode 0x0f 0x01 0xd8. */
     #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    -/** Opcode 0x0f 0x01 0xd8. */
     FNIEMOP_DEF(iemOp_Grp7_Amd_vmrun)
     {
    …
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmrun);
     }
    +#else
    +FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
    +#endif

     /** Opcode 0x0f 0x01 0xd9. */
    …
     }

    -
     /** Opcode 0x0f 0x01 0xda. */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
     {
    …
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
     }
    +#else
    +FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
    +#endif


     /** Opcode 0x0f 0x01 0xdb. */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
     {
    …
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
     }
    +#else
    +FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);
    +#endif


     /** Opcode 0x0f 0x01 0xdc. */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
     {
    …
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
     }
    +#else
    +FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);
    +#endif


     /** Opcode 0x0f 0x01 0xdd. */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
     {
    …
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
     }
    +#else
    +FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);
    +#endif


     /** Opcode 0x0f 0x01 0xdf. */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
     {
    …
         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
     }
    +#else
    +FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
    +#endif


     /** Opcode 0x0f 0x01 0xde. */
    +#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     FNIEMOP_DEF(iemOp_Grp7_Amd_skinit)
     {
    …
     }
     #else
    -/** Opcode 0x0f 0x01 0xd8. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
    -
    -/** Opcode 0x0f 0x01 0xd9. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
    -
    -/** Opcode 0x0f 0x01 0xda. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
    -
    -/** Opcode 0x0f 0x01 0xdb. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);
    -
    -/** Opcode 0x0f 0x01 0xdc. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);
    -
    -/** Opcode 0x0f 0x01 0xdd. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);
    -
    -/** Opcode 0x0f 0x01 0xdf. */
    -FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
    -
    -/** Opcode 0x0f 0x01 0xde. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);
    -#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
    +#endif
    +

     /** Opcode 0x0f 0x01 /4. */
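
Decoder-side summary of the changes above: iemOp_Grp7_vmcall now defers to the new iemCImpl_vmcall instead of raising #UD, and each AMD group-7 SVM handler gets its own #ifdef with an FNIEMOP_UD_STUB fallback, so per the commit message vmmcall stays decodable even when nested SVM is disabled. A small standalone model of that per-entry fallback pattern (illustrative names only, not the IEM macros):

    /* Build with or without -DWITH_NESTED_SVM: feature-gated entries fall back to
     * a #UD stub, while the hypercall entry is always wired up. */
    #include <stdio.h>

    typedef int (*PFNOP)(void);

    static int opRaiseUd(void)   { puts("#UD"); return -1; }
    static int opHypercall(void) { puts("vmmcall -> hypercall"); return 0; }

    #ifdef WITH_NESTED_SVM
    static int opVmrun(void)     { puts("vmrun emulated"); return 0; }
    #else
    /* Plays the role of FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun). */
    static int opVmrun(void)     { return opRaiseUd(); }
    #endif

    /* Trimmed decode table for 0F 01 D8 (vmrun) and 0F 01 D9 (vmmcall). */
    static const PFNOP g_apfnGrp7Amd[] = { opVmrun, opHypercall };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(g_apfnGrp7Amd) / sizeof(g_apfnGrp7Amd[0]); i++)
            g_apfnGrp7Amd[i]();
        return 0;
    }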