VirtualBox

Changeset 66000 in vbox for trunk


Timestamp: Mar 8, 2017 8:29:40 PM
Author:    vboxsync
Message:   VMM: Nested Hw.virt: Preps for SVM vmrun/#VMEXIT impl.
Location:  trunk
Files:     12 edited

Legend: in the hunks below, lines prefixed with '+' were added, lines prefixed with '-' were removed, and lines prefixed with a space are unmodified context. '@@ -old +new @@' markers give the starting line numbers of each hunk in the old and new revision.
  • trunk/include/VBox/vmm/cpum.h

    r65933 r66000  
    @@ -32 +32 @@
     #include <VBox/vmm/stam.h>
     #include <VBox/vmm/vmapi.h>
    +#include <VBox/vmm/hm_svm.h>
     
     RT_C_DECLS_BEGIN
    @@ -1347 +1348 @@
     }
     
    +/**
    + * Checks if the guest is currently in nested hardware-virtualized
    + * guest mode.
    + *
    + * @returns true if in nested-guest mode, false otherwise.
    + * @param   pCtx        Pointer to the context.
    + */
    +DECLINLINE(bool) CPUMIsGuestInNestedHwVirtMode(PCPUMCTX pCtx)
    +{
    +    /*
    +     * With SVM, the VMRUN intercept is a pre-requisite to entering guest-mode.
    +     * See AMD spec., 15.5 "VMRUN instruction" subsection "Canonicalization and Consistency Checks".
    +     */
    +    return RT_BOOL(pCtx->hwvirt.svm.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VMRUN);
    +    /** @todo Intel VMX.  */
    +}
     #endif /* VBOX_WITHOUT_UNNAMED_UNIONS */
     
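    The new CPUMIsGuestInNestedHwVirtMode helper keys off the VMRUN intercept because, per the AMD spec section cited in the code, a VMCB whose VMRUN intercept is clear fails VMRUN's consistency checks; the bit therefore doubles as a cheap "nested guest active" flag until a fuller implementation lands. A minimal sketch of a call site (hypothetical, not part of this changeset; assumes a VBOX_WITH_NESTED_HWVIRT build and a valid pVCpu in scope):

        /* Hypothetical caller: gate nested-guest intercept handling on the new check. */
        PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
        if (CPUMIsGuestInNestedHwVirtMode(pCtx))
        {
            /* Nested-guest mode: the guest VMCB intercepts must be honoured here. */
        }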
  • trunk/include/VBox/vmm/cpum.mac

    r65909 r66000  
    @@ -266 +266 @@
         .hwvirt.svm.u16InterceptWrDRx   resw    1
         .hwvirt.svm.fGif                resb    1
    +    .hwvirt.svm.abPadding           resb    3
    +    .hwvirt.svm.GCPhysNstGstVmcb    resq    1
         alignb 64
     endstruc
  • trunk/include/VBox/vmm/cpumctx.h

    r65910 r66000  
    @@ -462 +462 @@
                     /** 756 - Global interrupt flag. */
                     uint8_t            fGif;
    -                /** 757 - Padding. */
    -                uint8_t            abPadding[11];
    +                uint8_t            abPadding[3];
    +                /** 760 - Nested-guest VMCB. */
    +                RTGCPHYS           GCPhysNstGstVmcb;
                 } svm;
     #if 0
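    Why the padding shrinks from 11 to 3 bytes: fGif occupies byte offset 757, so 3 padding bytes land GCPhysNstGstVmcb, a 64-bit guest-physical address, at offset 760, a multiple of 8, keeping it naturally aligned. A compile-time check in the spirit of the surrounding asserts could pin that down (sketch only, not part of this changeset):

        AssertCompileMemberAlignment(CPUMCTX, hwvirt.svm.GCPhysNstGstVmcb, 8);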
  • trunk/include/VBox/vmm/hm.h

    r65989 r66000  
    @@ -149 +149 @@
     
     VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfRipUpdated);
    -
    -VMM_INT_DECL(void)              HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode);
    -VMM_INT_DECL(void)              HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
    +VMM_INT_DECL(VBOXSTRICTRC)      HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx);
    +VMM_INT_DECL(VBOXSTRICTRC)      HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
    +                                                  uint64_t uExitInfo2);
    +VMM_INT_DECL(void)              HMVmxNstGstVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
     
     #ifndef IN_RC
  • trunk/include/VBox/vmm/hm_svm.h

    r65933 r66000  
    @@ -630 +630 @@
     AssertCompileSize(SVMAVICPHYS, 8);
     
    +
    +/**
    + * SVM VMCB control area.
    + */
    +#pragma pack(1)
    +typedef struct
    +{
    +    /** Offset 0x00 - Intercept reads of CR0-CR15. */
    +    uint16_t    u16InterceptRdCRx;
    +    /** Offset 0x02 - Intercept writes to CR0-CR15. */
    +    uint16_t    u16InterceptWrCRx;
    +    /** Offset 0x04 - Intercept reads of DR0-DR15. */
    +    uint16_t    u16InterceptRdDRx;
    +    /** Offset 0x06 - Intercept writes to DR0-DR15. */
    +    uint16_t    u16InterceptWrDRx;
    +    /** Offset 0x08 - Intercept exception vectors 0-31. */
    +    uint32_t    u32InterceptException;
    +    /** Offset 0x0c - Intercept control. */
    +    uint64_t    u64InterceptCtrl;
    +    /** Offset 0x14-0x3b - Reserved. */
    +    uint8_t     u8Reserved[0x3c - 0x14];
    +    /** Offset 0x3c - PAUSE filter threshold. */
    +    uint16_t    u16PauseFilterThreshold;
    +    /** Offset 0x3e - PAUSE intercept filter count. */
    +    uint16_t    u16PauseFilterCount;
    +    /** Offset 0x40 - Physical address of IOPM. */
    +    uint64_t    u64IOPMPhysAddr;
    +    /** Offset 0x48 - Physical address of MSRPM. */
    +    uint64_t    u64MSRPMPhysAddr;
    +    /** Offset 0x50 - TSC Offset. */
    +    uint64_t    u64TSCOffset;
    +    /** Offset 0x58 - TLB control field. */
    +    SVMTLBCTRL  TLBCtrl;
    +    /** Offset 0x60 - Interrupt control field. */
    +    SVMINTCTRL  IntCtrl;
    +    /** Offset 0x68 - Interrupt shadow. */
    +    uint64_t    u64IntShadow;
    +    /** Offset 0x70 - Exit code. */
    +    uint64_t    u64ExitCode;
    +    /** Offset 0x78 - Exit info 1. */
    +    uint64_t    u64ExitInfo1;
    +    /** Offset 0x80 - Exit info 2. */
    +    uint64_t    u64ExitInfo2;
    +    /** Offset 0x88 - Exit Interrupt info. */
    +    SVMEVENT    ExitIntInfo;
    +    /** Offset 0x90 - Nested Paging. */
    +    SVMNPCTRL   NestedPaging;
    +    /** Offset 0x98 - AVIC APIC BAR. */
    +    SVMAVIC     AvicBar;
    +    /** Offset 0xa0-0xa7 - Reserved. */
    +    uint8_t     u8Reserved2[0xA8-0xA0];
    +    /** Offset 0xa8 - Event injection. */
    +    SVMEVENT    EventInject;
    +    /** Offset 0xb0 - Host CR3 for nested paging. */
    +    uint64_t    u64NestedPagingCR3;
    +    /** Offset 0xb8 - LBR Virtualization. */
    +    uint64_t    u64LBRVirt;
    +    /** Offset 0xc0 - VMCB Clean Bits. */
    +    uint64_t    u64VmcbCleanBits;
    +    /** Offset 0xc8 - Next sequential instruction pointer. */
    +    uint64_t    u64NextRIP;
    +    /** Offset 0xd0 - Number of bytes fetched. */
    +    uint8_t     cbInstrFetched;
    +    /** Offset 0xd1 - Fetched bytes. */
    +    uint8_t     abInstr[15];
    +    /** Offset 0xe0 - AVIC APIC_BACKING_PAGE pointer. */
    +    SVMAVIC     AvicBackingPagePtr;
    +    /** Offset 0xe8-0xef - Reserved. */
    +    uint8_t     u8Reserved3[0xF0 - 0xE8];
    +    /** Offset 0xf0 - AVIC LOGICAL_TABLE pointer. */
    +    SVMAVIC     AvicLogicalTablePtr;
    +    /** Offset 0xf8 - AVIC PHYSICAL_TABLE pointer. */
    +    SVMAVICPHYS AvicPhysicalTablePtr;
    +} SVMVMCBCTRL;
    +#pragma pack()
    +/** Pointer to the SVMVMCBCTRL structure. */
    +typedef SVMVMCBCTRL *PSVMVMCBCTRL;
    +/** Pointer to a const SVMVMCBCTRL structure. */
    +typedef const SVMVMCBCTRL *PCSVMVMCBCTRL;
    +AssertCompileSize(SVMVMCBCTRL, 0x100);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptRdCRx,       0x00);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptWrCRx,       0x02);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptRdDRx,       0x04);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u16InterceptWrDRx,       0x06);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u32InterceptException,   0x08);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64InterceptCtrl,        0x0c);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved,              0x14);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u16PauseFilterThreshold, 0x3c);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u16PauseFilterCount,     0x3e);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64IOPMPhysAddr,         0x40);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64MSRPMPhysAddr,        0x48);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64TSCOffset,            0x50);
    +AssertCompileMemberOffset(SVMVMCBCTRL, TLBCtrl,                 0x58);
    +AssertCompileMemberOffset(SVMVMCBCTRL, IntCtrl,                 0x60);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64IntShadow,            0x68);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64ExitCode,             0x70);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64ExitInfo1,            0x78);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64ExitInfo2,            0x80);
    +AssertCompileMemberOffset(SVMVMCBCTRL, ExitIntInfo,             0x88);
    +AssertCompileMemberOffset(SVMVMCBCTRL, NestedPaging,            0x90);
    +AssertCompileMemberOffset(SVMVMCBCTRL, AvicBar,                 0x98);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved2,             0xa0);
    +AssertCompileMemberOffset(SVMVMCBCTRL, EventInject,             0xa8);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64NestedPagingCR3,      0xb0);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64LBRVirt,              0xb8);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64VmcbCleanBits,        0xc0);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u64NextRIP,              0xc8);
    +AssertCompileMemberOffset(SVMVMCBCTRL, cbInstrFetched,          0xd0);
    +AssertCompileMemberOffset(SVMVMCBCTRL, abInstr,                 0xd1);
    +AssertCompileMemberOffset(SVMVMCBCTRL, AvicBackingPagePtr,      0xe0);
    +AssertCompileMemberOffset(SVMVMCBCTRL, u8Reserved3,             0xe8);
    +AssertCompileMemberOffset(SVMVMCBCTRL, AvicLogicalTablePtr,     0xf0);
    +AssertCompileMemberOffset(SVMVMCBCTRL, AvicPhysicalTablePtr,    0xf8);
    +
    +/**
    + * SVM VMCB state save area.
    + */
    +typedef struct
    +{
    +    /** Offset 0x400 - Guest ES register + hidden parts. */
    +    SVMSEL      ES;
    +    /** Offset 0x410 - Guest CS register + hidden parts. */
    +    SVMSEL      CS;
    +    /** Offset 0x420 - Guest SS register + hidden parts. */
    +    SVMSEL      SS;
    +    /** Offset 0x430 - Guest DS register + hidden parts. */
    +    SVMSEL      DS;
    +    /** Offset 0x440 - Guest FS register + hidden parts. */
    +    SVMSEL      FS;
    +    /** Offset 0x450 - Guest GS register + hidden parts. */
    +    SVMSEL      GS;
    +    /** Offset 0x460 - Guest GDTR register. */
    +    SVMGDTR     GDTR;
    +    /** Offset 0x470 - Guest LDTR register + hidden parts. */
    +    SVMSEL      LDTR;
    +    /** Offset 0x480 - Guest IDTR register. */
    +    SVMIDTR     IDTR;
    +    /** Offset 0x490 - Guest TR register + hidden parts. */
    +    SVMSEL      TR;
    +    /** Offset 0x4A0-0x4CA - Reserved. */
    +    uint8_t     u8Reserved4[0x4CB-0x4A0];
    +    /** Offset 0x4CB - CPL. */
    +    uint8_t     u8CPL;
    +    /** Offset 0x4CC-0x4CF - Reserved. */
    +    uint8_t     u8Reserved5[0x4D0-0x4CC];
    +    /** Offset 0x4D0 - EFER. */
    +    uint64_t    u64EFER;
    +    /** Offset 0x4D8-0x547 - Reserved. */
    +    uint8_t     u8Reserved6[0x548-0x4D8];
    +    /** Offset 0x548 - CR4. */
    +    uint64_t    u64CR4;
    +    /** Offset 0x550 - CR3. */
    +    uint64_t    u64CR3;
    +    /** Offset 0x558 - CR0. */
    +    uint64_t    u64CR0;
    +    /** Offset 0x560 - DR7. */
    +    uint64_t    u64DR7;
    +    /** Offset 0x568 - DR6. */
    +    uint64_t    u64DR6;
    +    /** Offset 0x570 - RFLAGS. */
    +    uint64_t    u64RFlags;
    +    /** Offset 0x578 - RIP. */
    +    uint64_t    u64RIP;
    +    /** Offset 0x580-0x5D7 - Reserved. */
    +    uint8_t     u8Reserved7[0x5D8-0x580];
    +    /** Offset 0x5D8 - RSP. */
    +    uint64_t    u64RSP;
    +    /** Offset 0x5E0-0x5F7 - Reserved. */
    +    uint8_t     u8Reserved8[0x5F8-0x5E0];
    +    /** Offset 0x5F8 - RAX. */
    +    uint64_t    u64RAX;
    +    /** Offset 0x600 - STAR. */
    +    uint64_t    u64STAR;
    +    /** Offset 0x608 - LSTAR. */
    +    uint64_t    u64LSTAR;
    +    /** Offset 0x610 - CSTAR. */
    +    uint64_t    u64CSTAR;
    +    /** Offset 0x618 - SFMASK. */
    +    uint64_t    u64SFMASK;
    +    /** Offset 0x620 - KernelGSBase. */
    +    uint64_t    u64KernelGSBase;
    +    /** Offset 0x628 - SYSENTER_CS. */
    +    uint64_t    u64SysEnterCS;
    +    /** Offset 0x630 - SYSENTER_ESP. */
    +    uint64_t    u64SysEnterESP;
    +    /** Offset 0x638 - SYSENTER_EIP. */
    +    uint64_t    u64SysEnterEIP;
    +    /** Offset 0x640 - CR2. */
    +    uint64_t    u64CR2;
    +    /** Offset 0x648-0x667 - Reserved. */
    +    uint8_t     u8Reserved9[0x668-0x648];
    +    /** Offset 0x668 - G_PAT. */
    +    uint64_t    u64GPAT;
    +    /** Offset 0x670 - DBGCTL. */
    +    uint64_t    u64DBGCTL;
    +    /** Offset 0x678 - BR_FROM. */
    +    uint64_t    u64BR_FROM;
    +    /** Offset 0x680 - BR_TO. */
    +    uint64_t    u64BR_TO;
    +    /** Offset 0x688 - LASTEXCPFROM. */
    +    uint64_t    u64LASTEXCPFROM;
    +    /** Offset 0x690 - LASTEXCPTO. */
    +    uint64_t    u64LASTEXCPTO;
    +} SVMVMCBSTATESAVE;
    +/** Pointer to the SVMVMCBSTATESAVE structure. */
    +typedef SVMVMCBSTATESAVE *PSVMVMCBSTATESAVE;
    +/** Pointer to a const SVMVMCBSTATESAVE structure. */
    +typedef const SVMVMCBSTATESAVE *PCSVMVMCBSTATESAVE;
    +AssertCompileSize(SVMVMCBSTATESAVE, 0x298);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, ES,              0x400 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, CS,              0x410 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, SS,              0x420 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, DS,              0x430 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, FS,              0x440 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, GS,              0x450 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, GDTR,            0x460 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, LDTR,            0x470 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, IDTR,            0x480 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, TR,              0x490 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved4,     0x4a0 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8CPL,           0x4cb - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved5,     0x4cc - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64EFER,         0x4d0 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved6,     0x4d8 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR4,          0x548 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR3,          0x550 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR0,          0x558 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DR7,          0x560 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DR6,          0x568 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RFlags,       0x570 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RIP,          0x578 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved7,     0x580 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RSP,          0x5d8 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved8,     0x5e0 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64RAX,          0x5f8 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64STAR,         0x600 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64LSTAR,        0x608 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CSTAR,        0x610 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SFMASK,       0x618 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64KernelGSBase, 0x620 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SysEnterCS,   0x628 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SysEnterESP,  0x630 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64SysEnterEIP,  0x638 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64CR2,          0x640 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u8Reserved9,     0x648 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64GPAT,         0x668 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64DBGCTL,       0x670 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64BR_FROM,      0x678 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64BR_TO,        0x680 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64LASTEXCPFROM, 0x688 - 0x400);
    +AssertCompileMemberOffset(SVMVMCBSTATESAVE, u64LASTEXCPTO,   0x690 - 0x400);
    +
     /**
      * SVM VM Control Block. (VMCB)
    @@ -636 +888 @@
     typedef struct SVMVMCB
     {
    -    /** Control Area. */
    -    struct
    -    {
    -        /** Offset 0x00 - Intercept reads of CR0-CR15. */
    -        uint16_t    u16InterceptRdCRx;
    -        /** Offset 0x02 - Intercept writes to CR0-CR15. */
    -        uint16_t    u16InterceptWrCRx;
    -        /** Offset 0x04 - Intercept reads of DR0-DR15. */
    -        uint16_t    u16InterceptRdDRx;
    -        /** Offset 0x06 - Intercept writes to DR0-DR15. */
    -        uint16_t    u16InterceptWrDRx;
    -        /** Offset 0x08 - Intercept exception vectors 0-31. */
    -        uint32_t    u32InterceptException;
    -        /** Offset 0x0C - Intercept control. */
    -        uint64_t    u64InterceptCtrl;
    -        /** Offset 0x14-0x3F - Reserved. */
    -        uint8_t     u8Reserved[0x3c - 0x14];
    -        /** Offset 0x3c - PAUSE filter threshold. */
    -        uint16_t    u16PauseFilterThreshold;
    -        /** Offset 0x3e - PAUSE intercept filter count. */
    -        uint16_t    u16PauseFilterCount;
    -        /** Offset 0x40 - Physical address of IOPM. */
    -        uint64_t    u64IOPMPhysAddr;
    -        /** Offset 0x48 - Physical address of MSRPM. */
    -        uint64_t    u64MSRPMPhysAddr;
    -        /** Offset 0x50 - TSC Offset. */
    -        uint64_t    u64TSCOffset;
    -        /** Offset 0x58 - TLB control field. */
    -        SVMTLBCTRL  TLBCtrl;
    -        /** Offset 0x60 - Interrupt control field. */
    -        SVMINTCTRL  IntCtrl;
    -        /** Offset 0x68 - Interrupt shadow. */
    -        uint64_t    u64IntShadow;
    -        /** Offset 0x70 - Exit code. */
    -        uint64_t    u64ExitCode;
    -        /** Offset 0x78 - Exit info 1. */
    -        uint64_t    u64ExitInfo1;
    -        /** Offset 0x80 - Exit info 2. */
    -        uint64_t    u64ExitInfo2;
    -        /** Offset 0x88 - Exit Interrupt info. */
    -        SVMEVENT    ExitIntInfo;
    -        /** Offset 0x90 - Nested Paging. */
    -        SVMNPCTRL   NestedPaging;
    -        /** Offset 0x98 - AVIC APIC BAR. */
    -        SVMAVIC     AvicBar;
    -        /** Offset 0xA0-0xA7 - Reserved. */
    -        uint8_t     u8Reserved2[0xA8-0xA0];
    -        /** Offset 0xA8 - Event injection. */
    -        SVMEVENT    EventInject;
    -        /** Offset 0xB0 - Host CR3 for nested paging. */
    -        uint64_t    u64NestedPagingCR3;
    -        /** Offset 0xB8 - LBR Virtualization. */
    -        uint64_t    u64LBRVirt;
    -        /** Offset 0xC0 - VMCB Clean Bits. */
    -        uint64_t    u64VmcbCleanBits;
    -        /** Offset 0xC8 - Next sequential instruction pointer. */
    -        uint64_t    u64NextRIP;
    -        /** Offset 0xD0 - Number of bytes fetched. */
    -        uint8_t     cbInstrFetched;
    -        /** Offset 0xD1 - Fetched bytes. */
    -        uint8_t     abInstr[15];
    -        /** Offset 0xE0 - AVIC APIC_BACKING_PAGE pointer. */
    -        SVMAVIC     AvicBackingPagePtr;
    -        /** Offset 0xE8-0xEF - Reserved. */
    -        uint8_t     u8Reserved3[0xF0 - 0xE8];
    -        /** Offset 0xF0 - AVIC LOGICAL_TABLE pointer. */
    -        SVMAVIC     AvicLogicalTablePtr;
    -        /** Offset 0xF8 - AVIC PHYSICAL_TABLE pointer. */
    -        SVMAVICPHYS AvicPhysicalTablePtr;
    -    } ctrl;
    -
    +    /** Offset 0x00 - Control area. */
    +    SVMVMCBCTRL ctrl;
         /** Offset 0x100-0x3FF - Reserved. */
         uint8_t     u8Reserved3[0x400-0x100];
    -
    -    /** State Save Area. Starts at offset 0x400. */
    -    struct
    -    {
    -        /** Offset 0x400 - Guest ES register + hidden parts. */
    -        SVMSEL      ES;
    -        /** Offset 0x410 - Guest CS register + hidden parts. */
    -        SVMSEL      CS;
    -        /** Offset 0x420 - Guest SS register + hidden parts. */
    -        SVMSEL      SS;
    -        /** Offset 0x430 - Guest DS register + hidden parts. */
    -        SVMSEL      DS;
    -        /** Offset 0x440 - Guest FS register + hidden parts. */
    -        SVMSEL      FS;
    -        /** Offset 0x450 - Guest GS register + hidden parts. */
    -        SVMSEL      GS;
    -        /** Offset 0x460 - Guest GDTR register. */
    -        SVMGDTR     GDTR;
    -        /** Offset 0x470 - Guest LDTR register + hidden parts. */
    -        SVMSEL      LDTR;
    -        /** Offset 0x480 - Guest IDTR register. */
    -        SVMIDTR     IDTR;
    -        /** Offset 0x490 - Guest TR register + hidden parts. */
    -        SVMSEL      TR;
    -        /** Offset 0x4A0-0x4CA - Reserved. */
    -        uint8_t     u8Reserved4[0x4CB-0x4A0];
    -        /** Offset 0x4CB - CPL. */
    -        uint8_t     u8CPL;
    -        /** Offset 0x4CC-0x4CF - Reserved. */
    -        uint8_t     u8Reserved5[0x4D0-0x4CC];
    -        /** Offset 0x4D0 - EFER. */
    -        uint64_t    u64EFER;
    -        /** Offset 0x4D8-0x547 - Reserved. */
    -        uint8_t     u8Reserved6[0x548-0x4D8];
    -        /** Offset 0x548 - CR4. */
    -        uint64_t    u64CR4;
    -        /** Offset 0x550 - CR3. */
    -        uint64_t    u64CR3;
    -        /** Offset 0x558 - CR0. */
    -        uint64_t    u64CR0;
    -        /** Offset 0x560 - DR7. */
    -        uint64_t    u64DR7;
    -        /** Offset 0x568 - DR6. */
    -        uint64_t    u64DR6;
    -        /** Offset 0x570 - RFLAGS. */
    -        uint64_t    u64RFlags;
    -        /** Offset 0x578 - RIP. */
    -        uint64_t    u64RIP;
    -        /** Offset 0x580-0x5D7 - Reserved. */
    -        uint8_t     u8Reserved7[0x5D8-0x580];
    -        /** Offset 0x5D8 - RSP. */
    -        uint64_t    u64RSP;
    -        /** Offset 0x5E0-0x5F7 - Reserved. */
    -        uint8_t     u8Reserved8[0x5F8-0x5E0];
    -        /** Offset 0x5F8 - RAX. */
    -        uint64_t    u64RAX;
    -        /** Offset 0x600 - STAR. */
    -        uint64_t    u64STAR;
    -        /** Offset 0x608 - LSTAR. */
    -        uint64_t    u64LSTAR;
    -        /** Offset 0x610 - CSTAR. */
    -        uint64_t    u64CSTAR;
    -        /** Offset 0x618 - SFMASK. */
    -        uint64_t    u64SFMASK;
    -        /** Offset 0x620 - KernelGSBase. */
    -        uint64_t    u64KernelGSBase;
    -        /** Offset 0x628 - SYSENTER_CS. */
    -        uint64_t    u64SysEnterCS;
    -        /** Offset 0x630 - SYSENTER_ESP. */
    -        uint64_t    u64SysEnterESP;
    -        /** Offset 0x638 - SYSENTER_EIP. */
    -        uint64_t    u64SysEnterEIP;
    -        /** Offset 0x640 - CR2. */
    -        uint64_t    u64CR2;
    -        /** Offset 0x648-0x667 - Reserved. */
    -        uint8_t     u8Reserved9[0x668-0x648];
    -        /** Offset 0x668 - G_PAT. */
    -        uint64_t    u64GPAT;
    -        /** Offset 0x670 - DBGCTL. */
    -        uint64_t    u64DBGCTL;
    -        /** Offset 0x678 - BR_FROM. */
    -        uint64_t    u64BR_FROM;
    -        /** Offset 0x680 - BR_TO. */
    -        uint64_t    u64BR_TO;
    -        /** Offset 0x688 - LASTEXCPFROM. */
    -        uint64_t    u64LASTEXCPFROM;
    -        /** Offset 0x690 - LASTEXCPTO. */
    -        uint64_t    u64LASTEXCPTO;
    -    } guest;
    -
    +    /** Offset 0x400 - State save area. */
    +    SVMVMCBSTATESAVE guest;
         /** Offset 0x698-0xFFF- Reserved. */
         uint8_t     u8Reserved10[0x1000-0x698];
    @@ -808 +903 @@
     typedef const SVMVMCB *PCSVMVMCB;
     AssertCompileMemberOffset(SVMVMCB, ctrl, 0x00);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptRdCRx, 0x00);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptWrCRx, 0x02);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptRdDRx, 0x04);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptWrDRx, 0x06);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptException, 0x08);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64InterceptCtrl, 0x0C);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u8Reserved, 0x14);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u16PauseFilterThreshold, 0x3c);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u16PauseFilterCount, 0x3e);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64IOPMPhysAddr, 0x40);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64MSRPMPhysAddr, 0x48);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64TSCOffset, 0x50);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.TLBCtrl, 0x58);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.IntCtrl, 0x60);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64IntShadow, 0x68);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64ExitCode, 0x70);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64ExitInfo1, 0x78);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64ExitInfo2, 0x80);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.ExitIntInfo, 0x88);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.NestedPaging, 0x90);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.AvicBar, 0x98);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u8Reserved2, 0xA0);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.EventInject, 0xA8);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64NestedPagingCR3, 0xB0);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64LBRVirt, 0xB8);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64VmcbCleanBits, 0xC0);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u64NextRIP, 0xC8);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.cbInstrFetched, 0xD0);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.abInstr, 0xD1);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.AvicBackingPagePtr, 0xE0);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.u8Reserved3, 0xE8);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.AvicLogicalTablePtr, 0xF0);
    -AssertCompileMemberOffset(SVMVMCB, ctrl.AvicPhysicalTablePtr, 0xF8);
     AssertCompileMemberOffset(SVMVMCB, u8Reserved3, 0x100);
     AssertCompileMemberOffset(SVMVMCB, guest, 0x400);
    -AssertCompileMemberOffset(SVMVMCB, guest.ES, 0x400);
    -AssertCompileMemberOffset(SVMVMCB, guest.CS, 0x410);
    -AssertCompileMemberOffset(SVMVMCB, guest.SS, 0x420);
    -AssertCompileMemberOffset(SVMVMCB, guest.DS, 0x430);
    -AssertCompileMemberOffset(SVMVMCB, guest.FS, 0x440);
    -AssertCompileMemberOffset(SVMVMCB, guest.GS, 0x450);
    -AssertCompileMemberOffset(SVMVMCB, guest.GDTR, 0x460);
    -AssertCompileMemberOffset(SVMVMCB, guest.LDTR, 0x470);
    -AssertCompileMemberOffset(SVMVMCB, guest.IDTR, 0x480);
    -AssertCompileMemberOffset(SVMVMCB, guest.TR, 0x490);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved4, 0x4A0);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8CPL, 0x4CB);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved5, 0x4CC);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64EFER, 0x4D0);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved6, 0x4D8);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64CR4, 0x548);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64CR3, 0x550);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64CR0, 0x558);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64DR7, 0x560);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64DR6, 0x568);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64RFlags, 0x570);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64RIP, 0x578);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved7, 0x580);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64RSP, 0x5D8);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved8, 0x5E0);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64RAX, 0x5F8);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64STAR, 0x600);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64LSTAR, 0x608);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64CSTAR, 0x610);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64SFMASK, 0x618);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64KernelGSBase, 0x620);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64SysEnterCS, 0x628);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64SysEnterESP, 0x630);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64SysEnterEIP, 0x638);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64CR2, 0x640);
    -AssertCompileMemberOffset(SVMVMCB, guest.u8Reserved9, 0x648);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64GPAT, 0x668);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64DBGCTL, 0x670);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64BR_FROM, 0x678);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64BR_TO, 0x680);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64LASTEXCPFROM, 0x688);
    -AssertCompileMemberOffset(SVMVMCB, guest.u64LASTEXCPTO, 0x690);
     AssertCompileMemberOffset(SVMVMCB, u8Reserved10, 0x698);
     AssertCompileSize(SVMVMCB, 0x1000);
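    The net effect of this header change is structural: the control area and state-save area, previously anonymous structs inside SVMVMCB, become the standalone types SVMVMCBCTRL and SVMVMCBSTATESAVE (with the offset asserts hoisted along with them), so the upcoming vmrun/#VMEXIT code can pass around just the 0x100-byte control area instead of the whole 4K VMCB. A sketch of the kind of helper this enables (hypothetical name, not part of this changeset):

        /* Sketch: test an intercept bit given only the named control-area type. */
        DECLINLINE(bool) svmIsInterceptSet(PCSVMVMCBCTRL pVmcbCtrl, uint64_t fIntercept)
        {
            return RT_BOOL(pVmcbCtrl->u64InterceptCtrl & fIntercept);
        }

    A caller could then write svmIsInterceptSet(&pVmcb->ctrl, SVM_CTRL_INTERCEPT_VMRUN) without caring about the rest of the VMCB layout.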
  • trunk/include/VBox/vmm/iem.h

    r65934 r66000  
    @@ -117 +117 @@
     VMM_INT_DECL(void)          IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr);
     VMM_INT_DECL(void)          IEMTlbInvalidateAllPhysical(PVMCPU pVCpu);
    -
    +#ifdef VBOX_WITH_NESTED_HWVIRT
    +VMM_INT_DECL(bool)          IEMIsRaisingIntOrXcpt(PVMCPU pVCpu);
    +#endif
     
     /** @name Given Instruction Interpreters
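    IEMIsRaisingIntOrXcpt simply reports whether IEM's exception recursion counter is non-zero (see the IEMAll.cpp hunk below), which a future #VMEXIT implementation can use to decide whether the exit was taken while an event was mid-delivery. A hedged sketch of such a caller (hypothetical; recording the event is not implemented in this changeset):

        #ifdef VBOX_WITH_NESTED_HWVIRT
        if (IEMIsRaisingIntOrXcpt(pVCpu))
        {
            /* An event was being delivered when the intercept was recognized;
               a real #VMEXIT would record it in the VMCB's ExitIntInfo field. */
        }
        #endif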
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r65989 r66000  
    @@ -558 +558 @@
     
     
    -
    -/**
    - * SVM nested-guest \#VMEXIT handler.
    - *
    - * @param   pVCpu       The cross context virtual CPU structure.
    - * @param   uExitCode   The exit reason.
    - */
    -VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
    -{
    -    RT_NOREF2(pVCpu, uExitCode);
    -}
    -
    -
     /**
      * VMX nested-guest VM-exit handler.
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r65989 r66000  
    @@ -126 +126 @@
     /**
      * Performs the operations necessary that are part of the vmmcall instruction
    - * execution for AMD-V.
    + * execution in the guest.
      *
      * @returns Strict VBox status code (i.e. informational status codes too).
    - *
      * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
      *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
    @@ -173 +172 @@
     }
     
    +
    +/**
    + * Performs the operations necessary that are part of the vmrun instruction
    + * execution in the guest.
    + *
    + * @returns Strict VBox status code (i.e. informational status codes too).
    + *
    + * @param   pVCpu               The cross context virtual CPU structure.
    + * @param   pCtx                Pointer to the guest-CPU context.
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    RT_NOREF2(pVCpu, pCtx);
    +
    +    return VERR_NOT_IMPLEMENTED;
    +}
    +
    +
    +/**
    + * SVM nested-guest \#VMEXIT handler.
    + *
    + * @returns Strict VBox status code.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   pCtx        The guest-CPU context.
    + * @param   iExitCode   The exit reason (exit code).
    + * @param   uExitInfo1  The exit info. 1 field.
    + * @param   uExitInfo2  The exit info. 2 field.
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
    +                                             uint64_t uExitInfo2)
    +{
    +    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
    +        || iExitCode == SVM_EXIT_INVALID)
    +    {
    +        RT_NOREF(pVCpu);
    +
    +        pCtx->hwvirt.svm.fGif = 0;
    +
    +        /** @todo implement #VMEXIT. */
    +
    +        return VINF_SUCCESS;
    +    }
    +    else
    +        Log(("HMSvmNstGstVmExit: Not in SVM guest mode! uExitCode=%RI64 uExitInfo1=%RU64 uExitInfo2=%RU64\n", iExitCode,
    +             uExitInfo1, uExitInfo2));
    +
    +    return VERR_SVM_IPE_5;
    +}
    +
    +
    +/**
    + * Performs the functions of a VMRUN instruction.
    + *
    + * @returns Strict VBox status code.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + * @param   pCtx        The guest-CPU context.
    + */
    +VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmRun(PVMCPU pVCpu, PCPUMCTX pCtx)
    +{
    +    RT_NOREF2(pVCpu, pCtx);
    +    return VERR_NOT_IMPLEMENTED;
    +}
    +
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r65934 r66000  
    @@ -14954 +14954 @@
     #ifdef VBOX_WITH_NESTED_HWVIRT
     /**
    + * Checks if IEM is in the process of delivering an event (interrupt or
    + * exception).
    + *
    + * @returns true if it's raising an interrupt or exception, false otherwise.
    + * @param   pVCpu       The cross context virtual CPU structure.
    + */
    +VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
    +{
    +    return pVCpu->iem.s.cXcptRecursions > 0;
    +}
    +
    +
    +/**
      * Interface for HM and EM to emulate the STGI instruction.
      *
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r65989 r66000  
    @@ -5877 +5877 @@
     #ifdef VBOX_WITH_NESTED_HWVIRT
     /**
    + * Implements 'VMRUN'.
    + */
    +IEM_CIMPL_DEF_0(iemCImpl_vmrun)
    +{
    +    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    +    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
    +
    +    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    +    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    +        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    +    {
    +        Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
    +        return iemRaiseGeneralProtectionFault0(pVCpu);
    +    }
    +
    +#ifndef IN_RC
    +    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
    +    {
    +        Log(("vmrun: Guest intercept -> VMexit\n"));
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    +    }
    +#endif
    +
    +    /** @todo think - I probably need to map both the HSAVE area page and the
    +     *        guest VMCB via iemMemPageMap here and do the copying? */
    +    pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
    +    void *pvVmcb;
    +    PGMPAGEMAPLOCK PgLockVmcb;
    +    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
    +    if (rcStrict == VINF_SUCCESS)
    +        return HMSvmVmrun(pVCpu, pCtx);
    +    RT_NOREF(cbInstr);
    +    return rcStrict;
    +}
    +
    +
    +/**
      * Implements 'VMMCALL'.
      */
     IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
     {
    -    /*
    -     * We do not check for presence of SVM/AMD-V here as the KVM GIM provider
    -     * might patch in an invalid vmmcall instruction with an Intel vmcall
    -     * instruction.
    -     */
    +    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    +#ifndef IN_RC
    +    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
    +    {
    +        Log(("vmmcall: Guest intercept -> VMexit\n"));
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    +    }
    +#endif
    +
         bool fUpdatedRipAndRF;
    -    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
         VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
         if (RT_SUCCESS(rcStrict))
    @@ -5911 +5951 @@
         {
             Log(("vmload: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMLOAD);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
     
         RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    -    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    -    {
    -        Log(("vmload: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
    +    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    +        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    +    {
    +        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }
    @@ -5962 +6002 @@
         {
             Log(("vmsave: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMSAVE);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
     
         RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
    -    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    -    {
    -        Log(("vmsave: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
    +    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
    +        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
    +    {
    +        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
             return iemRaiseGeneralProtectionFault0(pVCpu);
         }
    @@ -6013 +6053 @@
         {
             Log(("clgi: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
    @@ -6035 +6074 @@
         {
             Log2(("stgi: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_STGI);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
    @@ -6057 +6095 @@
         {
             Log2(("invlpga: Guest intercept -> VMexit\n"));
    -        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_INVLPGA);
    -        return VINF_EM_RESCHEDULE;
    +        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
         }
     #endif
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r65989 r66000  
    @@ -439 +439 @@
     
     
    +#ifdef VBOX_WITH_NESTED_HWVIRT
    +/** Opcode 0x0f 0x01 0xd8. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmrun)
    +{
    +    IEMOP_MNEMONIC(vmrun, "vmrun");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmrun);
    +}
    +
    +/** Opcode 0x0f 0x01 0xd9. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
    +{
    +    IEMOP_MNEMONIC(vmmcall, "vmmcall");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xda. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
    +{
    +    IEMOP_MNEMONIC(vmload, "vmload");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdb. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
    +{
    +    IEMOP_MNEMONIC(vmsave, "vmsave");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdc. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
    +{
    +    IEMOP_MNEMONIC(stgi, "stgi");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdd. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
    +{
    +    IEMOP_MNEMONIC(clgi, "clgi");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
    +}
    +
    +
    +/** Opcode 0x0f 0x01 0xdf. */
    +FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
    +{
    +    IEMOP_MNEMONIC(invlpga, "invlpga");
    +    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    +}
    +#else
     /** Opcode 0x0f 0x01 0xd8. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
     
    -#ifdef VBOX_WITH_NESTED_HWVIRT
    -/** Opcode 0x0f 0x01 0xd9. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
    -{
    -    IEMOP_MNEMONIC(vmmcall, "vmmcall");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xda. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
    -{
    -    IEMOP_MNEMONIC(vmload, "vmload");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdb. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
    -{
    -    IEMOP_MNEMONIC(vmsave, "vmsave");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdc. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
    -{
    -    IEMOP_MNEMONIC(stgi, "stgi");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdd. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
    -{
    -    IEMOP_MNEMONIC(clgi, "clgi");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
    -}
    -
    -
    -/** Opcode 0x0f 0x01 0xdf. */
    -FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
    -{
    -    IEMOP_MNEMONIC(invlpga, "invlpga");
    -    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
    -}
    -#else
     /** Opcode 0x0f 0x01 0xd9. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
    -
     /** Opcode 0x0f 0x01 0xda. */
     FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r65909 r66000  
    @@ -239 +239 @@
         .Guest.hwvirt.svm.u16InterceptWrDRx   resw    1
         .Guest.hwvirt.svm.fGif                resb    1
    +    .Guest.hwvirt.svm.abPadding           resb    3
    +    .Guest.hwvirt.svm.GCPhysNstGstVmcb    resq    1
         alignb 64
     
    @@ -510 +512 @@
         .Hyper.hwvirt.svm.u16InterceptWrDRx   resw    1
         .Hyper.hwvirt.svm.fGif                resb    1
    +    .Hyper.hwvirt.svm.abPadding           resb    3
    +    .Hyper.hwvirt.svm.GCPhysNstGstVmcb    resq    1
         alignb 64
     