VirtualBox

Changeset 65904 in vbox for trunk


Ignore:
Timestamp:
Mar 1, 2017 10:21:38 AM (8 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
113697
Message:

VMM: Nested Hw.virt: Started with tweaking the AMD bits and laying the groundwork.

Location:
trunk
Files:
20 edited

Legend:

Unmodified
Added
Removed
  • trunk/Config.kmk

    r65732 r65904  
    397397# Enables the third step using IEM (the interpreter).
    398398VBOX_WITH_3RD_IEM_STEP = 1
     399# Enables nested hardware virtualization support (mainly for IEM)
     400VBOX_WITH_NESTED_HWVIRT = 1
    399401## @}
    400402
  • trunk/include/VBox/vmm/cpum.h

    r65763 r65904  
    10251025    uint32_t        fLeakyFxSR : 1;
    10261026
     1027    /** AMD64: Supports AMD SVM. */
     1028    uint32_t        fSvm : 1;
     1029
    10271030    /** Alignment padding / reserved for future use. */
    1028     uint32_t        fPadding : 27;
     1031    uint32_t        fPadding : 26;
    10291032    uint32_t        auPadding[3];
    10301033} CPUMFEATURES;
     
    12541257}
    12551258
     1259/**
     1260 * Checks if the guest has the specified ctrl/instruction
     1261 * intercept active.
     1262 *
      1263 * @returns @c true if the intercept is set, @c false otherwise.
      1264 * @param   pCtx          Pointer to the context.
      1265 * @param   fIntercept    The SVM control/instruction intercept,
      1266 *                        see SVM_CTRL_INTERCEPT_*.
     1267 */
     1268DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCPUMCTX pCtx, uint64_t fIntercept)
     1269{
     1270    return RT_BOOL(pCtx->hwvirt.svm.u64InterceptCtrl & fIntercept);
     1271}
     1272
     1273/**
     1274 * Checks if the guest has the specified CR read intercept
     1275 * active.
     1276 *
      1277 * @returns @c true if the intercept is set, @c false otherwise.
     1278 * @param   pCtx          Pointer to the context.
     1279 * @param   uCr           The CR register number (0 to 15).
     1280 */
     1281DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
     1282{
     1283    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdCRx & (1 << uCr));
     1284}
     1285
     1286/**
     1287 * Checks if the guest has the specified CR write intercept
     1288 * active.
     1289 *
      1290 * @returns @c true if the intercept is set, @c false otherwise.
     1291 * @param   pCtx          Pointer to the context.
     1292 * @param   uCr           The CR register number (0 to 15).
     1293 */
     1294DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
     1295{
     1296    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrCRx & (1 << uCr));
     1297}
     1298
     1299/**
     1300 * Checks if the guest has the specified DR read intercept
     1301 * active.
     1302 *
      1303 * @returns @c true if the intercept is set, @c false otherwise.
     1304 * @param   pCtx    Pointer to the context.
     1305 * @param   uDr     The DR register number (0 to 15).
     1306 */
     1307DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
     1308{
     1309    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdDRx & (1 << uDr));
     1310}
     1311
     1312/**
     1313 * Checks if the guest has the specified DR write intercept
     1314 * active.
     1315 *
      1316 * @returns @c true if the intercept is set, @c false otherwise.
     1317 * @param   pCtx    Pointer to the context.
     1318 * @param   uDr     The DR register number (0 to 15).
     1319 */
     1320DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
     1321{
     1322    return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrDRx & (1 << uDr));
     1323}
     1324
     1325/**
     1326 * Checks if the guest has the specified exception
     1327 * intercept active.
     1328 *
      1329 * @returns true if the intercept is active, false otherwise.
     1330 * @param   pCtx        Pointer to the context.
     1331 * @param   enmXcpt     The exception.
     1332 */
     1333DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCPUMCTX pCtx, X86XCPT enmXcpt)
     1334{
      1335    return RT_BOOL(pCtx->hwvirt.svm.u32InterceptXcpt & RT_BIT(enmXcpt));
     1336}
     1337
    12561338#endif /* VBOX_WITHOUT_UNNAMED_UNIONS */
    12571339
  • trunk/include/VBox/vmm/cpum.mac

    r64663 r65904  
    253253    .pXStateRC      RTRCPTR_RES 1
    254254    .aoffXState         resw    64
     255    alignb 8
     256    .hwvirt.svm.uMsrHSavePa         resq    1
     257    .hwvirt.svm.u64InterceptCtrl    resq    1
     258    .hwvirt.svm.u32InterceptXcpt    resd    1
     259    .hwvirt.svm.u16InterceptRdCRx   resw    1
     260    .hwvirt.svm.u16InterceptWrCRx   resw    1
     261    .hwvirt.svm.u16InterceptRdDRx   resw    1
     262    .hwvirt.svm.u16InterceptWrDRx   resw    1
     263    .hwvirt.svm.fGif                resb    1
    255264    alignb 64
    256265endstruc
  • trunk/include/VBox/vmm/cpumctx.h

    r64663 r65904  
    431431    uint16_t                    aoffXState[64];
    432432
    433     /** Size padding. */
    434     uint32_t        au32SizePadding[HC_ARCH_BITS == 32 ? 13 : 11];
     433    /** 724 - Size padding. */
     434    uint32_t                    u32Padding;
     435
     436    /** 728 - Hardware virtualization state.   */
     437    struct
     438    {
     439        union   /* no tag! */
     440        {
     441            struct
     442            {
     443                /** 728 - MSR holding physical address of the Guest's 'host-state'. */
     444                uint64_t            uMsrHSavePa;
     445
     446                /** @name Cache of the nested-guest VMCB controls.
     447                 * @{ */
     448                /** 736 - Control intercepts. */
     449                uint64_t            u64InterceptCtrl;
     450                /** 744 - Exception intercepts. */
     451                uint32_t            u32InterceptXcpt;
     452                /** 748 - CR0-CR15 read intercepts. */
     453                uint16_t            u16InterceptRdCRx;
     454                /** 750 - CR0-CR15 write intercepts. */
     455                uint16_t            u16InterceptWrCRx;
     456                /** 752 - DR0-DR15 read intercepts. */
     457                uint16_t            u16InterceptRdDRx;
     458                /** 754 - DR0-DR15 write intercepts. */
     459                uint16_t            u16InterceptWrDRx;
     460                /** @} */
     461
     462                /** 756 - Global interrupt flag. */
     463                uint8_t            fGif;
     464                /** 757 - Padding. */
     465                uint8_t            abPadding[11];
     466            } svm;
     467            struct
     468            {
     469            } vmx;
     470        } CPUM_UNION_NM(s);
     471    } hwvirt;
     472    /** @} */
    435473} CPUMCTX;
    436474#pragma pack()
     
    485523AssertCompileMemberOffset(CPUMCTX,                  pXStateRC, HC_ARCH_BITS == 64 ? 592 : 584);
    486524AssertCompileMemberOffset(CPUMCTX,                 aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
     525AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
     526AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.uMsrHSavePa,       728);
     527AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u64InterceptCtrl,  736);
     528AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u32InterceptXcpt,  744);
     529AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptRdCRx, 748);
     530AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptWrCRx, 750);
     531AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptRdDRx, 752);
     532AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.u16InterceptWrDRx, 754);
     533AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,              756);
     534
    487535AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
    488536AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.)  r0);
  • trunk/include/VBox/vmm/hm.h

    r62476 r65904  
    147147VMM_INT_DECL(void)              HMHypercallsEnable(PVMCPU pVCpu);
    148148VMM_INT_DECL(void)              HMHypercallsDisable(PVMCPU pVCpu);
     149
     150VMM_INT_DECL(void)              HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode);
     151VMM_INT_DECL(void)              HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);
    149152
    150153#ifndef IN_RC
  • trunk/include/VBox/vmm/hm_svm.h

    r62476 r65904  
    301301/** @} */
    302302
    303 /** @name SVMVMCB.ctrl.u32InterceptCtrl1
    304  * @{
    305  */
    306 /** 0 Intercept INTR (physical maskable interrupt). */
    307 #define SVM_CTRL1_INTERCEPT_INTR              RT_BIT(0)
    308 /** 1 Intercept NMI. */
    309 #define SVM_CTRL1_INTERCEPT_NMI               RT_BIT(1)
    310 /** 2 Intercept SMI. */
    311 #define SVM_CTRL1_INTERCEPT_SMI               RT_BIT(2)
    312 /** 3 Intercept INIT. */
    313 #define SVM_CTRL1_INTERCEPT_INIT              RT_BIT(3)
    314 /** 4 Intercept VINTR (virtual maskable interrupt). */
    315 #define SVM_CTRL1_INTERCEPT_VINTR             RT_BIT(4)
    316 /** 5 Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
    317 #define SVM_CTRL1_INTERCEPT_CR0               RT_BIT(5)
    318 /** 6 Intercept reads of IDTR. */
    319 #define SVM_CTRL1_INTERCEPT_IDTR_READS        RT_BIT(6)
    320 /** 7 Intercept reads of GDTR. */
    321 #define SVM_CTRL1_INTERCEPT_GDTR_READS        RT_BIT(7)
    322 /** 8 Intercept reads of LDTR. */
    323 #define SVM_CTRL1_INTERCEPT_LDTR_READS        RT_BIT(8)
    324 /** 9 Intercept reads of TR. */
    325 #define SVM_CTRL1_INTERCEPT_TR_READS          RT_BIT(9)
    326 /** 10 Intercept writes of IDTR. */
    327 #define SVM_CTRL1_INTERCEPT_IDTR_WRITES       RT_BIT(10)
    328 /** 11 Intercept writes of GDTR. */
    329 #define SVM_CTRL1_INTERCEPT_GDTR_WRITES       RT_BIT(11)
    330 /** 12 Intercept writes of LDTR. */
    331 #define SVM_CTRL1_INTERCEPT_LDTR_WRITES       RT_BIT(12)
    332 /** 13 Intercept writes of TR. */
    333 #define SVM_CTRL1_INTERCEPT_TR_WRITES         RT_BIT(13)
    334 /** 14 Intercept RDTSC instruction. */
    335 #define SVM_CTRL1_INTERCEPT_RDTSC             RT_BIT(14)
    336 /** 15 Intercept RDPMC instruction. */
    337 #define SVM_CTRL1_INTERCEPT_RDPMC             RT_BIT(15)
    338 /** 16 Intercept PUSHF instruction. */
    339 #define SVM_CTRL1_INTERCEPT_PUSHF             RT_BIT(16)
    340 /** 17 Intercept POPF instruction. */
    341 #define SVM_CTRL1_INTERCEPT_POPF              RT_BIT(17)
    342 /** 18 Intercept CPUID instruction. */
    343 #define SVM_CTRL1_INTERCEPT_CPUID             RT_BIT(18)
    344 /** 19 Intercept RSM instruction. */
    345 #define SVM_CTRL1_INTERCEPT_RSM               RT_BIT(19)
    346 /** 20 Intercept IRET instruction. */
    347 #define SVM_CTRL1_INTERCEPT_IRET              RT_BIT(20)
    348 /** 21 Intercept INTn instruction. */
    349 #define SVM_CTRL1_INTERCEPT_INTN              RT_BIT(21)
    350 /** 22 Intercept INVD instruction. */
    351 #define SVM_CTRL1_INTERCEPT_INVD              RT_BIT(22)
    352 /** 23 Intercept PAUSE instruction. */
    353 #define SVM_CTRL1_INTERCEPT_PAUSE             RT_BIT(23)
    354 /** 24 Intercept HLT instruction. */
    355 #define SVM_CTRL1_INTERCEPT_HLT               RT_BIT(24)
    356 /** 25 Intercept INVLPG instruction. */
    357 #define SVM_CTRL1_INTERCEPT_INVLPG            RT_BIT(25)
    358 /** 26 Intercept INVLPGA instruction. */
    359 #define SVM_CTRL1_INTERCEPT_INVLPGA           RT_BIT(26)
    360 /** 27 IOIO_PROT Intercept IN/OUT accesses to selected ports. */
    361 #define SVM_CTRL1_INTERCEPT_INOUT_BITMAP      RT_BIT(27)
    362 /** 28 MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
    363 #define SVM_CTRL1_INTERCEPT_MSR_SHADOW        RT_BIT(28)
    364 /** 29 Intercept task switches. */
    365 #define SVM_CTRL1_INTERCEPT_TASK_SWITCH       RT_BIT(29)
    366 /** 30 FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
    367 #define SVM_CTRL1_INTERCEPT_FERR_FREEZE       RT_BIT(30)
    368 /** 31 Intercept shutdown events. */
    369 #define SVM_CTRL1_INTERCEPT_SHUTDOWN          RT_BIT(31)
    370 /** @} */
    371 
    372 
    373 /** @name SVMVMCB.ctrl.u32InterceptCtrl2
    374  * @{
    375  */
    376 /** 0 Intercept VMRUN instruction. */
    377 #define SVM_CTRL2_INTERCEPT_VMRUN             RT_BIT(0)
    378 /** 1 Intercept VMMCALL instruction. */
    379 #define SVM_CTRL2_INTERCEPT_VMMCALL           RT_BIT(1)
    380 /** 2 Intercept VMLOAD instruction. */
    381 #define SVM_CTRL2_INTERCEPT_VMLOAD            RT_BIT(2)
    382 /** 3 Intercept VMSAVE instruction. */
    383 #define SVM_CTRL2_INTERCEPT_VMSAVE            RT_BIT(3)
    384 /** 4 Intercept STGI instruction. */
    385 #define SVM_CTRL2_INTERCEPT_STGI              RT_BIT(4)
    386 /** 5 Intercept CLGI instruction. */
    387 #define SVM_CTRL2_INTERCEPT_CLGI              RT_BIT(5)
    388 /** 6 Intercept SKINIT instruction. */
    389 #define SVM_CTRL2_INTERCEPT_SKINIT            RT_BIT(6)
    390 /** 7 Intercept RDTSCP instruction. */
    391 #define SVM_CTRL2_INTERCEPT_RDTSCP            RT_BIT(7)
    392 /** 8 Intercept ICEBP instruction. */
    393 #define SVM_CTRL2_INTERCEPT_ICEBP             RT_BIT(8)
    394 /** 9 Intercept WBINVD instruction. */
    395 #define SVM_CTRL2_INTERCEPT_WBINVD            RT_BIT(9)
    396 /** 10 Intercept MONITOR instruction. */
    397 #define SVM_CTRL2_INTERCEPT_MONITOR           RT_BIT(10)
    398 /** 11 Intercept MWAIT instruction unconditionally. */
    399 #define SVM_CTRL2_INTERCEPT_MWAIT             RT_BIT(11)
    400 /** 12 Intercept MWAIT instruction when armed. */
    401 #define SVM_CTRL2_INTERCEPT_MWAIT_ARMED       RT_BIT(12)
    402 /** 13 Intercept XSETBV instruction. */
    403 #define SVM_CTRL2_INTERCEPT_XSETBV            RT_BIT(13)
     303
     304/** @name SVMVMCB.ctrl.u64InterceptCtrl
     305 * @{
     306 */
     307/** Intercept INTR (physical maskable interrupt). */
     308#define SVM_CTRL_INTERCEPT_INTR               RT_BIT_64(0)
     309/** Intercept NMI. */
     310#define SVM_CTRL_INTERCEPT_NMI                RT_BIT_64(1)
     311/** Intercept SMI. */
     312#define SVM_CTRL_INTERCEPT_SMI                RT_BIT_64(2)
     313/** Intercept INIT. */
     314#define SVM_CTRL_INTERCEPT_INIT               RT_BIT_64(3)
     315/** Intercept VINTR (virtual maskable interrupt). */
     316#define SVM_CTRL_INTERCEPT_VINTR              RT_BIT_64(4)
     317/** Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
     318#define SVM_CTRL_INTERCEPT_CR0                RT_BIT_64(5)
     319/** Intercept reads of IDTR. */
     320#define SVM_CTRL_INTERCEPT_IDTR_READS         RT_BIT_64(6)
     321/** Intercept reads of GDTR. */
     322#define SVM_CTRL_INTERCEPT_GDTR_READS         RT_BIT_64(7)
     323/** Intercept reads of LDTR. */
     324#define SVM_CTRL_INTERCEPT_LDTR_READS         RT_BIT_64(8)
     325/** Intercept reads of TR. */
     326#define SVM_CTRL_INTERCEPT_TR_READS           RT_BIT_64(9)
     327/** Intercept writes of IDTR. */           
     328#define SVM_CTRL_INTERCEPT_IDTR_WRITES        RT_BIT_64(10)
     329/** Intercept writes of GDTR. */           
     330#define SVM_CTRL_INTERCEPT_GDTR_WRITES        RT_BIT_64(11)
     331/** Intercept writes of LDTR. */           
     332#define SVM_CTRL_INTERCEPT_LDTR_WRITES        RT_BIT_64(12)
     333/** Intercept writes of TR. */             
     334#define SVM_CTRL_INTERCEPT_TR_WRITES          RT_BIT_64(13)
     335/** Intercept RDTSC instruction. */       
     336#define SVM_CTRL_INTERCEPT_RDTSC              RT_BIT_64(14)
     337/** Intercept RDPMC instruction. */       
     338#define SVM_CTRL_INTERCEPT_RDPMC              RT_BIT_64(15)
     339/** Intercept PUSHF instruction. */       
     340#define SVM_CTRL_INTERCEPT_PUSHF              RT_BIT_64(16)
     341/** Intercept POPF instruction. */         
     342#define SVM_CTRL_INTERCEPT_POPF               RT_BIT_64(17)
     343/** Intercept CPUID instruction. */       
     344#define SVM_CTRL_INTERCEPT_CPUID              RT_BIT_64(18)
     345/** Intercept RSM instruction. */         
     346#define SVM_CTRL_INTERCEPT_RSM                RT_BIT_64(19)
     347/** Intercept IRET instruction. */         
     348#define SVM_CTRL_INTERCEPT_IRET               RT_BIT_64(20)
     349/** Intercept INTn instruction. */         
     350#define SVM_CTRL_INTERCEPT_INTN               RT_BIT_64(21)
     351/** Intercept INVD instruction. */         
     352#define SVM_CTRL_INTERCEPT_INVD               RT_BIT_64(22)
     353/** Intercept PAUSE instruction. */       
     354#define SVM_CTRL_INTERCEPT_PAUSE              RT_BIT_64(23)
     355/** Intercept HLT instruction. */         
     356#define SVM_CTRL_INTERCEPT_HLT                RT_BIT_64(24)
     357/** Intercept INVLPG instruction. */       
     358#define SVM_CTRL_INTERCEPT_INVLPG             RT_BIT_64(25)
     359/** Intercept INVLPGA instruction. */     
     360#define SVM_CTRL_INTERCEPT_INVLPGA            RT_BIT_64(26)
     361/** IOIO_PROT Intercept IN/OUT accesses to selected ports. */
     362#define SVM_CTRL_INTERCEPT_INOUT_BITMAP       RT_BIT_64(27)
     363/** MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
     364#define SVM_CTRL_INTERCEPT_MSR_SHADOW         RT_BIT_64(28)
     365/** Intercept task switches. */
     366#define SVM_CTRL_INTERCEPT_TASK_SWITCH        RT_BIT_64(29)
     367/** FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
     368#define SVM_CTRL_INTERCEPT_FERR_FREEZE        RT_BIT_64(30)
     369/** Intercept shutdown events. */               
     370#define SVM_CTRL_INTERCEPT_SHUTDOWN           RT_BIT_64(31)
     371/** Intercept VMRUN instruction. */             
     372#define SVM_CTRL_INTERCEPT_VMRUN              RT_BIT_64(32 + 0)
     373/** Intercept VMMCALL instruction. */           
     374#define SVM_CTRL_INTERCEPT_VMMCALL            RT_BIT_64(32 + 1)
     375/** Intercept VMLOAD instruction. */                 
     376#define SVM_CTRL_INTERCEPT_VMLOAD             RT_BIT_64(32 + 2)
     377/** Intercept VMSAVE instruction. */             
     378#define SVM_CTRL_INTERCEPT_VMSAVE             RT_BIT_64(32 + 3)
     379/** Intercept STGI instruction. */               
     380#define SVM_CTRL_INTERCEPT_STGI               RT_BIT_64(32 + 4)
     381/** Intercept CLGI instruction. */               
     382#define SVM_CTRL_INTERCEPT_CLGI               RT_BIT_64(32 + 5)
     383/** Intercept SKINIT instruction. */             
     384#define SVM_CTRL_INTERCEPT_SKINIT             RT_BIT_64(32 + 6)
     385/** Intercept RDTSCP instruction. */             
     386#define SVM_CTRL_INTERCEPT_RDTSCP             RT_BIT_64(32 + 7)
     387/** Intercept ICEBP instruction. */             
     388#define SVM_CTRL_INTERCEPT_ICEBP              RT_BIT_64(32 + 8)
     389/** Intercept WBINVD instruction. */             
     390#define SVM_CTRL_INTERCEPT_WBINVD             RT_BIT_64(32 + 9)
     391/** Intercept MONITOR instruction. */           
     392#define SVM_CTRL_INTERCEPT_MONITOR            RT_BIT_64(32 + 10)
     393/** Intercept MWAIT instruction unconditionally. */   
     394#define SVM_CTRL_INTERCEPT_MWAIT              RT_BIT_64(32 + 11)
     395/** Intercept MWAIT instruction when armed. */   
     396#define SVM_CTRL_INTERCEPT_MWAIT_ARMED        RT_BIT_64(32 + 12)
     397/** Intercept XSETBV instruction. */
     398#define SVM_CTRL_INTERCEPT_XSETBV             RT_BIT_64(32 + 13)
     399/* Bit 14 - Reserved, SBZ. */
     400/** Intercept EFER writes after guest instruction finishes. */
     401#define SVM_CTRL_INTERCEPT_EFER_WRITES_TRAP   RT_BIT_64(32 + 15)
     402/** Intercept CR0 writes after guest instruction finishes. */
     403#define SVM_CTRL_INTERCEPT_CR0_WRITES_TRAP    RT_BIT_64(32 + 16)
      404/** Intercept CR1 writes after guest instruction finishes. */
      405#define SVM_CTRL_INTERCEPT_CR1_WRITES_TRAP    RT_BIT_64(32 + 17)
      406/** Intercept CR2 writes after guest instruction finishes. */
      407#define SVM_CTRL_INTERCEPT_CR2_WRITES_TRAP    RT_BIT_64(32 + 18)
      408/** Intercept CR3 writes after guest instruction finishes. */
      409#define SVM_CTRL_INTERCEPT_CR3_WRITES_TRAP    RT_BIT_64(32 + 19)
      410/** Intercept CR4 writes after guest instruction finishes. */
      411#define SVM_CTRL_INTERCEPT_CR4_WRITES_TRAP    RT_BIT_64(32 + 20)
      412/** Intercept CR5 writes after guest instruction finishes. */
      413#define SVM_CTRL_INTERCEPT_CR5_WRITES_TRAP    RT_BIT_64(32 + 21)
      414/** Intercept CR6 writes after guest instruction finishes. */
      415#define SVM_CTRL_INTERCEPT_CR6_WRITES_TRAP    RT_BIT_64(32 + 22)
      416/** Intercept CR7 writes after guest instruction finishes. */
      417#define SVM_CTRL_INTERCEPT_CR7_WRITES_TRAP    RT_BIT_64(32 + 23)
      418/** Intercept CR8 writes after guest instruction finishes. */
      419#define SVM_CTRL_INTERCEPT_CR8_WRITES_TRAP    RT_BIT_64(32 + 24)
      420/** Intercept CR9 writes after guest instruction finishes. */
      421#define SVM_CTRL_INTERCEPT_CR9_WRITES_TRAP    RT_BIT_64(32 + 25)
      422/** Intercept CR10 writes after guest instruction finishes. */
      423#define SVM_CTRL_INTERCEPT_CR10_WRITES_TRAP   RT_BIT_64(32 + 26)
      424/** Intercept CR11 writes after guest instruction finishes. */
      425#define SVM_CTRL_INTERCEPT_CR11_WRITES_TRAP   RT_BIT_64(32 + 27)
      426/** Intercept CR12 writes after guest instruction finishes. */
      427#define SVM_CTRL_INTERCEPT_CR12_WRITES_TRAP   RT_BIT_64(32 + 28)
      428/** Intercept CR13 writes after guest instruction finishes. */
      429#define SVM_CTRL_INTERCEPT_CR13_WRITES_TRAP   RT_BIT_64(32 + 29)
      430/** Intercept CR14 writes after guest instruction finishes. */
      431#define SVM_CTRL_INTERCEPT_CR14_WRITES_TRAP   RT_BIT_64(32 + 30)
      432/** Intercept CR15 writes after guest instruction finishes. */
      433#define SVM_CTRL_INTERCEPT_CR15_WRITES_TRAP   RT_BIT_64(32 + 31)
    404434/** @} */
    405435
     
    603633 * SVM VM Control Block. (VMCB)
    604634 */
     635#pragma pack(1)
    605636typedef struct SVMVMCB
    606637{
     
    608639    struct
    609640    {
    610         /** Offset 0x00 - Intercept reads of CR0-15. */
     641        /** Offset 0x00 - Intercept reads of CR0-CR15. */
    611642        uint16_t    u16InterceptRdCRx;
    612         /** Offset 0x02 - Intercept writes to CR0-15. */
     643        /** Offset 0x02 - Intercept writes to CR0-CR15. */
    613644        uint16_t    u16InterceptWrCRx;
    614         /** Offset 0x04 - Intercept reads of DR0-15. */
     645        /** Offset 0x04 - Intercept reads of DR0-DR15. */
    615646        uint16_t    u16InterceptRdDRx;
    616         /** Offset 0x06 - Intercept writes to DR0-15. */
     647        /** Offset 0x06 - Intercept writes to DR0-DR15. */
    617648        uint16_t    u16InterceptWrDRx;
    618649        /** Offset 0x08 - Intercept exception vectors 0-31. */
    619650        uint32_t    u32InterceptException;
    620         /** Offset 0x0C - Intercept control field 1. */
    621         uint32_t    u32InterceptCtrl1;
    622         /** Offset 0x10 - Intercept control field 2. */
    623         uint32_t    u32InterceptCtrl2;
     651        /** Offset 0x0C - Intercept control. */
     652        uint64_t    u64InterceptCtrl;
    624653        /** Offset 0x14-0x3F - Reserved. */
    625654        uint8_t     u8Reserved[0x3c - 0x14];
     
    773802    uint8_t     u8Reserved10[0x1000-0x698];
    774803} SVMVMCB;
     804#pragma pack()
    775805/** Pointer to the SVMVMCB structure. */
    776806typedef SVMVMCB *PSVMVMCB;
     
    781811AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptWrDRx, 0x06);
    782812AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptException, 0x08);
    783 AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptCtrl1, 0x0C);
    784 AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptCtrl2, 0x10);
     813AssertCompileMemberOffset(SVMVMCB, ctrl.u64InterceptCtrl, 0x0C);
    785814AssertCompileMemberOffset(SVMVMCB, ctrl.u8Reserved, 0x14);
    786815AssertCompileMemberOffset(SVMVMCB, ctrl.u16PauseFilterThreshold, 0x3c);
  • trunk/include/VBox/vmm/iem.h

    r65587 r65904  
    132132VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue);
    133133VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr);
     134#ifdef VBOX_WITH_NESTED_HWVIRT
     135VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr);
     136VMM_INT_DECL(VBOXSTRICTRC)  IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr);
     137#endif
    134138/** @}  */
    135139
  • trunk/include/iprt/x86.h

    r65776 r65904  
    14591459 * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors" */
    14601460#define MSR_K8_INT_PENDING                  UINT32_C(0xc0010055)
     1461
     1462/** SVM Control. */
    14611463#define MSR_K8_VM_CR                        UINT32_C(0xc0010114)
     1464/** Disables HDT (Hardware Debug Tool) and certain internal debug
     1465 *  features. */
     1466#define MSR_K8_VM_CR_DPD                    RT_BIT_32(0)
     1467/** If set, non-intercepted INIT signals are converted to \#SX
     1468 *  exceptions. */
     1469#define MSR_K8_VM_CR_R_INIT                 RT_BIT_32(1)
     1470/** Disables A20 masking.  */
     1471#define MSR_K8_VM_CR_DIS_A20M               RT_BIT_32(2)
     1472/** Lock bit for this MSR controlling bits 3 (LOCK) and 4 (SVMDIS). */
     1473#define MSR_K8_VM_CR_LOCK                   RT_BIT_32(3)
     1474/** SVM disable. When set, writes to EFER.SVME are treated as MBZ. When
     1475 *  clear, EFER.SVME can be written normally. */
    14621476#define MSR_K8_VM_CR_SVM_DISABLE            RT_BIT_32(4)
    14631477
  • trunk/src/VBox/VMM/Makefile.kmk

    r65871 r65904  
    4848ifdef VBOX_WITH_3RD_IEM_STEP
    4949 VMM_COMMON_DEFS += VBOX_WITH_3RD_IEM_STEP
     50endif
     51ifdef VBOX_WITH_NESTED_HWVIRT
     52 VMM_COMMON_DEFS += VBOX_WITH_NESTED_HWVIRT
    5053endif
    5154#ifdef VBOX_WITH_IEM
  • trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp

    r64655 r65904  
    14531453    if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
    14541454        fMask |= MSR_K6_EFER_FFXSR;
     1455    if (pVM->cpum.s.GuestFeatures.fSvm)
     1456        fMask |= MSR_K6_EFER_SVME;
    14551457
    14561458    /* #GP(0) If anything outside the allowed bits is set. */
     
    14711473
    14721474    /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
    1473     AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
    1474               ("Unexpected value %RX64\n", uValue));
     1475    AssertMsg(!(uValue & ~(  MSR_K6_EFER_NXE
     1476                           | MSR_K6_EFER_LME
     1477                           | MSR_K6_EFER_LMA /* ignored anyway */
     1478                           | MSR_K6_EFER_SCE
     1479                           | MSR_K6_EFER_FFXSR
     1480                           | MSR_K6_EFER_SVME)),
     1481              ("Unexpected value %#RX64\n", uValue));
    14751482    pVCpu->cpum.s.Guest.msrEFER = (uOldEfer & ~fMask) | (uValue & fMask);
    14761483
     
    37433750static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
    37443751{
    3745     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
    3746     /** @todo AMD SVM. */
    3747     *puValue = 0;
     3752    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
     3753    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3754    if (pVM->cpum.s.GuestFeatures.fSvm)
     3755        *puValue = MSR_K8_VM_CR_LOCK;
     3756    else
     3757        *puValue = 0;
    37483758    return VINF_SUCCESS;
    37493759}
     
    37533763static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
    37543764{
    3755     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uValue); RT_NOREF_PV(uRawValue);
    3756     /** @todo AMD SVM. */
    3757     return VINF_SUCCESS;
     3765    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
     3766    PVM pVM = pVCpu->CTX_SUFF(pVM);
     3767    if (pVM->cpum.s.GuestFeatures.fSvm)
     3768    {
     3769        /* Silently ignore writes to LOCK and SVM_DISABLE bit when the LOCK bit is set (see cpumMsrRd_AmdK8VmCr). */
     3770        if (uValue & (MSR_K8_VM_CR_DPD | MSR_K8_VM_CR_R_INIT | MSR_K8_VM_CR_DIS_A20M))
     3771            return VERR_CPUM_RAISE_GP_0;
     3772        return VINF_SUCCESS;
     3773    }
     3774    return VERR_CPUM_RAISE_GP_0;
    37583775}
    37593776
     
    38013818{
    38023819    RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
    3803     /** @todo AMD SVM. */
    3804     *puValue = 0;
     3820    *puValue = pVCpu->cpum.s.Guest.hwvirt.svm.uMsrHSavePa;
    38053821    return VINF_SUCCESS;
    38063822}
     
    38103826static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
    38113827{
    3812     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uValue); RT_NOREF_PV(uRawValue);
    3813     /** @todo AMD SVM. */
     3828    RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
     3829    if (uValue & UINT64_C(0xfff))
     3830    {
     3831        Log(("CPUM: Invalid setting of low 12 bits set writing host-state save area MSR %#x: %#llx\n", idMsr, uValue));
     3832        return VERR_CPUM_RAISE_GP_0;
     3833    }
     3834
     3835    uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
     3836    if (fInvPhysMask & uValue)
     3837    {
     3838        Log(("CPUM: Invalid physical address bits set writing host-state save area MSR %#x: %#llx (%#llx)\n",
     3839             idMsr, uValue, uValue & fInvPhysMask));
     3840        return VERR_CPUM_RAISE_GP_0;
     3841    }
     3842
     3843    pVCpu->cpum.s.Guest.hwvirt.svm.uMsrHSavePa = uValue;
    38143844    return VINF_SUCCESS;
    38153845}
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r62478 r65904  
    557557}
    558558
     559
     560
     561/**
     562 * SVM nested-guest #VMEXIT handler.
     563 *
     564 * @param   pVCpu       The cross context virtual CPU structure.
     565 * @param   uExitCode   The exit reason.
     566 */
     567VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
     568{
     569    RT_NOREF2(pVCpu, uExitCode);
     570}
     571
     572
     573/**
     574 * VMX nested-guest VM-exit handler.
     575 * 
     576 * @param   pVCpu              The cross context virtual CPU structure.
     577 * @param   uBasicExitReason   The basic exit reason.
     578 */
     579VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
     580{
     581    RT_NOREF2(pVCpu, uBasicExitReason);
     582}
     583
  • trunk/src/VBox/VMM/VMMAll/IEMAll.cpp

    r65881 r65904  
    102102#include <VBox/vmm/em.h>
    103103#include <VBox/vmm/hm.h>
     104#ifdef VBOX_WITH_NESTED_HWVIRT
     105# include <VBox/vmm/hm_svm.h>
     106#endif
    104107#include <VBox/vmm/tm.h>
    105108#include <VBox/vmm/dbgf.h>
     
    362365# define IEM_USE_UNALIGNED_DATA_ACCESS
    363366#endif
     367
     368#ifdef VBOX_WITH_NESTED_HWVIRT
     369/**
     370 * Check if an SVM control/instruction intercept is set.
     371 */
     372#define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
     373
     374/**
     375 * Check if an SVM read CRx intercept is set.
     376 */
     377#define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
     378
     379/**
     380 * Check if an SVM write CRx intercept is set.
     381 */
     382#define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)   (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
     383
     384/**
     385 * Check if an SVM read DRx intercept is set.
     386 */
     387#define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
     388
     389/**
     390 * Check if an SVM write DRx intercept is set.
     391 */
     392#define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)   (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
     393
     394/**
     395 * Check if an SVM exception intercept is set.
     396 */
     397#define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt)   (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
     398#endif /* VBOX_WITH_NESTED_HWVIRT */
    364399
    365400
     
    1487614911}
    1487714912
     14913
     14914#ifdef VBOX_WITH_NESTED_HWVIRT
     14915/**
      14916 * Interface for HM and EM to emulate the CLGI instruction.
     14917 * 
     14918 * @returns Strict VBox status code.
     14919 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     14920 * @param   cbInstr     The instruction length in bytes.
     14921 * @thread  EMT(pVCpu)
     14922 */
     14923VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
     14924{
     14925    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
     14926
     14927    iemInitExec(pVCpu, false /*fBypassHandlers*/);
     14928    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
     14929    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
     14930}
     14931
     14932
     14933/**
     14934 * Interface for HM and EM to emulate the STGI instruction.
     14935 * 
     14936 * @returns Strict VBox status code.
     14937 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
     14938 * @param   cbInstr     The instruction length in bytes.
     14939 * @thread  EMT(pVCpu)
     14940 */
     14941VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
     14942{
     14943    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
     14944
     14945    iemInitExec(pVCpu, false /*fBypassHandlers*/);
     14946    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
     14947    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
     14948}
     14949#endif /* VBOX_WITH_NESTED_HWVIRT */
     14950
    1487814951#ifdef IN_RING3
    1487914952
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h

    r65612 r65904  
    58755875
    58765876
     5877#ifdef VBOX_WITH_NESTED_HWVIRT
     5878/**
     5879 * Implements 'CLGI'.
     5880 */
     5881IEM_CIMPL_DEF_0(iemCImpl_clgi)
     5882{
     5883    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     5884    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvm)
     5885    {
     5886        Log2(("clgi: Not in CPUID -> #UD\n"));
     5887        return iemRaiseUndefinedOpcode(pVCpu);
     5888    }
     5889    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
     5890    {
     5891        Log2(("clgi: EFER.SVME not enabled -> #UD\n"));
     5892        return iemRaiseUndefinedOpcode(pVCpu);
     5893    }
     5894    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
     5895    {
     5896        Log2(("clgi: Real or v8086 mode -> #UD\n"));
     5897        return iemRaiseUndefinedOpcode(pVCpu);
     5898    }
     5899    if (pVCpu->iem.s.uCpl != 0)
     5900    {
     5901        Log2(("clgi: CPL != 0 -> #GP(0)\n"));
     5902        return iemRaiseGeneralProtectionFault0(pVCpu);
     5903    }
     5904#ifndef IN_RC
     5905    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
     5906    {
     5907        Log2(("clgi: Guest intercept -> VMexit\n"));
     5908        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
     5909        return VINF_EM_RESCHEDULE;
     5910    }
     5911#endif
     5912
     5913    pCtx->hwvirt.svm.fGif = 0;
     5914    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5915    return VINF_SUCCESS;
     5916}
     5917
     5918
     5919/**
     5920 * Implements 'STGI'.
     5921 */
     5922IEM_CIMPL_DEF_0(iemCImpl_stgi)
     5923{
     5924    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     5925    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvm)
     5926    {
     5927        Log2(("stgi: Not in CPUID -> #UD\n"));
     5928        return iemRaiseUndefinedOpcode(pVCpu);
     5929    }
     5930    if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
     5931    {
     5932        Log2(("stgi: EFER.SVME not enabled -> #UD\n"));
     5933        return iemRaiseUndefinedOpcode(pVCpu);
     5934    }
     5935    if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
     5936    {
     5937        Log2(("stgi: Real or v8086 mode -> #UD\n"));
     5938        return iemRaiseUndefinedOpcode(pVCpu);
     5939    }
     5940    if (pVCpu->iem.s.uCpl != 0)
     5941    {
     5942        Log2(("stgi: CPL != 0 -> #GP(0)\n"));
     5943        return iemRaiseGeneralProtectionFault0(pVCpu);
     5944    }
     5945#ifndef IN_RC
     5946    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
     5947    {
     5948        Log2(("stgi: Guest intercept -> VMexit\n"));
     5949        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_STGI);
     5950        return VINF_EM_RESCHEDULE;
     5951    }
     5952#endif
     5953
     5954    pCtx->hwvirt.svm.fGif = 1;
     5955    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5956    return VINF_SUCCESS;
     5957}
     5958#endif /* VBOX_WITH_NESTED_HWVIRT */
     5959
    58775960/**
    58785961 * Implements 'CLI'.
  • trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h

    r65877 r65904  
    451451FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);
    452452
     453#ifdef VBOX_WITH_NESTED_HWVIRT
     454/** Opcode 0x0f 0x01 0xdc. */
     455FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
     456{
     457    IEMOP_MNEMONIC(stgi, "stgi");
     458    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
     459}
     460
     461/** Opcode 0x0f 0x01 0xdd. */
     462FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
     463{
     464    IEMOP_MNEMONIC(clgi, "clgi");
     465    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
     466}
     467#else
    453468/** Opcode 0x0f 0x01 0xdc. */
    454469FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);
     
    456471/** Opcode 0x0f 0x01 0xdd. */
    457472FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);
     473#endif /* VBOX_WITH_NESTED_HWVIRT */
    458474
    459475/** Opcode 0x0f 0x01 0xde. */
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r65322 r65904  
    308308static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
    309309static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
     310#ifdef VBOX_WITH_NESTED_HWVIRT
     311static FNSVMEXITHANDLER hmR0SvmExitClgi;
     312static FNSVMEXITHANDLER hmR0SvmExitStgi;
     313#endif
    310314/** @} */
    311315
     
    722726
    723727        /* Set up unconditional intercepts and conditions. */
    724         pVmcb->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a #VMEXIT. */
    725                                         | SVM_CTRL1_INTERCEPT_NMI           /* Non-maskable interrupts causes a #VMEXIT. */
    726                                         | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a #VMEXIT. */
    727                                         | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a #VMEXIT. */
    728                                         | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a #VMEXIT. */
    729                                         | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a #VMEXIT. */
    730                                         | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a #VMEXIT. */
    731                                         | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO #VMEXITs. */
    732                                         | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a #VMEXIT.*/
    733                                         | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a #VMEXIT. */
    734                                         | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events causes a #VMEXIT. */
    735                                         | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Intercept "freezing" during legacy FPU handling. */
    736 
    737         pVmcb->ctrl.u32InterceptCtrl2 =   SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a #VMEXIT. */
    738                                         | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a #VMEXIT. */
    739                                         | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a #VMEXIT. */
    740                                         | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a #VMEXIT. */
    741                                         | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a #VMEXIT. */
    742                                         | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a #VMEXIT. */
    743                                         | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a #VMEXIT. */
    744                                         | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a #VMEXIT. */
    745                                         | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a #VMEXIT. */
    746                                         | SVM_CTRL2_INTERCEPT_MWAIT         /* MWAIT causes a #VMEXIT. */
    747                                         | SVM_CTRL2_INTERCEPT_XSETBV;       /* XSETBV causes a #VMEXIT. */
     728        pVmcb->ctrl.u64InterceptCtrl = SVM_CTRL_INTERCEPT_INTR         /* External interrupt causes a #VMEXIT. */
     729                                     | SVM_CTRL_INTERCEPT_NMI          /* Non-maskable interrupts causes a #VMEXIT. */
     730                                     | SVM_CTRL_INTERCEPT_INIT         /* INIT signal causes a #VMEXIT. */
     731                                     | SVM_CTRL_INTERCEPT_RDPMC        /* RDPMC causes a #VMEXIT. */
     732                                     | SVM_CTRL_INTERCEPT_CPUID        /* CPUID causes a #VMEXIT. */
     733                                     | SVM_CTRL_INTERCEPT_RSM          /* RSM causes a #VMEXIT. */
     734                                     | SVM_CTRL_INTERCEPT_HLT          /* HLT causes a #VMEXIT. */
     735                                     | SVM_CTRL_INTERCEPT_INOUT_BITMAP /* Use the IOPM to cause IOIO #VMEXITs. */
     736                                     | SVM_CTRL_INTERCEPT_MSR_SHADOW   /* MSR access not covered by MSRPM causes a #VMEXIT.*/
     737                                     | SVM_CTRL_INTERCEPT_INVLPGA      /* INVLPGA causes a #VMEXIT. */
     738                                     | SVM_CTRL_INTERCEPT_SHUTDOWN     /* Shutdown events causes a #VMEXIT. */
     739                                     | SVM_CTRL_INTERCEPT_FERR_FREEZE  /* Intercept "freezing" during legacy FPU handling. */
     740                                     | SVM_CTRL_INTERCEPT_VMRUN        /* VMRUN causes a #VMEXIT. */
     741                                     | SVM_CTRL_INTERCEPT_VMMCALL      /* VMMCALL causes a #VMEXIT. */
     742                                     | SVM_CTRL_INTERCEPT_VMLOAD       /* VMLOAD causes a #VMEXIT. */
     743                                     | SVM_CTRL_INTERCEPT_VMSAVE       /* VMSAVE causes a #VMEXIT. */
     744                                     | SVM_CTRL_INTERCEPT_STGI         /* STGI causes a #VMEXIT. */
     745                                     | SVM_CTRL_INTERCEPT_CLGI         /* CLGI causes a #VMEXIT. */
     746                                     | SVM_CTRL_INTERCEPT_SKINIT       /* SKINIT causes a #VMEXIT. */
     747                                     | SVM_CTRL_INTERCEPT_WBINVD       /* WBINVD causes a #VMEXIT. */
     748                                     | SVM_CTRL_INTERCEPT_MONITOR      /* MONITOR causes a #VMEXIT. */
     749                                     | SVM_CTRL_INTERCEPT_MWAIT        /* MWAIT causes a #VMEXIT. */
     750                                     | SVM_CTRL_INTERCEPT_XSETBV;      /* XSETBV causes a #VMEXIT. */
    748751
    749752        /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
     
    795798
    796799            /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
    797             pVmcb->ctrl.u32InterceptCtrl1 |=   SVM_CTRL1_INTERCEPT_INVLPG
    798                                              | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
     800            pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
     801                                         |  SVM_CTRL_INTERCEPT_TASK_SWITCH;
    799802
    800803            /* Page faults must be intercepted to implement shadow paging. */
     
    803806
    804807#ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
    805         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_TASK_SWITCH;
     808        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
    806809#endif
    807810
     
    23262329    if (fCanUseRealTsc)
    23272330    {
    2328         pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
    2329         pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
     2331        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
     2332        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
    23302333        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
    23312334    }
    23322335    else
    23332336    {
    2334         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    2335         pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
     2337        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
     2338        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
    23362339        STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
    23372340    }
     
    25752578DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
    25762579{
    2577     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
     2580    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
    25782581    {
    25792582        pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 1;     /* A virtual interrupt is pending. */
    25802583        pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
    2581         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
     2584        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
    25822585        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    25832586
    25842587        Log4(("Setting VINTR intercept\n"));
     2588    }
     2589}
     2590
     2591
     2592/**
     2593 * Clears the virtual interrupt intercept control in the VMCB as
      2594 * we have figured the guest is unable to process any interrupts
     2595 * at this point of time.
     2596 *
     2597 * @param   pVmcb       Pointer to the VM control block.
     2598 */
     2599DECLINLINE(void) hmR0SvmClearVirtIntrIntercept(PSVMVMCB pVmcb)
     2600{
     2601    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
     2602    {
     2603        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
     2604        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
     2605        Log4(("Clearing VINTR intercept\n"));
    25852606    }
    25862607}
     
    25962617DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
    25972618{
    2598     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
    2599     {
    2600         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
     2619    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET))
     2620    {
     2621        pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
    26012622        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
    26022623
     
    26132634DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
    26142635{
    2615     if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
    2616     {
    2617         pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
     2636    if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
     2637    {
     2638        pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
    26182639        pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
    26192640
     
    27802801        Log4(("ctrl.u16InterceptWrDRx            %#x\n",      pVmcb->ctrl.u16InterceptWrDRx));
    27812802        Log4(("ctrl.u32InterceptException        %#x\n",      pVmcb->ctrl.u32InterceptException));
    2782         Log4(("ctrl.u32InterceptCtrl1            %#x\n",      pVmcb->ctrl.u32InterceptCtrl1));
    2783         Log4(("ctrl.u32InterceptCtrl2            %#x\n",      pVmcb->ctrl.u32InterceptCtrl2));
     2803        Log4(("ctrl.u64InterceptCtrl             %#RX64\n",   pVmcb->ctrl.u64InterceptCtrl));
    27842804        Log4(("ctrl.u64IOPMPhysAddr              %#RX64\n",   pVmcb->ctrl.u64IOPMPhysAddr));
    27852805        Log4(("ctrl.u64MSRPMPhysAddr             %#RX64\n",   pVmcb->ctrl.u64MSRPMPhysAddr));
     
    31893209     */
    31903210    if (    (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    3191         && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
     3211        && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
    31923212    {
    31933213        hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     
    32633283
    32643284    /* TSC read must be done early for maximum accuracy. */
    3265     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
     3285    if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
    32663286        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
    32673287
     
    36653685                }
    36663686
     3687#ifdef VBOX_WITH_NESTED_HWVIRT
     3688                case SVM_EXIT_CLGI: return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
     3689                case SVM_EXIT_STGI: return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
     3690#else
     3691                case SVM_EXIT_CLGI:
     3692                case SVM_EXIT_STGI:
     3693#endif
    36673694                case SVM_EXIT_INVLPGA:
    36683695                case SVM_EXIT_RSM:
     
    36703697                case SVM_EXIT_VMLOAD:
    36713698                case SVM_EXIT_VMSAVE:
    3672                 case SVM_EXIT_STGI:
    3673                 case SVM_EXIT_CLGI:
    36743699                case SVM_EXIT_SKINIT:
    36753700                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
     
    51925217
    51935218    /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs, it is now ready. */
    5194     pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
     5219    pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
    51955220    pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
    51965221
     
    56675692}
    56685693
     5694
     5695#ifdef VBOX_WITH_NESTED_HWVIRT
     5696/**
      5697 * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional
     5698 * \#VMEXIT.
     5699 */
     5700HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     5701{
     5702    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     5703    PVM pVM = pVCpu->CTX_SUFF(pVM);
     5704    if (pVM->cpum.ro.GuestFeatures.fSvm)
     5705    {
     5706        /** @todo Stat. */
     5707        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
     5708        VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, 3);
     5709        return VBOXSTRICTRC_VAL(rcStrict);
     5710    }
     5711    return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
     5712}
     5713
     5714
     5715/**
      5716 * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional
     5717 * \#VMEXIT.
     5718 */
     5719HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
     5720{
     5721    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     5722    PVM pVM = pVCpu->CTX_SUFF(pVM);
     5723    if (pVM->cpum.ro.GuestFeatures.fSvm)
     5724    {
     5725        /** @todo Stat. */
     5726        /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
     5727        VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, 3);
     5728        return VBOXSTRICTRC_VAL(rcStrict);
     5729    }
     5730    return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
     5731}
     5732#endif /* VBOX_WITH_NESTED_HWVIRT */
     5733
     5734
    56695735/** @} */
    56705736
  • trunk/src/VBox/VMM/VMMR3/CPUM.cpp

    r64663 r65904  
    11511151    /* C-state control. Guesses. */
    11521152    pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
     1153
     1154    /*
     1155     * Hardware virtualization state.
     1156     */
     1157    memset(&pCtx->hwvirt, 0, sizeof(pCtx->hwvirt));
     1158    /* SVM. */
     1159    pCtx->hwvirt.svm.fGif = 1;
    11531160}
    11541161
  • trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp

    r65801 r65904  
    16791679            pFeatures->fAmdMmxExts      = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
    16801680            pFeatures->fXop             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
     1681            pFeatures->fSvm             = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
    16811682        }
    16821683
     
    22122213    CPUMISAEXTCFG   enm3dNowPrf;
    22132214    CPUMISAEXTCFG   enmAmdExtMmx;
     2215    CPUMISAEXTCFG   enmSvm;
    22142216
    22152217    uint32_t        uMaxStdLeaf;
     
    26962698        pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
    26972699                               //| X86_CPUID_AMD_FEATURE_ECX_CMPL   - set below if applicable.
    2698                                //| X86_CPUID_AMD_FEATURE_ECX_SVM    - not virtualized.
     2700                               | (pConfig->enmSvm       ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
    26992701                               //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
    27002702                               /* Note: This could prevent teleporting from AMD to Intel CPUs! */
     
    27372739        {
    27382740            PORTABLE_DISABLE_FEATURE_BIT(    1, pExtFeatureLeaf->uEcx, CR8L,       X86_CPUID_AMD_FEATURE_ECX_CR8L);
     2741            PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SVM,        X86_CPUID_AMD_FEATURE_ECX_SVM,       pConfig->enmSvm);
    27392742            PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM,        X86_CPUID_AMD_FEATURE_ECX_ABM,       pConfig->enmAbm);
    27402743            PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A,      X86_CPUID_AMD_FEATURE_ECX_SSE4A,     pConfig->enmSse4A);
     
    27792782        if (pConfig->enmSse4A     == CPUMISAEXTCFG_ENABLED_ALWAYS)
    27802783            pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
     2784        if (pConfig->enmSvm       == CPUMISAEXTCFG_ENABLED_ALWAYS)
     2785            pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SVM;
    27812786        if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
    27822787            pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
     
    27852790        if (pConfig->enmAmdExtMmx  == CPUMISAEXTCFG_ENABLED_ALWAYS)
    27862791            pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
     2792        if (pConfig->enmSvm        == CPUMISAEXTCFG_ENABLED_ALWAYS)
     2793            pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SVM;
    27872794    }
    27882795    pExtFeatureLeaf = NULL; /* Must refetch! */
     
    33483355     *      ECX - Reserved.
    33493356     *      EDX - SVM Feature identification.
    3350      * We clear all as we currently does not virtualize SVM.
    3351      */
    3352     cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
     3357     */
     3358    pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
     3359    if (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
     3360    {
     3361        PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
     3362        pSvmFeatureLeaf->uEax = 0x1;
     3363        pSvmFeatureLeaf->uEbx = 0x8000;
     3364        pSvmFeatureLeaf->uEcx = 0;
     3365        pSvmFeatureLeaf->uEdx = 0; /** @todo Support SVM features */
     3366    }
     3367    else
     3368        cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
    33533369
    33543370    /* Cpuid 0x8000000b thru 0x80000018: Reserved
     
    37223738                                  "|3DNOWPRF"
    37233739                                  "|AXMMX"
     3740                                  "|SVM"
    37243741                                  , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
    37253742        if (RT_FAILURE(rc))
     
    38953912    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
    38963913    AssertLogRelRCReturn(rc, rc);
     3914
     3915#ifdef VBOX_WITH_NESTED_HWVIRT
     3916    /** @cfgm{/CPUM/IsaExts/SVM, isaextcfg, depends}
     3917     * Whether to expose the AMD's hardware virtualization (SVM) instructions to the
     3918     * guest. For the time being, the default is to only do this for VMs with nested
     3919     * paging and AMD-V.
     3920     */
     3921    rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SVM", &pConfig->enmSvm, fNestedPagingAndFullGuestExec);
     3922    AssertLogRelRCReturn(rc, rc);
     3923#endif
    38973924
    38983925    return VINF_SUCCESS;
     
    58325859    DBGFREGSUBFIELD_RO("LahfSahf\0"     "LAHF/SAHF support in 64-bit mode",              0, 1, 0),
    58335860    DBGFREGSUBFIELD_RO("CmpLegacy\0"    "Core multi-processing legacy mode",             1, 1, 0),
    5834     DBGFREGSUBFIELD_RO("SVM\0"          "AMD VM extensions",                             2, 1, 0),
     5861    DBGFREGSUBFIELD_RO("SVM\0"          "AMD Secure Virtual Machine extensions",         2, 1, 0),
    58355862    DBGFREGSUBFIELD_RO("EXTAPIC\0"      "AMD Extended APIC registers",                   3, 1, 0),
    58365863    DBGFREGSUBFIELD_RO("CR8L\0"         "AMD LOCK MOV CR0 means MOV CR8",                4, 1, 0),
  • trunk/src/VBox/VMM/include/CPUMInternal.mac

    r64663 r65904  
    226226    .Guest.pXStateRC      RTRCPTR_RES 1
    227227    .Guest.aoffXState         resw    64
    228 
     228    alignb 8
     229    .Guest.hwvirt.svm.uMsrHSavePa         resq    1
     230    .Guest.hwvirt.svm.u64InterceptCtrl    resq    1
     231    .Guest.hwvirt.svm.u32InterceptXcpt    resd    1
     232    .Guest.hwvirt.svm.u16InterceptRdCRx   resw    1
     233    .Guest.hwvirt.svm.u16InterceptWrCRx   resw    1
     234    .Guest.hwvirt.svm.u16InterceptRdDRx   resw    1
     235    .Guest.hwvirt.svm.u16InterceptWrDRx   resw    1
     236    .Guest.hwvirt.svm.fGif                resb    1
    229237    alignb 64
     238
    230239    .GuestMsrs                resq    0
    231240    .GuestMsrs.au64           resq    64
     
    484493    .Hyper.pXStateRC      RTRCPTR_RES 1
    485494    .Hyper.aoffXState         resw    64
     495    alignb 8
     496    .Hyper.hwvirt.svm.uMsrHSavePa         resq    1
     497    .Hyper.hwvirt.svm.u64InterceptCtrl    resq    1
     498    .Hyper.hwvirt.svm.u32InterceptXcpt    resd    1
     499    .Hyper.hwvirt.svm.u16InterceptRdCRx   resw    1
     500    .Hyper.hwvirt.svm.u16InterceptWrCRx   resw    1
     501    .Hyper.hwvirt.svm.u16InterceptRdDRx   resw    1
     502    .Hyper.hwvirt.svm.u16InterceptWrDRx   resw    1
     503    .Hyper.hwvirt.svm.fGif                resb    1
    486504    alignb 64
    487505
  • trunk/src/VBox/VMM/testcase/tstVMStruct.h

    r65531 r65904  
    131131
    132132    GEN_CHECK_SIZE(CPUMCTX);
     133    GEN_CHECK_OFF(CPUMCTX, hwvirt);
     134    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
     135    GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
     136    /** @todo add rest of hwvirt fields when code is more
     137     *        finalized. */
    133138    GEN_CHECK_OFF(CPUMCTX, pXStateR0);
    134139    GEN_CHECK_OFF(CPUMCTX, pXStateR3);
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r64626 r65904  
    312312    CHECK_MEMBER_ALIGNMENT(CPUMCTX, gdtr.pGdt, 8);
    313313    CHECK_MEMBER_ALIGNMENT(CPUMCTX, SysEnter, 8);
     314    CHECK_MEMBER_ALIGNMENT(CPUMCTX, hwvirt, 8);
    314315    CHECK_CPUMCTXCORE(rax);
    315316    CHECK_CPUMCTXCORE(rcx);
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette