- Timestamp:
- Mar 1, 2017 10:21:38 AM (8 years ago)
- svn:sync-xref-src-repo-rev:
- 113697
- Location:
- trunk
- Files:
- 20 edited
trunk/Config.kmk
r65732 → r65904

      # Enables the third step using IEM (the interpreter).
      VBOX_WITH_3RD_IEM_STEP = 1
    + # Enables nested hardware virtualization support (mainly for IEM)
    + VBOX_WITH_NESTED_HWVIRT = 1
      ## @}
trunk/include/VBox/vmm/cpum.h
r65763 → r65904

      uint32_t fLeakyFxSR : 1;

    + /** AMD64: Supports AMD SVM. */
    + uint32_t fSvm : 1;
    +
      /** Alignment padding / reserved for future use. */
    - uint32_t fPadding : 27;
    + uint32_t fPadding : 26;
      uint32_t auPadding[3];
      } CPUMFEATURES;
…
      }

    + /**
    +  * Checks if the guest has the specified ctrl/instruction
    +  * intercept active.
    +  *
    +  * @returns @c true if the intercept is set, @c false otherwise.
    +  * @param   pCtx        Pointer to the context.
    +  * @param   fIntercept  The SVM control/instruction intercept,
    +  *                      see SVM_CTRL_INTERCEPT_*.
    +  */
    + DECLINLINE(bool) CPUMIsGuestSvmCtrlInterceptSet(PCPUMCTX pCtx, uint64_t fIntercept)
    + {
    +     return RT_BOOL(pCtx->hwvirt.svm.u64InterceptCtrl & fIntercept);
    + }
    +
    + /**
    +  * Checks if the guest has the specified CR read intercept
    +  * active.
    +  *
    +  * @returns @c true if the intercept is set, @c false otherwise.
    +  * @param   pCtx    Pointer to the context.
    +  * @param   uCr     The CR register number (0 to 15).
    +  */
    + DECLINLINE(bool) CPUMIsGuestSvmReadCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
    + {
    +     return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdCRx & (1 << uCr));
    + }
    +
    + /**
    +  * Checks if the guest has the specified CR write intercept
    +  * active.
    +  *
    +  * @returns @c true if the intercept is set, @c false otherwise.
    +  * @param   pCtx    Pointer to the context.
    +  * @param   uCr     The CR register number (0 to 15).
    +  */
    + DECLINLINE(bool) CPUMIsGuestSvmWriteCRxInterceptSet(PCPUMCTX pCtx, uint8_t uCr)
    + {
    +     return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrCRx & (1 << uCr));
    + }
    +
    + /**
    +  * Checks if the guest has the specified DR read intercept
    +  * active.
    +  *
    +  * @returns @c true if the intercept is set, @c false otherwise.
    +  * @param   pCtx    Pointer to the context.
    +  * @param   uDr     The DR register number (0 to 15).
    +  */
    + DECLINLINE(bool) CPUMIsGuestSvmReadDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
    + {
    +     return RT_BOOL(pCtx->hwvirt.svm.u16InterceptRdDRx & (1 << uDr));
    + }
    +
    + /**
    +  * Checks if the guest has the specified DR write intercept
    +  * active.
    +  *
    +  * @returns @c true if the intercept is set, @c false otherwise.
    +  * @param   pCtx    Pointer to the context.
    +  * @param   uDr     The DR register number (0 to 15).
    +  */
    + DECLINLINE(bool) CPUMIsGuestSvmWriteDRxInterceptSet(PCPUMCTX pCtx, uint8_t uDr)
    + {
    +     return RT_BOOL(pCtx->hwvirt.svm.u16InterceptWrDRx & (1 << uDr));
    + }
    +
    + /**
    +  * Checks if the guest has the specified exception
    +  * intercept active.
    +  *
    +  * @returns @c true if the intercept is active, @c false otherwise.
    +  * @param   pCtx     Pointer to the context.
    +  * @param   enmXcpt  The exception.
    +  */
    + DECLINLINE(bool) CPUMIsGuestSvmXcptInterceptSet(PCPUMCTX pCtx, X86XCPT enmXcpt)
    + {
    +     return RT_BOOL(pCtx->hwvirt.svm.u32InterceptXcpt & RT_BIT(enmXcpt));
    + }
    +
      #endif /* VBOX_WITHOUT_UNNAMED_UNIONS */
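(The new CPUMIsGuestSvm* helpers only test bits cached in CPUMCTX; nothing outside IEM consumes them yet. A minimal sketch of how an emulation path might use one, assuming the existing SVM_EXIT_HLT exit code from hm_svm.h and the HMNstGstSvmVmExit stub introduced further down in this changeset — the function name iemExampleHltIntercept is illustrative only:)

    #include <VBox/vmm/cpum.h>
    #include <VBox/vmm/hm.h>
    #include <VBox/vmm/hm_svm.h>

    /* Illustrative only: reflect HLT to the nested hypervisor when intercepted. */
    static VBOXSTRICTRC iemExampleHltIntercept(PVMCPU pVCpu, PCPUMCTX pCtx)
    {
        /* u64InterceptCtrl caches the nested-guest VMCB control intercepts. */
        if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_HLT))
        {
            HMNstGstSvmVmExit(pVCpu, SVM_EXIT_HLT);  /* Signal a #VMEXIT to the nested hypervisor. */
            return VINF_EM_RESCHEDULE;
        }
        /* ... otherwise emulate HLT for the nested guest directly ... */
        return VINF_SUCCESS;
    }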
trunk/include/VBox/vmm/cpum.mac
r64663 → r65904

      .pXStateRC                          RTRCPTR_RES 1
      .aoffXState                         resw 64
    + alignb 8
    + .hwvirt.svm.uMsrHSavePa             resq 1
    + .hwvirt.svm.u64InterceptCtrl        resq 1
    + .hwvirt.svm.u32InterceptXcpt        resd 1
    + .hwvirt.svm.u16InterceptRdCRx       resw 1
    + .hwvirt.svm.u16InterceptWrCRx       resw 1
    + .hwvirt.svm.u16InterceptRdDRx       resw 1
    + .hwvirt.svm.u16InterceptWrDRx       resw 1
    + .hwvirt.svm.fGif                    resb 1
      alignb 64
      endstruc
trunk/include/VBox/vmm/cpumctx.h
r64663 → r65904

      uint16_t aoffXState[64];

    - /** Size padding. */
    - uint32_t au32SizePadding[HC_ARCH_BITS == 32 ? 13 : 11];
    + /** 724 - Size padding. */
    + uint32_t u32Padding;
    +
    + /** 728 - Hardware virtualization state. */
    + struct
    + {
    +     union /* no tag! */
    +     {
    +         struct
    +         {
    +             /** 728 - MSR holding physical address of the Guest's 'host-state'. */
    +             uint64_t    uMsrHSavePa;
    +
    +             /** @name Cache of the nested-guest VMCB controls.
    +              * @{ */
    +             /** 736 - Control intercepts. */
    +             uint64_t    u64InterceptCtrl;
    +             /** 744 - Exception intercepts. */
    +             uint32_t    u32InterceptXcpt;
    +             /** 748 - CR0-CR15 read intercepts. */
    +             uint16_t    u16InterceptRdCRx;
    +             /** 750 - CR0-CR15 write intercepts. */
    +             uint16_t    u16InterceptWrCRx;
    +             /** 752 - DR0-DR15 read intercepts. */
    +             uint16_t    u16InterceptRdDRx;
    +             /** 754 - DR0-DR15 write intercepts. */
    +             uint16_t    u16InterceptWrDRx;
    +             /** @} */
    +
    +             /** 756 - Global interrupt flag. */
    +             uint8_t     fGif;
    +             /** 757 - Padding. */
    +             uint8_t     abPadding[11];
    +         } svm;
    +         struct
    +         {
    +         } vmx;
    +     } CPUM_UNION_NM(s);
    + } hwvirt;
    + /** @} */
      } CPUMCTX;
      #pragma pack()
…
      AssertCompileMemberOffset(CPUMCTX, pXStateRC,  HC_ARCH_BITS == 64 ? 592 : 584);
      AssertCompileMemberOffset(CPUMCTX, aoffXState, HC_ARCH_BITS == 64 ? 596 : 588);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt, 728);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.uMsrHSavePa,       728);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.u64InterceptCtrl,  736);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.u32InterceptXcpt,  744);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.u16InterceptRdCRx, 748);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.u16InterceptWrCRx, 750);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.u16InterceptRdDRx, 752);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.u16InterceptWrDRx, 754);
    + AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.)svm.fGif,              756);
    +
      AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
      AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw2.) r0);
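(For reference, the 728 offset asserted above follows from the existing layout: on a 64-bit host aoffXState starts at 596 and spans 64 uint16_t entries, i.e. 128 bytes, ending at 724; the new u32Padding then carries the structure to 728. A minimal stand-alone sanity check in the spirit of the AssertCompileMemberOffset lines, assuming cpumctx.h is includable with the default unnamed-union configuration:)

    #include <stddef.h>
    #include <VBox/vmm/cpumctx.h>

    int main(void)
    {
        /* Runtime mirror of AssertCompileMemberOffset(CPUMCTX, hwvirt, 728):
           596 + 64 * sizeof(uint16_t) + 4 bytes of padding == 728. */
        return offsetof(CPUMCTX, hwvirt) == 728 ? 0 : 1;
    }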
trunk/include/VBox/vmm/hm.h
r62476 → r65904

      VMM_INT_DECL(void) HMHypercallsEnable(PVMCPU pVCpu);
      VMM_INT_DECL(void) HMHypercallsDisable(PVMCPU pVCpu);
    +
    + VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode);
    + VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason);

      #ifndef IN_RC
trunk/include/VBox/vmm/hm_svm.h
r62476 → r65904

      /** @} */

    - /** @name SVMVMCB.ctrl.u32InterceptCtrl1
    -  * @{
    -  */
    - /** 0 Intercept INTR (physical maskable interrupt). */
    - #define SVM_CTRL1_INTERCEPT_INTR            RT_BIT(0)
    - /** 1 Intercept NMI. */
    - #define SVM_CTRL1_INTERCEPT_NMI             RT_BIT(1)
    - /** 2 Intercept SMI. */
    - #define SVM_CTRL1_INTERCEPT_SMI             RT_BIT(2)
    - /** 3 Intercept INIT. */
    - #define SVM_CTRL1_INTERCEPT_INIT            RT_BIT(3)
    - /** 4 Intercept VINTR (virtual maskable interrupt). */
    - #define SVM_CTRL1_INTERCEPT_VINTR           RT_BIT(4)
    - /** 5 Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
    - #define SVM_CTRL1_INTERCEPT_CR0             RT_BIT(5)
    - /** 6 Intercept reads of IDTR. */
    - #define SVM_CTRL1_INTERCEPT_IDTR_READS      RT_BIT(6)
    - /** 7 Intercept reads of GDTR. */
    - #define SVM_CTRL1_INTERCEPT_GDTR_READS      RT_BIT(7)
    - /** 8 Intercept reads of LDTR. */
    - #define SVM_CTRL1_INTERCEPT_LDTR_READS      RT_BIT(8)
    - /** 9 Intercept reads of TR. */
    - #define SVM_CTRL1_INTERCEPT_TR_READS        RT_BIT(9)
    - /** 10 Intercept writes of IDTR. */
    - #define SVM_CTRL1_INTERCEPT_IDTR_WRITES     RT_BIT(10)
    - /** 11 Intercept writes of GDTR. */
    - #define SVM_CTRL1_INTERCEPT_GDTR_WRITES     RT_BIT(11)
    - /** 12 Intercept writes of LDTR. */
    - #define SVM_CTRL1_INTERCEPT_LDTR_WRITES     RT_BIT(12)
    - /** 13 Intercept writes of TR. */
    - #define SVM_CTRL1_INTERCEPT_TR_WRITES       RT_BIT(13)
    - /** 14 Intercept RDTSC instruction. */
    - #define SVM_CTRL1_INTERCEPT_RDTSC           RT_BIT(14)
    - /** 15 Intercept RDPMC instruction. */
    - #define SVM_CTRL1_INTERCEPT_RDPMC           RT_BIT(15)
    - /** 16 Intercept PUSHF instruction. */
    - #define SVM_CTRL1_INTERCEPT_PUSHF           RT_BIT(16)
    - /** 17 Intercept POPF instruction. */
    - #define SVM_CTRL1_INTERCEPT_POPF            RT_BIT(17)
    - /** 18 Intercept CPUID instruction. */
    - #define SVM_CTRL1_INTERCEPT_CPUID           RT_BIT(18)
    - /** 19 Intercept RSM instruction. */
    - #define SVM_CTRL1_INTERCEPT_RSM             RT_BIT(19)
    - /** 20 Intercept IRET instruction. */
    - #define SVM_CTRL1_INTERCEPT_IRET            RT_BIT(20)
    - /** 21 Intercept INTn instruction. */
    - #define SVM_CTRL1_INTERCEPT_INTN            RT_BIT(21)
    - /** 22 Intercept INVD instruction. */
    - #define SVM_CTRL1_INTERCEPT_INVD            RT_BIT(22)
    - /** 23 Intercept PAUSE instruction. */
    - #define SVM_CTRL1_INTERCEPT_PAUSE           RT_BIT(23)
    - /** 24 Intercept HLT instruction. */
    - #define SVM_CTRL1_INTERCEPT_HLT             RT_BIT(24)
    - /** 25 Intercept INVLPG instruction. */
    - #define SVM_CTRL1_INTERCEPT_INVLPG          RT_BIT(25)
    - /** 26 Intercept INVLPGA instruction. */
    - #define SVM_CTRL1_INTERCEPT_INVLPGA         RT_BIT(26)
    - /** 27 IOIO_PROT Intercept IN/OUT accesses to selected ports. */
    - #define SVM_CTRL1_INTERCEPT_INOUT_BITMAP    RT_BIT(27)
    - /** 28 MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
    - #define SVM_CTRL1_INTERCEPT_MSR_SHADOW      RT_BIT(28)
    - /** 29 Intercept task switches. */
    - #define SVM_CTRL1_INTERCEPT_TASK_SWITCH     RT_BIT(29)
    - /** 30 FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
    - #define SVM_CTRL1_INTERCEPT_FERR_FREEZE     RT_BIT(30)
    - /** 31 Intercept shutdown events. */
    - #define SVM_CTRL1_INTERCEPT_SHUTDOWN        RT_BIT(31)
    - /** @} */
    -
    -
    - /** @name SVMVMCB.ctrl.u32InterceptCtrl2
    -  * @{
    -  */
    - /** 0 Intercept VMRUN instruction. */
    - #define SVM_CTRL2_INTERCEPT_VMRUN           RT_BIT(0)
    - /** 1 Intercept VMMCALL instruction. */
    - #define SVM_CTRL2_INTERCEPT_VMMCALL         RT_BIT(1)
    - /** 2 Intercept VMLOAD instruction. */
    - #define SVM_CTRL2_INTERCEPT_VMLOAD          RT_BIT(2)
    - /** 3 Intercept VMSAVE instruction. */
    - #define SVM_CTRL2_INTERCEPT_VMSAVE          RT_BIT(3)
    - /** 4 Intercept STGI instruction. */
    - #define SVM_CTRL2_INTERCEPT_STGI            RT_BIT(4)
    - /** 5 Intercept CLGI instruction. */
    - #define SVM_CTRL2_INTERCEPT_CLGI            RT_BIT(5)
    - /** 6 Intercept SKINIT instruction. */
    - #define SVM_CTRL2_INTERCEPT_SKINIT          RT_BIT(6)
    - /** 7 Intercept RDTSCP instruction. */
    - #define SVM_CTRL2_INTERCEPT_RDTSCP          RT_BIT(7)
    - /** 8 Intercept ICEBP instruction. */
    - #define SVM_CTRL2_INTERCEPT_ICEBP           RT_BIT(8)
    - /** 9 Intercept WBINVD instruction. */
    - #define SVM_CTRL2_INTERCEPT_WBINVD          RT_BIT(9)
    - /** 10 Intercept MONITOR instruction. */
    - #define SVM_CTRL2_INTERCEPT_MONITOR         RT_BIT(10)
    - /** 11 Intercept MWAIT instruction unconditionally. */
    - #define SVM_CTRL2_INTERCEPT_MWAIT           RT_BIT(11)
    - /** 12 Intercept MWAIT instruction when armed. */
    - #define SVM_CTRL2_INTERCEPT_MWAIT_ARMED     RT_BIT(12)
    - /** 13 Intercept XSETBV instruction. */
    - #define SVM_CTRL2_INTERCEPT_XSETBV          RT_BIT(13)
    +
    + /** @name SVMVMCB.ctrl.u64InterceptCtrl
    +  * @{
    +  */
    + /** Intercept INTR (physical maskable interrupt). */
    + #define SVM_CTRL_INTERCEPT_INTR             RT_BIT_64(0)
    + /** Intercept NMI. */
    + #define SVM_CTRL_INTERCEPT_NMI              RT_BIT_64(1)
    + /** Intercept SMI. */
    + #define SVM_CTRL_INTERCEPT_SMI              RT_BIT_64(2)
    + /** Intercept INIT. */
    + #define SVM_CTRL_INTERCEPT_INIT             RT_BIT_64(3)
    + /** Intercept VINTR (virtual maskable interrupt). */
    + #define SVM_CTRL_INTERCEPT_VINTR            RT_BIT_64(4)
    + /** Intercept CR0 writes that change bits other than CR0.TS or CR0.MP */
    + #define SVM_CTRL_INTERCEPT_CR0              RT_BIT_64(5)
    + /** Intercept reads of IDTR. */
    + #define SVM_CTRL_INTERCEPT_IDTR_READS       RT_BIT_64(6)
    + /** Intercept reads of GDTR. */
    + #define SVM_CTRL_INTERCEPT_GDTR_READS       RT_BIT_64(7)
    + /** Intercept reads of LDTR. */
    + #define SVM_CTRL_INTERCEPT_LDTR_READS       RT_BIT_64(8)
    + /** Intercept reads of TR. */
    + #define SVM_CTRL_INTERCEPT_TR_READS         RT_BIT_64(9)
    + /** Intercept writes of IDTR. */
    + #define SVM_CTRL_INTERCEPT_IDTR_WRITES      RT_BIT_64(10)
    + /** Intercept writes of GDTR. */
    + #define SVM_CTRL_INTERCEPT_GDTR_WRITES      RT_BIT_64(11)
    + /** Intercept writes of LDTR. */
    + #define SVM_CTRL_INTERCEPT_LDTR_WRITES      RT_BIT_64(12)
    + /** Intercept writes of TR. */
    + #define SVM_CTRL_INTERCEPT_TR_WRITES        RT_BIT_64(13)
    + /** Intercept RDTSC instruction. */
    + #define SVM_CTRL_INTERCEPT_RDTSC            RT_BIT_64(14)
    + /** Intercept RDPMC instruction. */
    + #define SVM_CTRL_INTERCEPT_RDPMC            RT_BIT_64(15)
    + /** Intercept PUSHF instruction. */
    + #define SVM_CTRL_INTERCEPT_PUSHF            RT_BIT_64(16)
    + /** Intercept POPF instruction. */
    + #define SVM_CTRL_INTERCEPT_POPF             RT_BIT_64(17)
    + /** Intercept CPUID instruction. */
    + #define SVM_CTRL_INTERCEPT_CPUID            RT_BIT_64(18)
    + /** Intercept RSM instruction. */
    + #define SVM_CTRL_INTERCEPT_RSM              RT_BIT_64(19)
    + /** Intercept IRET instruction. */
    + #define SVM_CTRL_INTERCEPT_IRET             RT_BIT_64(20)
    + /** Intercept INTn instruction. */
    + #define SVM_CTRL_INTERCEPT_INTN             RT_BIT_64(21)
    + /** Intercept INVD instruction. */
    + #define SVM_CTRL_INTERCEPT_INVD             RT_BIT_64(22)
    + /** Intercept PAUSE instruction. */
    + #define SVM_CTRL_INTERCEPT_PAUSE            RT_BIT_64(23)
    + /** Intercept HLT instruction. */
    + #define SVM_CTRL_INTERCEPT_HLT              RT_BIT_64(24)
    + /** Intercept INVLPG instruction. */
    + #define SVM_CTRL_INTERCEPT_INVLPG           RT_BIT_64(25)
    + /** Intercept INVLPGA instruction. */
    + #define SVM_CTRL_INTERCEPT_INVLPGA          RT_BIT_64(26)
    + /** IOIO_PROT Intercept IN/OUT accesses to selected ports. */
    + #define SVM_CTRL_INTERCEPT_INOUT_BITMAP     RT_BIT_64(27)
    + /** MSR_PROT Intercept RDMSR or WRMSR accesses to selected MSRs. */
    + #define SVM_CTRL_INTERCEPT_MSR_SHADOW       RT_BIT_64(28)
    + /** Intercept task switches. */
    + #define SVM_CTRL_INTERCEPT_TASK_SWITCH      RT_BIT_64(29)
    + /** FERR_FREEZE: intercept processor "freezing" during legacy FERR handling. */
    + #define SVM_CTRL_INTERCEPT_FERR_FREEZE      RT_BIT_64(30)
    + /** Intercept shutdown events. */
    + #define SVM_CTRL_INTERCEPT_SHUTDOWN         RT_BIT_64(31)
    + /** Intercept VMRUN instruction. */
    + #define SVM_CTRL_INTERCEPT_VMRUN            RT_BIT_64(32 + 0)
    + /** Intercept VMMCALL instruction. */
    + #define SVM_CTRL_INTERCEPT_VMMCALL          RT_BIT_64(32 + 1)
    + /** Intercept VMLOAD instruction. */
    + #define SVM_CTRL_INTERCEPT_VMLOAD           RT_BIT_64(32 + 2)
    + /** Intercept VMSAVE instruction. */
    + #define SVM_CTRL_INTERCEPT_VMSAVE           RT_BIT_64(32 + 3)
    + /** Intercept STGI instruction. */
    + #define SVM_CTRL_INTERCEPT_STGI             RT_BIT_64(32 + 4)
    + /** Intercept CLGI instruction. */
    + #define SVM_CTRL_INTERCEPT_CLGI             RT_BIT_64(32 + 5)
    + /** Intercept SKINIT instruction. */
    + #define SVM_CTRL_INTERCEPT_SKINIT           RT_BIT_64(32 + 6)
    + /** Intercept RDTSCP instruction. */
    + #define SVM_CTRL_INTERCEPT_RDTSCP           RT_BIT_64(32 + 7)
    + /** Intercept ICEBP instruction. */
    + #define SVM_CTRL_INTERCEPT_ICEBP            RT_BIT_64(32 + 8)
    + /** Intercept WBINVD instruction. */
    + #define SVM_CTRL_INTERCEPT_WBINVD           RT_BIT_64(32 + 9)
    + /** Intercept MONITOR instruction. */
    + #define SVM_CTRL_INTERCEPT_MONITOR          RT_BIT_64(32 + 10)
    + /** Intercept MWAIT instruction unconditionally. */
    + #define SVM_CTRL_INTERCEPT_MWAIT            RT_BIT_64(32 + 11)
    + /** Intercept MWAIT instruction when armed. */
    + #define SVM_CTRL_INTERCEPT_MWAIT_ARMED      RT_BIT_64(32 + 12)
    + /** Intercept XSETBV instruction. */
    + #define SVM_CTRL_INTERCEPT_XSETBV           RT_BIT_64(32 + 13)
    + /* Bit 32 + 14 - Reserved, SBZ. */
    + /** Intercept EFER writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_EFER_WRITES_TRAP RT_BIT_64(32 + 15)
    + /** Intercept CR0 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR0_WRITES_TRAP  RT_BIT_64(32 + 16)
    + /** Intercept CR1 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR1_WRITES_TRAP  RT_BIT_64(32 + 17)
    + /** Intercept CR2 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR2_WRITES_TRAP  RT_BIT_64(32 + 18)
    + /** Intercept CR3 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR3_WRITES_TRAP  RT_BIT_64(32 + 19)
    + /** Intercept CR4 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR4_WRITES_TRAP  RT_BIT_64(32 + 20)
    + /** Intercept CR5 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR5_WRITES_TRAP  RT_BIT_64(32 + 21)
    + /** Intercept CR6 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR6_WRITES_TRAP  RT_BIT_64(32 + 22)
    + /** Intercept CR7 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR7_WRITES_TRAP  RT_BIT_64(32 + 23)
    + /** Intercept CR8 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR8_WRITES_TRAP  RT_BIT_64(32 + 24)
    + /** Intercept CR9 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR9_WRITES_TRAP  RT_BIT_64(32 + 25)
    + /** Intercept CR10 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR10_WRITES_TRAP RT_BIT_64(32 + 26)
    + /** Intercept CR11 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR11_WRITES_TRAP RT_BIT_64(32 + 27)
    + /** Intercept CR12 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR12_WRITES_TRAP RT_BIT_64(32 + 28)
    + /** Intercept CR13 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR13_WRITES_TRAP RT_BIT_64(32 + 29)
    + /** Intercept CR14 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR14_WRITES_TRAP RT_BIT_64(32 + 30)
    + /** Intercept CR15 writes after guest instruction finishes. */
    + #define SVM_CTRL_INTERCEPT_CR15_WRITES_TRAP RT_BIT_64(32 + 31)
      /** @} */
…
       * SVM VM Control Block. (VMCB)
       */
    + #pragma pack(1)
      typedef struct SVMVMCB
      {
          /** Control Area. */
          struct
          {
    -         /** Offset 0x00 - Intercept reads of CR0-15. */
    +         /** Offset 0x00 - Intercept reads of CR0-CR15. */
              uint16_t u16InterceptRdCRx;
    -         /** Offset 0x02 - Intercept writes to CR0-15. */
    +         /** Offset 0x02 - Intercept writes to CR0-CR15. */
              uint16_t u16InterceptWrCRx;
    -         /** Offset 0x04 - Intercept reads of DR0-15. */
    +         /** Offset 0x04 - Intercept reads of DR0-DR15. */
              uint16_t u16InterceptRdDRx;
    -         /** Offset 0x06 - Intercept writes to DR0-15. */
    +         /** Offset 0x06 - Intercept writes to DR0-DR15. */
              uint16_t u16InterceptWrDRx;
              /** Offset 0x08 - Intercept exception vectors 0-31. */
              uint32_t u32InterceptException;
    -         /** Offset 0x0C - Intercept control field 1. */
    -         uint32_t u32InterceptCtrl1;
    -         /** Offset 0x10 - Intercept control field 2. */
    -         uint32_t u32InterceptCtrl2;
    +         /** Offset 0x0C - Intercept control. */
    +         uint64_t u64InterceptCtrl;
              /** Offset 0x14-0x3F - Reserved. */
              uint8_t u8Reserved[0x3c - 0x14];
…
              uint8_t u8Reserved10[0x1000-0x698];
      } SVMVMCB;
    + #pragma pack()
      /** Pointer to the SVMVMCB structure. */
      typedef SVMVMCB *PSVMVMCB;
…
      AssertCompileMemberOffset(SVMVMCB, ctrl.u16InterceptWrDRx,     0x06);
      AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptException, 0x08);
    - AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptCtrl1,     0x0C);
    - AssertCompileMemberOffset(SVMVMCB, ctrl.u32InterceptCtrl2,     0x10);
    + AssertCompileMemberOffset(SVMVMCB, ctrl.u64InterceptCtrl,      0x0C);
      AssertCompileMemberOffset(SVMVMCB, ctrl.u8Reserved,            0x14);
      AssertCompileMemberOffset(SVMVMCB, ctrl.u16PauseFilterThreshold, 0x3c);
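(Merging the two 32-bit intercept words means every former SVM_CTRL2_* bit n now lives at bit 32 + n of u64InterceptCtrl, so the VMCB layout at offset 0x0C is unchanged on little-endian hosts. A small illustrative helper, not part of the changeset, showing how a legacy ctrl1/ctrl2 pair maps onto the merged field:)

    #include <stdint.h>

    /* Illustrative only: the merged 64-bit field is the old field 1 in the
       low dword and the old field 2 in the high dword. */
    static inline uint64_t svmMergeInterceptCtrl(uint32_t uInterceptCtrl1, uint32_t uInterceptCtrl2)
    {
        return (uint64_t)uInterceptCtrl1 | ((uint64_t)uInterceptCtrl2 << 32);
    }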
trunk/include/VBox/vmm/iem.h
r65587 → r65904

      VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue);
      VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr);
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr);
    + VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr);
    + #endif
      /** @} */
trunk/include/iprt/x86.h
r65776 → r65904

       * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors" */
      #define MSR_K8_INT_PENDING                  UINT32_C(0xc0010055)
    +
    + /** SVM Control. */
      #define MSR_K8_VM_CR                        UINT32_C(0xc0010114)
    + /** Disables HDT (Hardware Debug Tool) and certain internal debug
    +  *  features. */
    + #define MSR_K8_VM_CR_DPD                    RT_BIT_32(0)
    + /** If set, non-intercepted INIT signals are converted to \#SX
    +  *  exceptions. */
    + #define MSR_K8_VM_CR_R_INIT                 RT_BIT_32(1)
    + /** Disables A20 masking. */
    + #define MSR_K8_VM_CR_DIS_A20M               RT_BIT_32(2)
    + /** Lock bit for this MSR controlling bits 3 (LOCK) and 4 (SVMDIS). */
    + #define MSR_K8_VM_CR_LOCK                   RT_BIT_32(3)
    + /** SVM disable. When set, writes to EFER.SVME are treated as MBZ. When
    +  *  clear, EFER.SVME can be written normally. */
      #define MSR_K8_VM_CR_SVM_DISABLE            RT_BIT_32(4)
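(The VM_CR bits above are what system software checks before attempting to set EFER.SVME. A minimal sketch of that probe, assuming ring-0 context — reading the MSR faults outside ring 0 — and the IPRT helpers ASMCpuId_ECX and ASMRdMsr from iprt/asm-amd64-x86.h:)

    #include <iprt/x86.h>
    #include <iprt/asm-amd64-x86.h>

    static bool svmCanEnableExample(void)
    {
        /* CPUID Fn8000_0001 ECX[2] reports SVM support. */
        if (!(ASMCpuId_ECX(UINT32_C(0x80000001)) & X86_CPUID_AMD_FEATURE_ECX_SVM))
            return false;
        /* If the BIOS set SVMDIS (possibly together with LOCK), EFER.SVME is MBZ. */
        uint64_t uVmCr = ASMRdMsr(MSR_K8_VM_CR);
        return !(uVmCr & MSR_K8_VM_CR_SVM_DISABLE);
    }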
trunk/src/VBox/VMM/Makefile.kmk
r65871 → r65904

      ifdef VBOX_WITH_3RD_IEM_STEP
       VMM_COMMON_DEFS += VBOX_WITH_3RD_IEM_STEP
      endif
    + ifdef VBOX_WITH_NESTED_HWVIRT
    +  VMM_COMMON_DEFS += VBOX_WITH_NESTED_HWVIRT
    + endif
      #ifdef VBOX_WITH_IEM
trunk/src/VBox/VMM/VMMAll/CPUMAllMsrs.cpp
r64655 → r65904

      if (fExtFeatures & X86_CPUID_AMD_FEATURE_EDX_FFXSR)
          fMask |= MSR_K6_EFER_FFXSR;
    + if (pVM->cpum.s.GuestFeatures.fSvm)
    +     fMask |= MSR_K6_EFER_SVME;

      /* #GP(0) If anything outside the allowed bits is set. */
…
      /* There are a few more: e.g. MSR_K6_EFER_LMSLE */
    - AssertMsg(!(uValue & ~(MSR_K6_EFER_NXE | MSR_K6_EFER_LME | MSR_K6_EFER_LMA /* ignored anyway */ | MSR_K6_EFER_SCE | MSR_K6_EFER_FFXSR)),
    -           ("Unexpected value %RX64\n", uValue));
    + AssertMsg(!(uValue & ~(  MSR_K6_EFER_NXE
    +                        | MSR_K6_EFER_LME
    +                        | MSR_K6_EFER_LMA /* ignored anyway */
    +                        | MSR_K6_EFER_SCE
    +                        | MSR_K6_EFER_FFXSR
    +                        | MSR_K6_EFER_SVME)),
    +           ("Unexpected value %#RX64\n", uValue));
      pVCpu->cpum.s.Guest.msrEFER = (uOldEfer & ~fMask) | (uValue & fMask);
…
      static DECLCALLBACK(VBOXSTRICTRC) cpumMsrRd_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t *puValue)
      {
    -     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
    -     /** @todo AMD SVM. */
    -     *puValue = 0;
    +     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
    +     PVM pVM = pVCpu->CTX_SUFF(pVM);
    +     if (pVM->cpum.s.GuestFeatures.fSvm)
    +         *puValue = MSR_K8_VM_CR_LOCK;
    +     else
    +         *puValue = 0;
          return VINF_SUCCESS;
      }
…
      static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_AmdK8VmCr(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
      {
    -     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uValue); RT_NOREF_PV(uRawValue);
    -     /** @todo AMD SVM. */
    -     return VINF_SUCCESS;
    +     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
    +     PVM pVM = pVCpu->CTX_SUFF(pVM);
    +     if (pVM->cpum.s.GuestFeatures.fSvm)
    +     {
    +         /* Silently ignore writes to the LOCK and SVM_DISABLE bits while the LOCK bit is set (see cpumMsrRd_AmdK8VmCr). */
    +         if (uValue & (MSR_K8_VM_CR_DPD | MSR_K8_VM_CR_R_INIT | MSR_K8_VM_CR_DIS_A20M))
    +             return VERR_CPUM_RAISE_GP_0;
    +         return VINF_SUCCESS;
    +     }
    +     return VERR_CPUM_RAISE_GP_0;
      }
…
      {
          RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange);
    -     /** @todo AMD SVM. */
    -     *puValue = 0;
    +     *puValue = pVCpu->cpum.s.Guest.hwvirt.svm.uMsrHSavePa;
          return VINF_SUCCESS;
      }
…
      static DECLCALLBACK(VBOXSTRICTRC) cpumMsrWr_AmdK8VmHSavePa(PVMCPU pVCpu, uint32_t idMsr, PCCPUMMSRRANGE pRange, uint64_t uValue, uint64_t uRawValue)
      {
    -     RT_NOREF_PV(pVCpu); RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uValue); RT_NOREF_PV(uRawValue);
    -     /** @todo AMD SVM. */
    +     RT_NOREF_PV(idMsr); RT_NOREF_PV(pRange); RT_NOREF_PV(uRawValue);
    +     if (uValue & UINT64_C(0xfff))
    +     {
    +         Log(("CPUM: Invalid value writing host-state save area MSR %#x: low 12 bits set (%#llx)\n", idMsr, uValue));
    +         return VERR_CPUM_RAISE_GP_0;
    +     }
    +
    +     uint64_t fInvPhysMask = ~(RT_BIT_64(pVCpu->CTX_SUFF(pVM)->cpum.s.GuestFeatures.cMaxPhysAddrWidth) - 1U);
    +     if (fInvPhysMask & uValue)
    +     {
    +         Log(("CPUM: Invalid physical address bits set writing host-state save area MSR %#x: %#llx (%#llx)\n",
    +              idMsr, uValue, uValue & fInvPhysMask));
    +         return VERR_CPUM_RAISE_GP_0;
    +     }
    +
    +     pVCpu->cpum.s.Guest.hwvirt.svm.uMsrHSavePa = uValue;
          return VINF_SUCCESS;
      }
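(The VM_HSAVE_PA write handler above enforces two constraints: the host-state save area must be 4KiB aligned and must lie within the guest's physical address width. A worked example of the mask logic; the helper name is chosen here purely for illustration:)

    #include <iprt/types.h>

    /* Illustrative only: with cMaxPhysAddrWidth == 48,
       RT_BIT_64(48) - 1 == 0x0000ffffffffffff (valid address bits), so
       fInvPhysMask     == 0xffff000000000000 (must-be-zero bits). */
    static bool svmIsValidHSavePaExample(uint64_t uValue, uint8_t cMaxPhysAddrWidth)
    {
        if (uValue & UINT64_C(0xfff))                       /* not 4KiB aligned -> #GP(0) */
            return false;
        uint64_t fInvPhysMask = ~(RT_BIT_64(cMaxPhysAddrWidth) - 1U);
        return !(uValue & fInvPhysMask);                    /* beyond physical width -> #GP(0) */
    }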
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r62478 → r65904

      }

    +
    + /**
    +  * SVM nested-guest #VMEXIT handler.
    +  *
    +  * @param   pVCpu      The cross context virtual CPU structure.
    +  * @param   uExitCode  The exit reason.
    +  */
    + VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
    + {
    +     RT_NOREF2(pVCpu, uExitCode);
    + }
    +
    +
    + /**
    +  * VMX nested-guest VM-exit handler.
    +  *
    +  * @param   pVCpu             The cross context virtual CPU structure.
    +  * @param   uBasicExitReason  The basic exit reason.
    +  */
    + VMM_INT_DECL(void) HMNstGstVmxVmExit(PVMCPU pVCpu, uint16_t uBasicExitReason)
    + {
    +     RT_NOREF2(pVCpu, uBasicExitReason);
    + }
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r65881 → r65904

      #include <VBox/vmm/em.h>
      #include <VBox/vmm/hm.h>
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + # include <VBox/vmm/hm_svm.h>
    + #endif
      #include <VBox/vmm/tm.h>
      #include <VBox/vmm/dbgf.h>
…
      # define IEM_USE_UNALIGNED_DATA_ACCESS
      #endif
    +
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + /**
    +  * Check if an SVM control/instruction intercept is set.
    +  */
    + #define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)  (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
    +
    + /**
    +  * Check if an SVM read CRx intercept is set.
    +  */
    + #define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)     (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
    +
    + /**
    +  * Check if an SVM write CRx intercept is set.
    +  */
    + #define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr)    (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
    +
    + /**
    +  * Check if an SVM read DRx intercept is set.
    +  */
    + #define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr)     (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
    +
    + /**
    +  * Check if an SVM write DRx intercept is set.
    +  */
    + #define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)    (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
    +
    + /**
    +  * Check if an SVM exception intercept is set.
    +  */
    + #define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_enmXcpt)    (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_enmXcpt)))
    + #endif /* VBOX_WITH_NESTED_HWVIRT */
…
      }

    +
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + /**
    +  * Interface for HM and EM to emulate the CLGI instruction.
    +  *
    +  * @returns Strict VBox status code.
    +  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    +  * @param   cbInstr The instruction length in bytes.
    +  * @thread  EMT(pVCpu)
    +  */
    + VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
    + {
    +     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    +
    +     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    +     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
    +     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    + }
    +
    +
    + /**
    +  * Interface for HM and EM to emulate the STGI instruction.
    +  *
    +  * @returns Strict VBox status code.
    +  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
    +  * @param   cbInstr The instruction length in bytes.
    +  * @thread  EMT(pVCpu)
    +  */
    + VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
    + {
    +     IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    +
    +     iemInitExec(pVCpu, false /*fBypassHandlers*/);
    +     VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
    +     return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
    + }
    + #endif /* VBOX_WITH_NESTED_HWVIRT */
    +
      #ifdef IN_RING3
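(The wrapper macros keep IEM's instruction code free of direct CPUM calls; the CLGI/STGI implementations in IEMAllCImpl.cpp.h below use the control-intercept variant. A hypothetical use of the CRx variant in a MOV-to-CR0 emulation path — the surrounding code is not part of this changeset; SVM_EXIT_WRITE_CR0 is the existing exit code from hm_svm.h:)

    /* Hypothetical: inside a MOV CR0,GReg emulation path. */
    if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, 0 /* CR0 */))
    {
        Log2(("mov cr0: Guest intercept -> #VMEXIT\n"));
        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_WRITE_CR0);
        return VINF_EM_RESCHEDULE;
    }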
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r65612 → r65904

    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + /**
    +  * Implements 'CLGI'.
    +  */
    + IEM_CIMPL_DEF_0(iemCImpl_clgi)
    + {
    +     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    +     if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvm)
    +     {
    +         Log2(("clgi: Not in CPUID -> #UD\n"));
    +         return iemRaiseUndefinedOpcode(pVCpu);
    +     }
    +     if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
    +     {
    +         Log2(("clgi: EFER.SVME not enabled -> #UD\n"));
    +         return iemRaiseUndefinedOpcode(pVCpu);
    +     }
    +     if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    +     {
    +         Log2(("clgi: Real or v8086 mode -> #UD\n"));
    +         return iemRaiseUndefinedOpcode(pVCpu);
    +     }
    +     if (pVCpu->iem.s.uCpl != 0)
    +     {
    +         Log2(("clgi: CPL != 0 -> #GP(0)\n"));
    +         return iemRaiseGeneralProtectionFault0(pVCpu);
    +     }
    + #ifndef IN_RC
    +     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
    +     {
    +         Log2(("clgi: Guest intercept -> VMexit\n"));
    +         HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
    +         return VINF_EM_RESCHEDULE;
    +     }
    + #endif
    +
    +     pCtx->hwvirt.svm.fGif = 0;
    +     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    +     return VINF_SUCCESS;
    + }
    +
    +
    + /**
    +  * Implements 'STGI'.
    +  */
    + IEM_CIMPL_DEF_0(iemCImpl_stgi)
    + {
    +     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    +     if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvm)
    +     {
    +         Log2(("stgi: Not in CPUID -> #UD\n"));
    +         return iemRaiseUndefinedOpcode(pVCpu);
    +     }
    +     if (!(pCtx->msrEFER & MSR_K6_EFER_SVME))
    +     {
    +         Log2(("stgi: EFER.SVME not enabled -> #UD\n"));
    +         return iemRaiseUndefinedOpcode(pVCpu);
    +     }
    +     if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
    +     {
    +         Log2(("stgi: Real or v8086 mode -> #UD\n"));
    +         return iemRaiseUndefinedOpcode(pVCpu);
    +     }
    +     if (pVCpu->iem.s.uCpl != 0)
    +     {
    +         Log2(("stgi: CPL != 0 -> #GP(0)\n"));
    +         return iemRaiseGeneralProtectionFault0(pVCpu);
    +     }
    + #ifndef IN_RC
    +     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
    +     {
    +         Log2(("stgi: Guest intercept -> VMexit\n"));
    +         HMNstGstSvmVmExit(pVCpu, SVM_EXIT_STGI);
    +         return VINF_EM_RESCHEDULE;
    +     }
    + #endif
    +
    +     pCtx->hwvirt.svm.fGif = 1;
    +     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    +     return VINF_SUCCESS;
    + }
    + #endif /* VBOX_WITH_NESTED_HWVIRT */
    +
      /**
       * Implements 'CLI'.
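(Both implementations repeat the same four-step privilege ladder — CPUID feature bit, EFER.SVME, real/v8086 mode, CPL — which the remaining SVM instructions will need as well. A sketch of how it could be factored; the macro name is invented here and each failing check raises exactly the exception the inline versions above raise:)

    /* Hypothetical refactoring of the shared CLGI/STGI checks. */
    #define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_pCtx, a_szInstr) \
        do { \
            if (!IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvm) \
            { \
                Log2((a_szInstr ": Not in CPUID -> #UD\n")); \
                return iemRaiseUndefinedOpcode(a_pVCpu); \
            } \
            if (!((a_pCtx)->msrEFER & MSR_K6_EFER_SVME)) \
            { \
                Log2((a_szInstr ": EFER.SVME not enabled -> #UD\n")); \
                return iemRaiseUndefinedOpcode(a_pVCpu); \
            } \
            if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
            { \
                Log2((a_szInstr ": Real or v8086 mode -> #UD\n")); \
                return iemRaiseUndefinedOpcode(a_pVCpu); \
            } \
            if ((a_pVCpu)->iem.s.uCpl != 0) \
            { \
                Log2((a_szInstr ": CPL != 0 -> #GP(0)\n")); \
                return iemRaiseGeneralProtectionFault0(a_pVCpu); \
            } \
        } while (0)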
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r65877 → r65904

      FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);

    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + /** Opcode 0x0f 0x01 0xdc. */
    + FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
    + {
    +     IEMOP_MNEMONIC(stgi, "stgi");
    +     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
    + }
    +
    + /** Opcode 0x0f 0x01 0xdd. */
    + FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
    + {
    +     IEMOP_MNEMONIC(clgi, "clgi");
    +     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
    + }
    + #else
      /** Opcode 0x0f 0x01 0xdc. */
      FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);
…
      /** Opcode 0x0f 0x01 0xdd. */
      FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);
    + #endif /* VBOX_WITH_NESTED_HWVIRT */

      /** Opcode 0x0f 0x01 0xde. */
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r65322 → r65904

      static FNSVMEXITHANDLER hmR0SvmExitXcptAC;
      static FNSVMEXITHANDLER hmR0SvmExitXcptBP;
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + static FNSVMEXITHANDLER hmR0SvmExitClgi;
    + static FNSVMEXITHANDLER hmR0SvmExitStgi;
    + #endif
      /** @} */
…
      /* Set up unconditional intercepts and conditions. */
    - pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_NMI           /* Non-maskable interrupts cause a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_INIT          /* INIT signal causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_RDPMC         /* RDPMC causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_CPUID         /* CPUID causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_RSM           /* RSM causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_HLT           /* HLT causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_INOUT_BITMAP  /* Use the IOPM to cause IOIO #VMEXITs. */
    -                               | SVM_CTRL1_INTERCEPT_MSR_SHADOW    /* MSR access not covered by MSRPM causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_INVLPGA       /* INVLPGA causes a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_SHUTDOWN      /* Shutdown events cause a #VMEXIT. */
    -                               | SVM_CTRL1_INTERCEPT_FERR_FREEZE;  /* Intercept "freezing" during legacy FPU handling. */
    -
    - pVmcb->ctrl.u32InterceptCtrl2 = SVM_CTRL2_INTERCEPT_VMRUN         /* VMRUN causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_VMMCALL       /* VMMCALL causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_VMLOAD        /* VMLOAD causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_VMSAVE        /* VMSAVE causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_STGI          /* STGI causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_CLGI          /* CLGI causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_SKINIT        /* SKINIT causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_WBINVD        /* WBINVD causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_MONITOR       /* MONITOR causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_MWAIT         /* MWAIT causes a #VMEXIT. */
    -                               | SVM_CTRL2_INTERCEPT_XSETBV;       /* XSETBV causes a #VMEXIT. */
    + pVmcb->ctrl.u64InterceptCtrl = SVM_CTRL_INTERCEPT_INTR            /* External interrupt causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_NMI             /* Non-maskable interrupts cause a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_INIT            /* INIT signal causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_RDPMC           /* RDPMC causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_CPUID           /* CPUID causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_RSM             /* RSM causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_HLT             /* HLT causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_INOUT_BITMAP    /* Use the IOPM to cause IOIO #VMEXITs. */
    +                              | SVM_CTRL_INTERCEPT_MSR_SHADOW      /* MSR access not covered by MSRPM causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_INVLPGA         /* INVLPGA causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_SHUTDOWN        /* Shutdown events cause a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_FERR_FREEZE     /* Intercept "freezing" during legacy FPU handling. */
    +                              | SVM_CTRL_INTERCEPT_VMRUN           /* VMRUN causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_VMMCALL         /* VMMCALL causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_VMLOAD          /* VMLOAD causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_VMSAVE          /* VMSAVE causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_STGI            /* STGI causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_CLGI            /* CLGI causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_SKINIT          /* SKINIT causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_WBINVD          /* WBINVD causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_MONITOR         /* MONITOR causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_MWAIT           /* MWAIT causes a #VMEXIT. */
    +                              | SVM_CTRL_INTERCEPT_XSETBV;         /* XSETBV causes a #VMEXIT. */

      /* CR0, CR4 reads must be intercepted, our shadow values are not necessarily the same as the guest's. */
…
      /* Intercept INVLPG and task switches (may change CR3, EFLAGS, LDT). */
    - pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_INVLPG
    -                                | SVM_CTRL1_INTERCEPT_TASK_SWITCH;
    + pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_INVLPG
    +                               | SVM_CTRL_INTERCEPT_TASK_SWITCH;

      /* Page faults must be intercepted to implement shadow paging. */
…
      #ifdef HMSVM_ALWAYS_TRAP_TASK_SWITCH
    - pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_TASK_SWITCH;
    + pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_TASK_SWITCH;
      #endif
…
      if (fCanUseRealTsc)
      {
    -     pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC;
    -     pVmcb->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP;
    +     pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSC;
    +     pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_RDTSCP;
          STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
      }
      else
      {
    -     pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC;
    -     pVmcb->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP;
    +     pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSC;
    +     pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_RDTSCP;
          STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
      }
…
      DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
      {
    -     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
    +     if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR))
          {
              pVmcb->ctrl.IntCtrl.n.u1VIrqValid = 1;    /* A virtual interrupt is pending. */
              pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;   /* Not necessary as we #VMEXIT for delivering the interrupt. */
    -         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
    +         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_VINTR;
              pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);

              Log4(("Setting VINTR intercept\n"));
          }
      }
    +
    +
    + /**
    +  * Clears the virtual interrupt intercept control in the VMCB, as we have
    +  * figured the guest is unable to process any interrupts at this point in time.
    +  *
    +  * @param   pVmcb   Pointer to the VM control block.
    +  */
    + DECLINLINE(void) hmR0SvmClearVirtIntrIntercept(PSVMVMCB pVmcb)
    + {
    +     if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
    +     {
    +         pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
    +         pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
    +         Log4(("Clearing VINTR intercept\n"));
    +     }
    + }
…
      DECLINLINE(void) hmR0SvmSetIretIntercept(PSVMVMCB pVmcb)
      {
    -     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET))
    -     {
    -         pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_IRET;
    +     if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET))
    +     {
    +         pVmcb->ctrl.u64InterceptCtrl |= SVM_CTRL_INTERCEPT_IRET;
              pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
…
      DECLINLINE(void) hmR0SvmClearIretIntercept(PSVMVMCB pVmcb)
      {
    -     if (pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_IRET)
    -     {
    -         pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_IRET;
    +     if (pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_IRET)
    +     {
    +         pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_IRET;
              pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS);
…
      Log4(("ctrl.u16InterceptWrDRx            %#x\n",    pVmcb->ctrl.u16InterceptWrDRx));
      Log4(("ctrl.u32InterceptException        %#x\n",    pVmcb->ctrl.u32InterceptException));
    - Log4(("ctrl.u32InterceptCtrl1            %#x\n",    pVmcb->ctrl.u32InterceptCtrl1));
    - Log4(("ctrl.u32InterceptCtrl2            %#x\n",    pVmcb->ctrl.u32InterceptCtrl2));
    + Log4(("ctrl.u64InterceptCtrl             %#RX64\n", pVmcb->ctrl.u64InterceptCtrl));
      Log4(("ctrl.u64IOPMPhysAddr              %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
      Log4(("ctrl.u64MSRPMPhysAddr             %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
…
      if (   (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
    -     && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
    +     && !(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSCP))
      {
          hmR0SvmSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
…
      /* TSC read must be done early for maximum accuracy. */
    - if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
    + if (!(pVmcb->ctrl.u64InterceptCtrl & SVM_CTRL_INTERCEPT_RDTSC))
          TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVmcb->ctrl.u64TSCOffset);
…
      }

    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + case SVM_EXIT_CLGI:   return hmR0SvmExitClgi(pVCpu, pCtx, pSvmTransient);
    + case SVM_EXIT_STGI:   return hmR0SvmExitStgi(pVCpu, pCtx, pSvmTransient);
    + #else
    + case SVM_EXIT_CLGI:
    + case SVM_EXIT_STGI:
    + #endif
      case SVM_EXIT_INVLPGA:
      case SVM_EXIT_RSM:
…
      case SVM_EXIT_VMLOAD:
      case SVM_EXIT_VMSAVE:
    - case SVM_EXIT_STGI:
    - case SVM_EXIT_CLGI:
      case SVM_EXIT_SKINIT:
          return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
…
      /* Indicate that we no longer need to #VMEXIT when the guest is ready to receive interrupts/NMIs; it is now ready. */
    - pVmcb->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_VINTR;
    + pVmcb->ctrl.u64InterceptCtrl &= ~SVM_CTRL_INTERCEPT_VINTR;
      pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
…
      }

    +
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + /**
    +  * \#VMEXIT handler for CLGI (SVM_EXIT_CLGI). Conditional
    +  * \#VMEXIT.
    +  */
    + HMSVM_EXIT_DECL hmR0SvmExitClgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    + {
    +     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    +     PVM pVM = pVCpu->CTX_SUFF(pVM);
    +     if (pVM->cpum.ro.GuestFeatures.fSvm)
    +     {
    +         /** @todo Stat. */
    +         /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */
    +         VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, 3);
    +         return VBOXSTRICTRC_VAL(rcStrict);
    +     }
    +     return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
    + }
    +
    +
    + /**
    +  * \#VMEXIT handler for STGI (SVM_EXIT_STGI). Conditional
    +  * \#VMEXIT.
    +  */
    + HMSVM_EXIT_DECL hmR0SvmExitStgi(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    + {
    +     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
    +     PVM pVM = pVCpu->CTX_SUFF(pVM);
    +     if (pVM->cpum.ro.GuestFeatures.fSvm)
    +     {
    +         /** @todo Stat. */
    +         /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitStgi); */
    +         VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, 3);
    +         return VBOXSTRICTRC_VAL(rcStrict);
    +     }
    +     return hmR0SvmExitXcptUD(pVCpu, pCtx, pSvmTransient);
    + }
    + #endif /* VBOX_WITH_NESTED_HWVIRT */
    +
      /** @} */
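(Every intercept toggle in this file follows the same discipline: change u64InterceptCtrl, then clear HMSVM_VMCB_CLEAN_INTERCEPTS so the CPU re-fetches the intercept fields on the next VMRUN. The pattern distilled into a generic helper, for illustration only — the function name is not part of the changeset:)

    /* Illustrative generic form of the set/clear helpers above. */
    DECLINLINE(void) hmR0SvmSetCtrlInterceptExample(PSVMVMCB pVmcb, uint64_t fIntercept)
    {
        if (!(pVmcb->ctrl.u64InterceptCtrl & fIntercept))
        {
            pVmcb->ctrl.u64InterceptCtrl |= fIntercept;
            pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
        }
    }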
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r64663 → r65904

      /* C-state control. Guesses. */
      pVCpu->cpum.s.GuestMsrs.msr.PkgCStateCfgCtrl = 1 /*C1*/ | RT_BIT_32(25) | RT_BIT_32(26) | RT_BIT_32(27) | RT_BIT_32(28);
    +
    + /*
    +  * Hardware virtualization state.
    +  */
    + memset(&pCtx->hwvirt, 0, sizeof(pCtx->hwvirt));
    + /* SVM. */
    + pCtx->hwvirt.svm.fGif = 1;
      }
trunk/src/VBox/VMM/VMMR3/CPUMR3CpuId.cpp
r65801 → r65904

      pFeatures->fAmdMmxExts = RT_BOOL(pExtLeaf->uEdx & X86_CPUID_AMD_FEATURE_EDX_AXMMX);
      pFeatures->fXop        = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_XOP);
    + pFeatures->fSvm        = RT_BOOL(pExtLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM);
      }
…
      CPUMISAEXTCFG   enm3dNowPrf;
      CPUMISAEXTCFG   enmAmdExtMmx;
    + CPUMISAEXTCFG   enmSvm;

      uint32_t        uMaxStdLeaf;
…
      pExtFeatureLeaf->uEcx &= X86_CPUID_EXT_FEATURE_ECX_LAHF_SAHF
                             //| X86_CPUID_AMD_FEATURE_ECX_CMPL - set below if applicable.
    -                        //| X86_CPUID_AMD_FEATURE_ECX_SVM - not virtualized.
    +                        | (pConfig->enmSvm ? X86_CPUID_AMD_FEATURE_ECX_SVM : 0)
                             //| X86_CPUID_AMD_FEATURE_ECX_EXT_APIC
                             /* Note: This could prevent teleporting from AMD to Intel CPUs! */
…
      {
          PORTABLE_DISABLE_FEATURE_BIT(    1, pExtFeatureLeaf->uEcx, CR8L,  X86_CPUID_AMD_FEATURE_ECX_CR8L);
    +     PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SVM,   X86_CPUID_AMD_FEATURE_ECX_SVM,   pConfig->enmSvm);
          PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, ABM,   X86_CPUID_AMD_FEATURE_ECX_ABM,   pConfig->enmAbm);
          PORTABLE_DISABLE_FEATURE_BIT_CFG(1, pExtFeatureLeaf->uEcx, SSE4A, X86_CPUID_AMD_FEATURE_ECX_SSE4A, pConfig->enmSse4A);
…
      if (pConfig->enmSse4A == CPUMISAEXTCFG_ENABLED_ALWAYS)
          pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SSE4A;
    + if (pConfig->enmSvm == CPUMISAEXTCFG_ENABLED_ALWAYS)
    +     pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SVM;
      if (pConfig->enmMisAlnSse == CPUMISAEXTCFG_ENABLED_ALWAYS)
          pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_MISALNSSE;
…
      if (pConfig->enmAmdExtMmx == CPUMISAEXTCFG_ENABLED_ALWAYS)
          pExtFeatureLeaf->uEdx |= X86_CPUID_AMD_FEATURE_EDX_AXMMX;
    + if (pConfig->enmSvm == CPUMISAEXTCFG_ENABLED_ALWAYS)
    +     pExtFeatureLeaf->uEcx |= X86_CPUID_AMD_FEATURE_ECX_SVM;
      }
      pExtFeatureLeaf = NULL; /* Must refetch! */
…
       * ECX - Reserved.
       * EDX - SVM Feature identification.
    -  * We clear all as we currently do not virtualize SVM.
    -  */
    - cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));
    +  */
    + pExtFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, UINT32_C(0x80000001), 0);
    + if (pExtFeatureLeaf->uEcx & X86_CPUID_AMD_FEATURE_ECX_SVM)
    + {
    +     PCPUMCPUIDLEAF pSvmFeatureLeaf = cpumR3CpuIdGetExactLeaf(pCpum, 0x8000000a, 0);
    +     pSvmFeatureLeaf->uEax = 0x1;
    +     pSvmFeatureLeaf->uEbx = 0x8000;
    +     pSvmFeatureLeaf->uEcx = 0;
    +     pSvmFeatureLeaf->uEdx = 0; /** @todo Support SVM features */
    + }
    + else
    +     cpumR3CpuIdZeroLeaf(pCpum, UINT32_C(0x8000000a));

      /* Cpuid 0x8000000b thru 0x80000018: Reserved
…
      "|3DNOWPRF"
      "|AXMMX"
    + "|SVM"
      , "" /*pszValidNodes*/, "CPUM" /*pszWho*/, 0 /*uInstance*/);
      if (RT_FAILURE(rc))
…
      rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "AXMMX", &pConfig->enmAmdExtMmx, fNestedPagingAndFullGuestExec);
      AssertLogRelRCReturn(rc, rc);
    +
    + #ifdef VBOX_WITH_NESTED_HWVIRT
    + /** @cfgm{/CPUM/IsaExts/SVM, isaextcfg, depends}
    +  * Whether to expose AMD's hardware virtualization (SVM) instructions to the
    +  * guest. For the time being, the default is to only do this for VMs with
    +  * nested paging and AMD-V.
    +  */
    + rc = cpumR3CpuIdReadIsaExtCfg(pVM, pIsaExts, "SVM", &pConfig->enmSvm, fNestedPagingAndFullGuestExec);
    + AssertLogRelRCReturn(rc, rc);
    + #endif

      return VINF_SUCCESS;
…
      DBGFREGSUBFIELD_RO("LahfSahf\0"   "LAHF/SAHF support in 64-bit mode",        0, 1, 0),
      DBGFREGSUBFIELD_RO("CmpLegacy\0"  "Core multi-processing legacy mode",       1, 1, 0),
    - DBGFREGSUBFIELD_RO("SVM\0"        "AMD VM extensions",                       2, 1, 0),
    + DBGFREGSUBFIELD_RO("SVM\0"        "AMD Secure Virtual Machine extensions",   2, 1, 0),
      DBGFREGSUBFIELD_RO("EXTAPIC\0"    "AMD Extended APIC registers",             3, 1, 0),
      DBGFREGSUBFIELD_RO("CR8L\0"       "AMD LOCK MOV CR0 means MOV CR8",          4, 1, 0),
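(Per the AMD APM, CPUID Fn8000_000A reports EAX[7:0] = SVM revision, EBX = number of ASIDs and EDX = optional SVM features, so the values set above advertise revision 1, 32768 ASIDs and no optional features. A sketch of what a nested guest would observe; ASMCpuId is the IPRT helper from iprt/asm-amd64-x86.h and the function name here is illustrative:)

    #include <iprt/asm-amd64-x86.h>

    static uint32_t svmQueryNumAsidsExample(void)
    {
        uint32_t uEax, uEbx, uEcx, uEdx;
        ASMCpuId(UINT32_C(0x8000000a), &uEax, &uEbx, &uEcx, &uEdx);
        /* With this changeset: uEax == 1, uEbx == 0x8000, uEdx == 0. */
        return uEbx; /* Number of address space identifiers (ASIDs). */
    }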
trunk/src/VBox/VMM/include/CPUMInternal.mac
r64663 → r65904

      .Guest.pXStateRC                        RTRCPTR_RES 1
      .Guest.aoffXState                       resw 64
    + alignb 8
    + .Guest.hwvirt.svm.uMsrHSavePa           resq 1
    + .Guest.hwvirt.svm.u64InterceptCtrl      resq 1
    + .Guest.hwvirt.svm.u32InterceptXcpt      resd 1
    + .Guest.hwvirt.svm.u16InterceptRdCRx     resw 1
    + .Guest.hwvirt.svm.u16InterceptWrCRx     resw 1
    + .Guest.hwvirt.svm.u16InterceptRdDRx     resw 1
    + .Guest.hwvirt.svm.u16InterceptWrDRx     resw 1
    + .Guest.hwvirt.svm.fGif                  resb 1
      alignb 64

      .GuestMsrs resq 0
      .GuestMsrs.au64 resq 64
…
      .Hyper.pXStateRC                        RTRCPTR_RES 1
      .Hyper.aoffXState                       resw 64
    + alignb 8
    + .Hyper.hwvirt.svm.uMsrHSavePa           resq 1
    + .Hyper.hwvirt.svm.u64InterceptCtrl      resq 1
    + .Hyper.hwvirt.svm.u32InterceptXcpt      resd 1
    + .Hyper.hwvirt.svm.u16InterceptRdCRx     resw 1
    + .Hyper.hwvirt.svm.u16InterceptWrCRx     resw 1
    + .Hyper.hwvirt.svm.u16InterceptRdDRx     resw 1
    + .Hyper.hwvirt.svm.u16InterceptWrDRx     resw 1
    + .Hyper.hwvirt.svm.fGif                  resb 1
      alignb 64
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r65531 → r65904

      GEN_CHECK_SIZE(CPUMCTX);
    + GEN_CHECK_OFF(CPUMCTX, hwvirt);
    + GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
    + GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fGif);
    + /** @todo add rest of hwvirt fields when code is more
    +  *        finalized. */
      GEN_CHECK_OFF(CPUMCTX, pXStateR0);
      GEN_CHECK_OFF(CPUMCTX, pXStateR3);
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r64626 → r65904

      CHECK_MEMBER_ALIGNMENT(CPUMCTX, gdtr.pGdt, 8);
      CHECK_MEMBER_ALIGNMENT(CPUMCTX, SysEnter, 8);
    + CHECK_MEMBER_ALIGNMENT(CPUMCTX, hwvirt, 8);
      CHECK_CPUMCTXCORE(rax);
      CHECK_CPUMCTXCORE(rcx);