Changeset 74332 in vbox for trunk/src/VBox/VMM/VMMAll

Timestamp: Sep 18, 2018 6:56:39 AM
Location:  trunk/src/VBox/VMM/VMMAll
Files:     3 edited
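In short: the IEM SVM helper macros get consistent names (IEM_RETURN_SVM_VMEXIT becomes IEM_SVM_VMEXIT_RET, IEM_RETURN_SVM_CRX_VMEXIT becomes IEM_SVM_CRX_VMEXIT_RET, IEM_SVM_UPDATE_NRIP becomes IEM_UPDATE_SVM_NRIP), the IEMCIMPL_HLP_* intercept helpers move from IEMAllCImplSvmInstr.cpp.h into IEMAll.cpp as IEM_CHECK_SVM_INSTR_INTERCEPT and IEM_CHECK_SVM_READ_CR0_INTERCEPT, and IEM_SVM_INSTR_COMMON_CHECKS moves the other way into IEMAllCImplSvmInstr.cpp.h as IEM_CHECK_SVM_INSTR_COMMON. The IEM_IS_SVM_ENABLED and IEM_GET_SVM_PAUSE_FILTER_COUNT wrappers are dropped in favour of direct CPUM calls, and the VMLOAD/VMSAVE/CLGI/STGI/INVLPGA/SKINIT/PAUSE implementations move up next to VMRUN. All of these helpers are statement macros that may return from the calling function. The toy program below (hypothetical TOY_* names, not VirtualBox code) shows that pattern in isolation:

#include <stdio.h>

typedef int VBOXSTRICTRC;               /* toy stand-in for the real strict status type */
#define VINF_SUCCESS     0
#define SVM_EXIT_PAUSE   0x77

static int g_fInterceptPause = 1;        /* pretend the VMCB intercept bit is set */

static VBOXSTRICTRC toySvmVmexit(int uExitCode)
{
    printf("#VMEXIT(%#x)\n", uExitCode);
    return 1;                            /* strict status signalling the exit */
}

/* Same shape as IEM_SVM_VMEXIT_RET: a statement macro that returns from the
 * calling function, letting instruction implementations bail out in one line. */
#define TOY_SVM_VMEXIT_RET(a_uExitCode) \
    do { return toySvmVmexit(a_uExitCode); } while (0)

/* Same shape as IEM_CHECK_SVM_INSTR_INTERCEPT: test the bit, then exit if set. */
#define TOY_CHECK_SVM_INSTR_INTERCEPT(a_fIntercept, a_uExitCode) \
    do \
    { \
        if (a_fIntercept) \
            TOY_SVM_VMEXIT_RET(a_uExitCode); \
    } while (0)

static VBOXSTRICTRC toyCImplPause(void)
{
    TOY_CHECK_SVM_INSTR_INTERCEPT(g_fInterceptPause, SVM_EXIT_PAUSE);
    puts("pause executed normally");
    return VINF_SUCCESS;
}

int main(void)
{
    toyCImplPause();                     /* takes the #VMEXIT path */
    g_fInterceptPause = 0;
    return toyCImplPause();              /* executes normally */
}

Wrapping the macro body in do { ... } while (0) keeps it usable as a single statement after an if without a dangling-else hazard, which is why every helper in this changeset uses that form.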
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r74209 → r74332)

@@ -410 +410 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 /**
- * Check the common SVM instruction preconditions.
- */
-# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
-    do { \
-        if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
-        { \
-            Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
-            return iemRaiseUndefinedOpcode(a_pVCpu); \
-        } \
-        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
-        { \
-            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
-            return iemRaiseUndefinedOpcode(a_pVCpu); \
-        } \
-        if ((a_pVCpu)->iem.s.uCpl != 0) \
-        { \
-            Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
-            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
-        } \
-    } while (0)
-
-/**
- * Updates the NextRIP (NRI) field in the nested-guest VMCB.
- */
-# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
-    do { \
-        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
-            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
-    } while (0)
-
-/**
- * Check if SVM is enabled.
- */
-# define IEM_IS_SVM_ENABLED(a_pVCpu)    (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
-
-/**
  * Check if an SVM control/instruction intercept is set.
  */

@@ -477 +441 @@
 /**
- * Get the SVM pause-filter count.
- */
-# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu)    (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
-
-/**
  * Invokes the SVM \#VMEXIT handler for the nested-guest.
  */
-# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
     do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)

@@ -490 +449 @@
  * corresponding decode assist information.
  */
-# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
+# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
     do \
     { \
 ...
         else \
             uExitInfo1 = 0; \
-        IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
+        IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
     } while (0)
 
+/** Check and handles SVM nested-guest instruction intercept and updates
+ *  NRIP if needed.
+ */
+# define IEM_CHECK_SVM_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
+        { \
+            IEM_UPDATE_SVM_NRIP(a_pVCpu); \
+            IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+        } \
+    } while (0)
+
+/** Checks and handles SVM nested-guest CR0 read intercept. */
+# define IEM_CHECK_SVM_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
+    do \
+    { \
+        if (!IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
+        { /* probably likely */ } \
+        else \
+        { \
+            IEM_UPDATE_SVM_NRIP(a_pVCpu); \
+            IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
+        } \
+    } while (0)
+
+/**
+ * Updates the NextRIP (NRI) field in the nested-guest VMCB.
+ */
+# define IEM_UPDATE_SVM_NRIP(a_pVCpu) \
+    do { \
+        if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
+            CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
+    } while (0)
+
 #else
-# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr)                            do { } while (0)
-# define IEM_SVM_UPDATE_NRIP(a_pVCpu)                                             do { } while (0)
-# define IEM_IS_SVM_ENABLED(a_pVCpu)                                              (false)
 # define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)                      (false)
 # define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)                         (false)
 ...
 # define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr)                        (false)
 # define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector)                        (false)
-# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu)                                  (0)
-# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)  do { return VERR_SVM_IPE_1; } while (0)
-# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2)     do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg)    do { return VERR_SVM_IPE_1; } while (0)
+# define IEM_CHECK_SVM_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
+# define IEM_CHECK_SVM_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2)    do { } while (0)
+# define IEM_UPDATE_SVM_NRIP(a_pVCpu)                                             do { } while (0)
 
 #endif

@@ -3418 +3411 @@ shutdown
     {
         Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }

@@ -4005 +3998 @@ iemTaskSwitch
     Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
-    IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
+    IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
     RT_NOREF2(uExitInfo1, uExitInfo2);

@@ -5505 +5498 @@
         /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
         if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
-            IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+            IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
     else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
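Note the #else arm above: when VBOX_WITH_NESTED_HWVIRT_SVM is not defined, the predicate macros become constant (false) and the check macros empty statements, so the call sites in IEMAllCImpl.cpp.h (next file) compile away without any #ifdef of their own. A self-contained toy of the same compile-out pattern (hypothetical TOY_* names, assumed semantics, not the VirtualBox headers):

#include <stdbool.h>
#include <stdio.h>

#define WITH_TOY_NESTED_SVM 0   /* flip to 1 to compile the checks in */

#if WITH_TOY_NESTED_SVM
# define TOY_IS_INTERCEPT_SET(a_fFlag)  (a_fFlag)
# define TOY_CHECK_INTERCEPT(a_fFlag)   do { if (a_fFlag) return -1; } while (0)
#else
/* Feature compiled out: predicates are constant, checks are empty statements. */
# define TOY_IS_INTERCEPT_SET(a_fFlag)  (false)
# define TOY_CHECK_INTERCEPT(a_fFlag)   do { } while (0)
#endif

static int toyEmulateHlt(bool fIntercept)
{
    (void)fIntercept;                     /* referenced only through the macros */
    TOY_CHECK_INTERCEPT(fIntercept);      /* empty statement in this configuration */
    if (TOY_IS_INTERCEPT_SET(fIntercept)) /* constant false, folded away */
        return -1;
    puts("hlt emulated");
    return 0;
}

int main(void)
{
    return toyEmulateHlt(true);           /* prints and returns 0 with the feature off */
}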
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r73606 → r74332)

@@ -552 +552 @@ pushf
         Log2(("pushf: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_PUSHF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -619 +619 @@ popf
         Log2(("popf: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_POPF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -3857 +3857 @@ iret
         Log(("iret: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IRET, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4636 +4636 @@ lgdt
         Log(("lgdt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4681 +4681 @@ sgdt
         Log(("sgdt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_GDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4709 +4709 @@ lidt
         Log(("lidt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4753 +4753 @@ sidt
         Log(("sidt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4800 +4800 @@ lldt
         Log(("lldt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4876 +4876 @@ lldt (second site)
         Log(("lldt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_LDTR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -4905 +4905 @@ iemCImpl_sldt_reg
 IEM_CIMPL_DEF_2(iemCImpl_sldt_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);

@@ -4929 +4929 @@ iemCImpl_sldt_mem
 IEM_CIMPL_DEF_2(iemCImpl_sldt_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_LDTR_READS, SVM_EXIT_LDTR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_LDTR);

@@ -4971 +4971 @@ ltr
         Log(("ltr: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TR_WRITE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -5070 +5070 @@ iemCImpl_str_reg
 IEM_CIMPL_DEF_2(iemCImpl_str_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);

@@ -5094 +5094 @@ iemCImpl_str_mem
 IEM_CIMPL_DEF_2(iemCImpl_str_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_TR_READS, SVM_EXIT_TR_READ, 0, 0);
 
     IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR);

@@ -5118 +5118 @@ iemCImpl_mov_Rd_Cd
         Log(("iemCImpl_mov_Rd_Cd: Guest intercept CR%u -> #VMEXIT\n", iCrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_READ_CR0 + iCrReg, IEMACCESSCRX_MOV_CRX, iGReg);

@@ -5189 +5189 @@ iemCImpl_smsw_reg
 IEM_CIMPL_DEF_2(iemCImpl_smsw_reg, uint8_t, iGReg, uint8_t, enmEffOpSize)
 {
-    IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    IEM_CHECK_SVM_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
 
     switch (enmEffOpSize)

@@ -5227 +5227 @@ iemCImpl_smsw_mem
 IEM_CIMPL_DEF_2(iemCImpl_smsw_mem, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
 {
-    IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(pVCpu, /*cr*/ 0, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    IEM_CHECK_SVM_READ_CR0_INTERCEPT(pVCpu, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
 
     uint16_t u16Value;

@@ -5356 +5356 @@ iemCImpl_load_CrX (CR0)
             Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-            IEM_SVM_UPDATE_NRIP(pVCpu);
-            IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
+            IEM_UPDATE_SVM_NRIP(pVCpu);
+            IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR0, enmAccessCrX, iGReg);
         }
         if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CR0_SEL_WRITE))
 ...
             Assert(enmAccessCrX != IEMACCESSCRX_CLTS);
             Log(("iemCImpl_load_Cr%#x: lmsw or bits other than TS/MP changed: Guest intercept -> #VMEXIT\n", iCrReg));
-            IEM_SVM_UPDATE_NRIP(pVCpu);
-            IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);
+            IEM_UPDATE_SVM_NRIP(pVCpu);
+            IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_CR0_SEL_WRITE, enmAccessCrX, iGReg);

@@ -5424 +5424 @@ iemCImpl_load_CrX (CR2)
         Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR2, enmAccessCrX, iGReg);
     }
     pVCpu->cpum.GstCtx.cr2 = uNewCrX;

@@ -5480 +5480 @@ iemCImpl_load_CrX (CR3)
         Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR3, enmAccessCrX, iGReg);

@@ -5559 +5559 @@ iemCImpl_load_CrX (CR4)
         Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR4, enmAccessCrX, iGReg);

@@ -5624 +5624 @@ iemCImpl_load_CrX (CR8)
         Log(("iemCImpl_load_Cr%#x: Guest intercept -> #VMEXIT\n", iCrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_CRX_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_CR8, enmAccessCrX, iGReg);

@@ -5798 +5798 @@ mov Rd,DRx
         Log(("mov r%u,dr%u: Guest intercept -> #VMEXIT\n", iGReg, iDrReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
-                              IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_READ_DR0 + (iDrReg & 0xf),
+                           IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);

@@ -5899 +5899 @@ mov DRx,Rd
         Log2(("mov dr%u,r%u: Guest intercept -> #VMEXIT\n", iDrReg, iGReg));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
-                              IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_WRITE_DR0 + (iDrReg & 0xf),
+                           IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? (iGReg & 7) : 0, 0 /* uExitInfo2 */);

@@ -5937 +5937 @@ invlpg
         Log(("invlpg: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_INVLPG,
-                              IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPG,
+                           IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? GCPtrPage : 0, 0 /* uExitInfo2 */);

@@ -6073 +6073 @@ invd
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_INVD, SVM_EXIT_INVD, 0, 0);
 
     /* We currently take no action here. */

@@ -6092 +6092 @@ wbinvd
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD, SVM_EXIT_WBINVD, 0, 0);
 
     /* We currently take no action here. */

@@ -6103 +6103 @@ iemCImpl_rsm
 IEM_CIMPL_DEF_0(iemCImpl_rsm)
 {
-    IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
+    IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RSM, SVM_EXIT_RSM, 0, 0);
     NOREF(cbInstr);
     return iemRaiseUndefinedOpcode(pVCpu);

@@ -6132 +6132 @@ rdtsc
         Log(("rdtsc: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -6175 +6175 @@ rdtscp
         Log(("rdtscp: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDTSCP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -6217 +6217 @@ rdpmc
         Log(("rdpmc: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_RDPMC, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -6654 +6654 @@ hlt
         Log2(("hlt: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_HLT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -6708 +6708 @@ monitor
         Log2(("monitor: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MONITOR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -6775 +6775 @@ mwait
         Log2(("mwait: Guest intercept (monitor hardware armed) -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT_ARMED, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_MWAIT))
     {
         Log2(("mwait: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_MWAIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }

@@ -6832 +6832 @@ cpuid
         Log2(("cpuid: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CPUID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);

@@ -7172 +7172 @@ xsetbv
         Log2(("xsetbv: Guest intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XSETBV, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
trunk/src/VBox/VMM/VMMAll/IEMAllCImplSvmInstr.cpp.h (r74301 → r74332)

@@ -19 +19 @@
 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-/** Check and handles SVM nested-guest instruction intercept and updates
- *  NRIP if needed.
- */
-# define IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
-    do \
-    { \
-        if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
+/**
+ * Check the common SVM instruction preconditions.
+ */
+# define IEM_CHECK_SVM_INSTR_COMMON(a_pVCpu, a_Instr) \
+    do { \
+        if (!CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu))) \
         { \
-            IEM_SVM_UPDATE_NRIP(a_pVCpu); \
-            IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
+            Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
+            return iemRaiseUndefinedOpcode(a_pVCpu); \
+        } \
+        if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
+        { \
+            Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
+            return iemRaiseUndefinedOpcode(a_pVCpu); \
+        } \
+        if ((a_pVCpu)->iem.s.uCpl != 0) \
+        { \
+            Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
+            return iemRaiseGeneralProtectionFault0(a_pVCpu); \
         } \
     } while (0)
 
-/** Checks and handles SVM nested-guest CR0 read intercept. */
-# define IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
-    do \
-    { \
-        if (!IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
-        { /* probably likely */ } \
-        else \
-        { \
-            IEM_SVM_UPDATE_NRIP(a_pVCpu); \
-            IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
-        } \
-    } while (0)
-
-#else  /* !VBOX_WITH_NESTED_HWVIRT_SVM */
-# define IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
-# define IEMCIMPL_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2)                           do { } while (0)
-#endif /* !VBOX_WITH_NESTED_HWVIRT_SVM */
-
-
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 
 /**

@@ -898 +887 @@ iemHandleSvmNstGstEventIntercept (NMI)
     {
         Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }

@@ -906 +895 @@ iemHandleSvmNstGstEventIntercept (ICEBP)
     {
         Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }

@@ -938 +927 @@ iemHandleSvmNstGstEventIntercept (exceptions)
     }
     if (u8Vector == X86_XCPT_BR)
-        IEM_SVM_UPDATE_NRIP(pVCpu);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
     Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept u32InterceptXcpt=%#RX32 u8Vector=%#x "
           "uExitInfo1=%#RX64 uExitInfo2=%#RX64 -> #VMEXIT\n", pVCpu->cpum.GstCtx.hwvirt.svm.CTX_SUFF(pVmcb)->ctrl.u32InterceptXcpt,
           u8Vector, uExitInfo1, uExitInfo2));
-    IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);
+    IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_0 + u8Vector, uExitInfo1, uExitInfo2);

@@ -953 +942 @@ iemHandleSvmNstGstEventIntercept (software INT)
     uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssists ? u8Vector : 0;
     Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
-    IEM_SVM_UPDATE_NRIP(pVCpu);
-    IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
+    IEM_UPDATE_SVM_NRIP(pVCpu);
+    IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);

@@ -1000 +989 @@ iemSvmHandleIOIntercept
     {
         Log3(("iemSvmHandleIOIntercept: u16Port=%#x (%u) -> #VMEXIT\n", u16Port, u16Port));
-        IEM_SVM_UPDATE_NRIP(pVCpu);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
         return iemSvmVmexit(pVCpu, SVM_EXIT_IOIO, IoExitInfo.u, pVCpu->cpum.GstCtx.rip + cbInstr);
     }

@@ -1059 +1048 @@ MSR intercept
     if (*pbMsrpm & RT_BIT(uMsrpmBit))
     {
-        IEM_SVM_UPDATE_NRIP(pVCpu);
+        IEM_UPDATE_SVM_NRIP(pVCpu);
         return iemSvmVmexit(pVCpu, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);
     }

@@ -1087 +1076 @@ iemCImpl_vmrun
     LogFlow(("iemCImpl_vmrun\n"));
-    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmrun);
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, vmrun);
 
     /** @todo Check effective address size using address size prefix. */
 ...
     {
         Log(("vmrun: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMRUN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }

@@ -1112 +1101 @@ after iemCImpl_vmrun: seven implementations moved up from the end of the file
     return rcStrict;
 # endif
+}
+
+
+/**
+ * Implements 'VMLOAD'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmload)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_vmload\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, vmload);
+
+    /** @todo Check effective address size using address size prefix. */
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
+    {
+        Log(("vmload: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    SVMVMCBSTATESAVE VmcbNstGst;
+    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
+                                                    sizeof(SVMVMCBSTATESAVE));
+    if (rcStrict == VINF_SUCCESS)
+    {
+        LogFlow(("vmload: Loading VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
+        HMSVM_SEG_REG_COPY_FROM_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
+
+        pVCpu->cpum.GstCtx.msrKERNELGSBASE = VmcbNstGst.u64KernelGSBase;
+        pVCpu->cpum.GstCtx.msrSTAR         = VmcbNstGst.u64STAR;
+        pVCpu->cpum.GstCtx.msrLSTAR        = VmcbNstGst.u64LSTAR;
+        pVCpu->cpum.GstCtx.msrCSTAR        = VmcbNstGst.u64CSTAR;
+        pVCpu->cpum.GstCtx.msrSFMASK       = VmcbNstGst.u64SFMASK;
+
+        pVCpu->cpum.GstCtx.SysEnter.cs  = VmcbNstGst.u64SysEnterCS;
+        pVCpu->cpum.GstCtx.SysEnter.esp = VmcbNstGst.u64SysEnterESP;
+        pVCpu->cpum.GstCtx.SysEnter.eip = VmcbNstGst.u64SysEnterEIP;
+
+        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+# endif
+}
+
+
+/**
+ * Implements 'VMSAVE'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmsave)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_vmsave\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, vmsave);
+
+    /** @todo Check effective address size using address size prefix. */
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMSAVE))
+    {
+        Log(("vmsave: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    SVMVMCBSTATESAVE VmcbNstGst;
+    VBOXSTRICTRC rcStrict = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcbNstGst, GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest),
+                                                    sizeof(SVMVMCBSTATESAVE));
+    if (rcStrict == VINF_SUCCESS)
+    {
+        LogFlow(("vmsave: Saving VMCB at %#RGp enmEffAddrMode=%d\n", GCPhysVmcb, pVCpu->iem.s.enmEffAddrMode));
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_FS | CPUMCTX_EXTRN_GS | CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_LDTR
+                                | CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS | CPUMCTX_EXTRN_SYSENTER_MSRS);
+
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, FS, fs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, GS, gs);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, TR, tr);
+        HMSVM_SEG_REG_COPY_TO_VMCB(IEM_GET_CTX(pVCpu), &VmcbNstGst, LDTR, ldtr);
+
+        VmcbNstGst.u64KernelGSBase = pVCpu->cpum.GstCtx.msrKERNELGSBASE;
+        VmcbNstGst.u64STAR         = pVCpu->cpum.GstCtx.msrSTAR;
+        VmcbNstGst.u64LSTAR        = pVCpu->cpum.GstCtx.msrLSTAR;
+        VmcbNstGst.u64CSTAR        = pVCpu->cpum.GstCtx.msrCSTAR;
+        VmcbNstGst.u64SFMASK       = pVCpu->cpum.GstCtx.msrSFMASK;
+
+        VmcbNstGst.u64SysEnterCS   = pVCpu->cpum.GstCtx.SysEnter.cs;
+        VmcbNstGst.u64SysEnterESP  = pVCpu->cpum.GstCtx.SysEnter.esp;
+        VmcbNstGst.u64SysEnterEIP  = pVCpu->cpum.GstCtx.SysEnter.eip;
+
+        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcb + RT_UOFFSETOF(SVMVMCB, guest), &VmcbNstGst,
+                                            sizeof(SVMVMCBSTATESAVE));
+        if (rcStrict == VINF_SUCCESS)
+            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    }
+    return rcStrict;
+# endif
+}
+
+
+/**
+ * Implements 'CLGI'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_clgi)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_clgi\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, clgi);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CLGI))
+    {
+        Log(("clgi: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    pVCpu->cpum.GstCtx.hwvirt.fGif = false;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
+# else
+    return VINF_SUCCESS;
+# endif
+# endif
+}
+
+
+/**
+ * Implements 'STGI'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_stgi)
+{
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
+    RT_NOREF2(pVCpu, cbInstr);
+    return VINF_EM_RAW_EMULATE_INSTR;
+# else
+    LogFlow(("iemCImpl_stgi\n"));
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, stgi);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_STGI))
+    {
+        Log2(("stgi: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    pVCpu->cpum.GstCtx.hwvirt.fGif = true;
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+
+# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
+    return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, false);
+# else
+    return VINF_SUCCESS;
+# endif
+# endif
+}
+
+
+/**
+ * Implements 'INVLPGA'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_invlpga)
+{
+    /** @todo Check effective address size using address size prefix. */
+    RTGCPTR const GCPtrPage = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rax : pVCpu->cpum.GstCtx.eax;
+    /** @todo PGM needs virtual ASID support. */
+# if 0
+    uint32_t const uAsid = pVCpu->cpum.GstCtx.ecx;
+# endif
+
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, invlpga);
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INVLPGA))
+    {
+        Log2(("invlpga: Guest intercept (%RGp) -> #VMEXIT\n", GCPtrPage));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    PGMInvalidatePage(pVCpu, GCPtrPage);
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Implements 'SKINIT'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_skinit)
+{
+    IEM_CHECK_SVM_INSTR_COMMON(pVCpu, invlpga);
+
+    uint32_t uIgnore;
+    uint32_t fFeaturesECX;
+    CPUMGetGuestCpuId(pVCpu, 0x80000001, 0 /* iSubLeaf */, &uIgnore, &uIgnore, &fFeaturesECX, &uIgnore);
+    if (!(fFeaturesECX & X86_CPUID_AMD_FEATURE_ECX_SKINIT))
+        return iemRaiseUndefinedOpcode(pVCpu);
+
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SKINIT))
+    {
+        Log2(("skinit: Guest intercept -> #VMEXIT\n"));
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SKINIT, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+
+    RT_NOREF(cbInstr);
+    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * Implements SVM's implementation of PAUSE.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_svm_pause)
+{
+    bool fCheckIntercept = true;
+    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilter)
+    {
+        IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_HWVIRT);
+
+        /* TSC based pause-filter thresholding. */
+        if (   IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmPauseFilterThreshold
+            && pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold > 0)
+        {
+            uint64_t const uTick = TMCpuTickGet(pVCpu);
+            if (uTick - pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick > pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilterThreshold)
+                pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter = CPUMGetGuestSvmPauseFilterCount(pVCpu, IEM_GET_CTX(pVCpu));
+            pVCpu->cpum.GstCtx.hwvirt.svm.uPrevPauseTick = uTick;
+        }
+
+        /* Simple pause-filter counter. */
+        if (pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter > 0)
+        {
+            --pVCpu->cpum.GstCtx.hwvirt.svm.cPauseFilter;
+            fCheckIntercept = false;
+        }
+    }
+
+    if (fCheckIntercept)
+        IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_PAUSE, SVM_EXIT_PAUSE, 0, 0);
+
+    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+    return VINF_SUCCESS;
 }

@@ -1147 +1395 @@
 }
 
+
 /**
  * Implements 'VMMCALL'.

@@ -1155 +1404 @@ vmmcall
     {
         Log(("vmmcall: Guest intercept -> #VMEXIT\n"));
-        IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+        IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }

@@ -1177 +1426 @@
-[old lines 1177-1438: the previous copies of iemCImpl_vmload, iemCImpl_vmsave, iemCImpl_clgi, iemCImpl_stgi, iemCImpl_invlpga, iemCImpl_skinit and iemCImpl_svm_pause (identical to the bodies inserted above except for the old macro names IEM_SVM_INSTR_COMMON_CHECKS, IEM_RETURN_SVM_VMEXIT, IEMCIMPL_HLP_SVM_INSTR_INTERCEPT_AND_NRIP and IEM_GET_SVM_PAUSE_FILTER_COUNT), together with their enclosing #ifdef VBOX_WITH_NESTED_HWVIRT_SVM / #endif block, are deleted here]
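Of the implementations moved above, iemCImpl_svm_pause is the only one with logic beyond the renames: a two-level PAUSE filter. When the CPU feature is present, a sufficiently large TSC gap since the previous PAUSE re-arms a count-down counter from the VMCB's pause-filter count, and while that counter is non-zero the PAUSE is executed without even checking the intercept. A toy model of just that decision (hypothetical names and constants, not the VirtualBox structures):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t g_uPrevPauseTick;            /* TSC value at the previous PAUSE */
static uint32_t g_cPauseFilter;              /* count-down filter */
static uint32_t g_cPauseFilterThreshold;     /* TSC gap that re-arms the filter */
static uint32_t g_cPauseFilterCount = 3;     /* reload value (comes from the VMCB in VBox) */

/* Returns true when the PAUSE intercept should be checked (i.e. may #VMEXIT). */
static bool toyPauseShouldCheckIntercept(uint64_t uTick)
{
    if (g_cPauseFilterThreshold > 0)
    {
        /* A long enough gap between PAUSEs re-arms the count-down filter. */
        if (uTick - g_uPrevPauseTick > g_cPauseFilterThreshold)
            g_cPauseFilter = g_cPauseFilterCount;
        g_uPrevPauseTick = uTick;
    }

    /* While the counter is non-zero, swallow the PAUSE without intercepting. */
    if (g_cPauseFilter > 0)
    {
        --g_cPauseFilter;
        return false;
    }
    return true;
}

int main(void)
{
    g_cPauseFilterThreshold = 100;
    g_cPauseFilter = g_cPauseFilterCount;
    for (uint64_t uTick = 0; uTick < 50; uTick += 10)   /* a tight PAUSE loop */
        printf("tick %2u: check intercept = %d\n",
               (unsigned)uTick, toyPauseShouldCheckIntercept(uTick));
    return 0;
}

In a tight spin loop the first g_cPauseFilterCount PAUSEs print 0 (no intercept check) and later ones print 1, which is the throttling behaviour the real code implements per VCPU.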