Changeset 66000 in vbox for trunk/src/VBox/VMM
- Timestamp: Mar 8, 2017 8:29:40 PM (8 years ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
r65989 → r66000

-
-/**
- * SVM nested-guest \#VMEXIT handler.
- *
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   uExitCode   The exit reason.
- */
-VMM_INT_DECL(void) HMNstGstSvmVmExit(PVMCPU pVCpu, uint64_t uExitCode)
-{
-    RT_NOREF2(pVCpu, uExitCode);
-}
-
-
 /**
  * VMX nested-guest VM-exit handler.
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r65989 → r66000

 /**
  * Performs the operations necessary that are part of the vmmcall instruction
- * execution for AMD-V.
+ * execution in the guest.
  *
  * @returns Strict VBox status code (i.e. informational status codes too).
- *
  * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
  *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
 …
 }
 
+
+/**
+ * Performs the operations necessary that are part of the vmrun instruction
+ * execution in the guest.
+ *
+ * @returns Strict VBox status code (i.e. informational status codes too).
+ *
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    Pointer to the guest-CPU context.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    RT_NOREF2(pVCpu, pCtx);
+
+    return VERR_NOT_IMPLEMENTED;
+}
+
+
+/**
+ * SVM nested-guest \#VMEXIT handler.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pCtx        The guest-CPU context.
+ * @param   uExitCode   The exit reason.
+ * @param   uExitInfo1  The exit info. 1 field.
+ * @param   uExitInfo1  The exit info. 2 field.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, int64_t iExitCode, uint64_t uExitInfo1,
+                                             uint64_t uExitInfo2)
+{
+    if (   CPUMIsGuestInNestedHwVirtMode(pCtx)
+        || iExitCode == SVM_EXIT_INVALID)
+    {
+        RT_NOREF(pVCpu);
+
+        pCtx->hwvirt.svm.fGif = 0;
+
+        /** @todo implement #VMEXIT. */
+
+        return VINF_SUCCESS;
+    }
+    else
+        Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%RI64 uExitInfo1=%RU64 uExitInfo2=%RU64\n", iExitCode,
+             uExitInfo1, uExitInfo2));
+
+    return VERR_SVM_IPE_5;
+}
+
+
+/**
+ * Peforms the functions of a VMRUN instruction.
+ *
+ * @returns Strict VBox status code.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pCtx    The guest-CPU context.
+ */
+VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmRun(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    RT_NOREF2(pVCpu, pCtx);
+    return VERR_NOT_IMPLEMENTED;
+}
+
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r65934 → r66000

 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Checks if IEM is in the process of delivering an event (interrupt or
+ * exception).
+ *
+ * @returns true if it's raising an interrupt or exception, false otherwise.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ */
+VMM_INT_DECL(bool) IEMIsRaisingIntOrXcpt(PVMCPU pVCpu)
+{
+    return pVCpu->iem.s.cXcptRecursions > 0;
+}
+
+
+/**
  * Interface for HM and EM to emulate the STGI instruction.
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r65989 → r66000

 #ifdef VBOX_WITH_NESTED_HWVIRT
 /**
+ * Implements 'VMRUN'.
+ */
+IEM_CIMPL_DEF_0(iemCImpl_vmrun)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+    IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
+
+    RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmrun: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
+        return iemRaiseGeneralProtectionFault0(pVCpu);
+    }
+
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMRUN))
+    {
+        Log(("vmrun: Guest intercept -> VMexit\n"));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+#endif
+
+    /** @todo think - I probably need to map both the HSAVE area page and the
+     *        guest VMCB via iemMemPageMap here and do the copying? */
+    pCtx->hwvirt.svm.GCPhysNstGstVmcb = GCPhysVmcb;
+    void *pvVmcb;
+    PGMPAGEMAPLOCK PgLockVmcb;
+    VBOXSTRICTRC rcStrict = iemMemPageMap(pVCpu, GCPhysVmcb, IEM_ACCESS_DATA_RW, &pvVmcb, &PgLockVmcb);
+    if (rcStrict == VINF_SUCCESS)
+        return HMSvmVmrun(pVCpu, pCtx);
+    RT_NOREF(cbInstr);
+    return rcStrict;
+}
+
+
+/**
  * Implements 'VMMCALL'.
  */
 IEM_CIMPL_DEF_0(iemCImpl_vmmcall)
 {
-    /*
-     * We do not check for presence of SVM/AMD-V here as the KVM GIM provider
-     * might patch in an invalid vmmcall instruction with an Intel vmcall
-     * instruction.
-     */
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+#ifndef IN_RC
+    if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
+    {
+        Log(("vmrun: Guest intercept -> VMexit\n"));
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMMCALL, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+    }
+#endif
+
     bool fUpdatedRipAndRF;
-    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
     VBOXSTRICTRC rcStrict = HMSvmVmmcall(pVCpu, pCtx, &fUpdatedRipAndRF);
     if (RT_SUCCESS(rcStrict))
 …
     {
         Log(("vmload: Guest intercept -> VMexit\n"));
-        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMLOAD);
-        return VINF_EM_RESCHEDULE;
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif
 
     RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
-    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-    {
-        Log(("vmload: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmload: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
         return iemRaiseGeneralProtectionFault0(pVCpu);
     }
 …
     {
         Log(("vmsave: Guest intercept -> VMexit\n"));
-        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_VMSAVE);
-        return VINF_EM_RESCHEDULE;
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VMSAVE, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif
 
     RTGCPHYS const GCPhysVmcb = pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT ? pCtx->rax : pCtx->eax;
-    if (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
-    {
-        Log(("vmsave: VMCB physaddr (%#RGp) not 4K aligned -> #GP(0)\n", GCPhysVmcb));
+    if (   (GCPhysVmcb & X86_PAGE_4K_OFFSET_MASK)
+        || !PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcb))
+    {
+        Log(("vmsave: VMCB physaddr (%#RGp) not valid -> #GP(0)\n", GCPhysVmcb));
         return iemRaiseGeneralProtectionFault0(pVCpu);
     }
 …
     {
         Log(("clgi: Guest intercept -> VMexit\n"));
-        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_CLGI);
-        return VINF_EM_RESCHEDULE;
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_CLGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif
 …
     {
         Log2(("stgi: Guest intercept -> VMexit\n"));
-        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_STGI);
-        return VINF_EM_RESCHEDULE;
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_STGI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif
 …
     {
         Log2(("invlpga: Guest intercept -> VMexit\n"));
-        HMNstGstSvmVmExit(pVCpu, SVM_EXIT_INVLPGA);
-        return VINF_EM_RESCHEDULE;
+        return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVLPGA, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
     }
 #endif
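The recurring change in these intercept paths is that each intercepted SVM instruction now returns the strict status of HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_xxx, uExitInfo1, uExitInfo2) instead of calling the old void-returning HMNstGstSvmVmExit() and forcing a VINF_EM_RESCHEDULE. Below is a minimal, self-contained sketch of that control flow; the types, constants, and function names are simplified stand-ins chosen for illustration, not the real VBox definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the VBox types and constants (illustrative only). */
typedef int VBOXSTRICTRC;
#define VINF_SUCCESS          0
#define VERR_SVM_IPE_5      (-5005)   /* placeholder value, not the real IPRT status */
#define SVM_EXIT_VMLOAD      0x082    /* placeholder for the real SVM exit code */

typedef struct GUESTCTX
{
    bool fInNestedGuestMode;   /* models CPUMIsGuestInNestedHwVirtMode() */
    bool fGif;                 /* models pCtx->hwvirt.svm.fGif */
} GUESTCTX;

/* Models the new HMSvmNstGstVmExit(): take the exit code and exit-info pair,
 * clear the global interrupt flag, and hand back a strict status code that the
 * caller simply returns (no unconditional reschedule). */
static VBOXSTRICTRC nstGstVmExit(GUESTCTX *pCtx, int64_t iExitCode,
                                 uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    if (!pCtx->fInNestedGuestMode)
        return VERR_SVM_IPE_5;                /* caller bug: no nested guest active */
    pCtx->fGif = false;                       /* #VMEXIT clears GIF */
    printf("#VMEXIT: code=%lld info1=%llu info2=%llu\n", (long long)iExitCode,
           (unsigned long long)uExitInfo1, (unsigned long long)uExitInfo2);
    return VINF_SUCCESS;
}

/* Models the per-instruction pattern now shared by vmload/vmsave/clgi/stgi/invlpga:
 * if the nested guest intercepts the instruction, synthesize the #VMEXIT and
 * propagate its status; otherwise fall through to normal emulation. */
static VBOXSTRICTRC emulateVmload(GUESTCTX *pCtx, bool fInterceptSet)
{
    if (fInterceptSet)
        return nstGstVmExit(pCtx, SVM_EXIT_VMLOAD, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
    /* ... normal VMLOAD emulation would continue here ... */
    return VINF_SUCCESS;
}

int main(void)
{
    GUESTCTX Ctx = { .fInNestedGuestMode = true, .fGif = true };
    return emulateVmload(&Ctx, true /*fInterceptSet*/) == VINF_SUCCESS ? 0 : 1;
}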
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r65989 → r66000

+#ifdef VBOX_WITH_NESTED_HWVIRT
+/** Opcode 0x0f 0x01 0xd8. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmrun)
+{
+    IEMOP_MNEMONIC(vmrun, "vmrun");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmrun);
+}
+
+/** Opcode 0x0f 0x01 0xd9. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
+{
+    IEMOP_MNEMONIC(vmmcall, "vmmcall");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
+}
+
+
+/** Opcode 0x0f 0x01 0xda. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
+{
+    IEMOP_MNEMONIC(vmload, "vmload");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
+}
+
+
+/** Opcode 0x0f 0x01 0xdb. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
+{
+    IEMOP_MNEMONIC(vmsave, "vmsave");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
+}
+
+
+/** Opcode 0x0f 0x01 0xdc. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
+{
+    IEMOP_MNEMONIC(stgi, "stgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdd. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
+{
+    IEMOP_MNEMONIC(clgi, "clgi");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
+}
+
+
+/** Opcode 0x0f 0x01 0xdf. */
+FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
+{
+    IEMOP_MNEMONIC(invlpga, "invlpga");
+    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
+}
+#else
 /** Opcode 0x0f 0x01 0xd8. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);
 
-#ifdef VBOX_WITH_NESTED_HWVIRT
-/** Opcode 0x0f 0x01 0xd9. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmmcall)
-{
-    IEMOP_MNEMONIC(vmmcall, "vmmcall");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmmcall);
-}
-
-
-/** Opcode 0x0f 0x01 0xda. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmload)
-{
-    IEMOP_MNEMONIC(vmload, "vmload");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmload);
-}
-
-
-/** Opcode 0x0f 0x01 0xdb. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_vmsave)
-{
-    IEMOP_MNEMONIC(vmsave, "vmsave");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_vmsave);
-}
-
-
-/** Opcode 0x0f 0x01 0xdc. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_stgi)
-{
-    IEMOP_MNEMONIC(stgi, "stgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stgi);
-}
-
-
-/** Opcode 0x0f 0x01 0xdd. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_clgi)
-{
-    IEMOP_MNEMONIC(clgi, "clgi");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clgi);
-}
-
-
-/** Opcode 0x0f 0x01 0xdf. */
-FNIEMOP_DEF(iemOp_Grp7_Amd_invlpga)
-{
-    IEMOP_MNEMONIC(invlpga, "invlpga");
-    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_invlpga);
-}
-#else
 /** Opcode 0x0f 0x01 0xd9. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);
-
 /** Opcode 0x0f 0x01 0xda. */
 FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);
trunk/src/VBox/VMM/include/CPUMInternal.mac
r65909 → r66000

 .Guest.hwvirt.svm.u16InterceptWrDRx    resw 1
 .Guest.hwvirt.svm.fGif                 resb 1
+.Guest.hwvirt.svm.abPadding            resb 3
+.Guest.hwvirt.svm.GCPhysNstGstVmcb     resq 1
 alignb 64
 …
 .Hyper.hwvirt.svm.u16InterceptWrDRx    resw 1
 .Hyper.hwvirt.svm.fGif                 resb 1
+.Hyper.hwvirt.svm.abPadding            resb 3
+.Hyper.hwvirt.svm.GCPhysNstGstVmcb     resq 1
 alignb 64
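The two new assembly reservations mirror fields added to the SVM hardware-virtualization state of the guest CPU context. A rough C-side sketch of the layout these offsets correspond to is shown below; it is an assumption made for illustration, the authoritative definition lives in include/VBox/vmm/cpumctx.h and contains further members that are omitted here.

#include <stdint.h>

typedef uint64_t RTGCPHYS;    /* guest-physical address; stand-in typedef for this sketch */

/* Illustrative excerpt of the hwvirt.svm portion of the guest context; field
 * names follow the .mac declarations above, surrounding members are omitted. */
struct SVMHWVIRTSKETCH
{
    uint16_t    u16InterceptWrDRx;    /* DRx write-intercept bits (existing field) */
    uint8_t     fGif;                 /* nested-guest global interrupt flag (existing field) */
    uint8_t     abPadding[3];         /* new in r66000: explicit padding (resb 3) */
    RTGCPHYS    GCPhysNstGstVmcb;     /* new in r66000: nested-guest VMCB guest-physical address (resq 1) */
};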