Changeset 73606 in vbox for trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
- Timestamp:
- Aug 10, 2018 7:38:56 AM (6 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r73555 r73606 388 388 * Check the common VMX instruction preconditions. 389 389 */ 390 #define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_ Instr) \390 #define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) \ 391 391 do { \ 392 { \393 392 if (!IEM_IS_VMX_ENABLED(a_pVCpu)) \ 394 393 { \ 395 Log((RT_STR(a_Instr) ": CR4.VMXE not enabled -> #UD\n")); \ 394 Log((a_szInstr ": CR4.VMXE not enabled -> #UD\n")); \ 395 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_Vmxe; \ 396 396 return iemRaiseUndefinedOpcode(a_pVCpu); \ 397 397 } \ 398 398 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \ 399 399 { \ 400 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \ 400 Log((a_szInstr ": Real or v8086 mode -> #UD\n")); \ 401 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_RealOrV86Mode; \ 401 402 return iemRaiseUndefinedOpcode(a_pVCpu); \ 402 403 } \ 403 404 if (IEM_IS_LONG_MODE(a_pVCpu) && !IEM_IS_64BIT_CODE(a_pVCpu)) \ 404 405 { \ 405 Log((RT_STR(a_Instr) ": Long mode without 64-bit code segment -> #UD\n")); \ 406 Log((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \ 407 (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = a_InsDiagPrefix##_LongModeCS; \ 406 408 return iemRaiseUndefinedOpcode(a_pVCpu); \ 407 409 } \ 408 } while (0)410 } while (0) 409 411 410 412 /** … … 413 415 # define IEM_IS_VMX_ENABLED(a_pVCpu) (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu))) 414 416 417 /** 418 * Check if the guest has entered VMX root operation. 419 */ 420 #define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(pVCpu))) 421 422 /** 423 * Check if the guest has entered VMX non-root operation. 
424 */ 425 #define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu))) 426 415 427 #else 416 # define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_ Instr)do { } while (0)428 # define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) do { } while (0) 417 429 # define IEM_IS_VMX_ENABLED(a_pVCpu) (false) 430 # define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (false) 431 # define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (false) 418 432 419 433 #endif … … 938 952 #endif 939 953 954 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 955 IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, PCVMXEXITINSTRINFO pExitInstrInfo, 956 RTGCPTR GCPtrDisp); 957 #endif 958 940 959 /** 941 960 * Sets the pass up status. … … 1037 1056 pVCpu->iem.s.uRexReg = 127; 1038 1057 pVCpu->iem.s.uRexB = 127; 1058 pVCpu->iem.s.offModRm = 127; 1039 1059 pVCpu->iem.s.uRexIndex = 127; 1040 1060 pVCpu->iem.s.iEffSeg = 127; … … 1196 1216 pVCpu->iem.s.cbOpcode = 0; 1197 1217 #endif 1218 pVCpu->iem.s.offModRm = 0; 1198 1219 pVCpu->iem.s.cActiveMappings = 0; 1199 1220 pVCpu->iem.s.iNextMapping = 0; … … 1306 1327 pVCpu->iem.s.offOpcode = 0; 1307 1328 #endif 1329 pVCpu->iem.s.offModRm = 0; 1308 1330 Assert(pVCpu->iem.s.cActiveMappings == 0); 1309 1331 pVCpu->iem.s.iNextMapping = 0; … … 2434 2456 # ifdef IEM_WITH_CODE_TLB 2435 2457 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte; 2436 pVCpu->iem.s.offModRm = offOpcode;2458 pVCpu->iem.s.offModRm = offBuf; 2437 2459 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf; 2438 2460 if (RT_LIKELY( pbBuf != NULL … … 2443 2465 } 2444 2466 # else 2445 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;2467 uintptr_t offOpcode = pVCpu->iem.s.offOpcode; 2446 2468 pVCpu->iem.s.offModRm = offOpcode; 2447 2469 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode)) … … 2468 2490 do \ 2469 2491 { \ 2470 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_p u8)); \2492 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \ 2471 
2493 if (rcStrict2 == VINF_SUCCESS) \ 2472 2494 { /* likely */ } \ … … 5523 5545 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */ 5524 5546 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n")); 5525 if (!CPUMIsGuestInNestedHwVirtMode(IEM_GET_CTX(pVCpu))) 5547 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)) 5548 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu))) 5526 5549 return VERR_EM_GUEST_CPU_HANG; 5527 5550 } … … 8083 8106 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1))) 8084 8107 return VINF_SUCCESS; 8108 /** @todo We should probably raise #SS(0) here if segment is SS; see AMD spec. 8109 * 4.12.2 "Data Limit Checks in 64-bit Mode". */ 8085 8110 return iemRaiseGeneralProtectionFault0(pVCpu); 8086 8111 } … … 12547 12572 } while (0) 12548 12573 12574 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 12575 /** This instruction raises an \#UD in real and V8086 mode or when not using a 12576 * 64-bit code segment when in long mode (applicable to all VMX instructions 12577 * except VMCALL). */ 12578 # define IEMOP_HLP_VMX_INSTR() \ 12579 do \ 12580 { \ 12581 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \ 12582 && ( !IEM_IS_LONG_MODE(pVCpu) \ 12583 || IEM_IS_64BIT_CODE(pVCpu))) \ 12584 { /* likely */ } \ 12585 else \ 12586 return IEMOP_RAISE_INVALID_OPCODE(); \ 12587 } while (0) 12588 #endif 12589 12549 12590 /** The instruction is not available in 64-bit mode, throw \#UD if we're in 12550 12591 * 64-bit mode. 
*/ … … 15096 15137 15097 15138 /** 15098 * Interface for HM and EM to emulate the INVPCID instruction.15099 *15100 * @param pVCpu The cross context virtual CPU structure.15101 * @param cbInstr The instruction length in bytes.15102 * @param uType The invalidation type.15103 * @param GCPtrInvpcidDesc The effective address of the INVPCID descriptor.15104 *15105 * @remarks In ring-0 not all of the state needs to be synced in.15106 */15107 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)15108 {15109 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);15110 15111 iemInitExec(pVCpu, false /*fBypassHandlers*/);15112 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);15113 Assert(!pVCpu->iem.s.cActiveMappings);15114 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);15115 }15116 15117 15118 15119 /**15120 15139 * Interface for HM and EM to emulate the CPUID instruction. 15121 15140 * … … 15498 15517 15499 15518 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */ 15519 15520 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX 15521 15522 /** 15523 * Interface for HM and EM to emulate the VMXOFF instruction. 15524 * 15525 * @returns Strict VBox status code. 15526 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 15527 * @param cbInstr The instruction length in bytes. 15528 * @thread EMT(pVCpu) 15529 */ 15530 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr) 15531 { 15532 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3); 15533 15534 iemInitExec(pVCpu, false /*fBypassHandlers*/); 15535 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff); 15536 Assert(!pVCpu->iem.s.cActiveMappings); 15537 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict); 15538 } 15539 15540 15541 /** 15542 * Interface for HM and EM to emulate the VMXON instruction. 15543 * 15544 * @returns Strict VBox status code. 
15545 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 15546 * @param cbInstr The instruction length in bytes. 15547 * @param GCPtrVmxon The linear address of the VMXON pointer. 15548 * @param uExitInstrInfo The VM-exit instruction information field. 15549 * @param GCPtrDisp The displacement field for @a GCPtrVmxon if any. 15550 * @thread EMT(pVCpu) 15551 */ 15552 VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, uint8_t cbInstr, RTGCPHYS GCPtrVmxon, uint32_t uExitInstrInfo, 15553 RTGCPTR GCPtrDisp) 15554 { 15555 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3); 15556 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT); 15557 15558 iemInitExec(pVCpu, false /*fBypassHandlers*/); 15559 PCVMXEXITINSTRINFO pExitInstrInfo = (PCVMXEXITINSTRINFO)&uExitInstrInfo; 15560 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, pExitInstrInfo, GCPtrDisp); 15561 if (pVCpu->iem.s.cActiveMappings) 15562 iemMemRollback(pVCpu); 15563 return iemExecStatusCodeFiddling(pVCpu, rcStrict); 15564 } 15565 15566 #endif 15567 15500 15568 #ifdef IN_RING3 15501 15569
Note: See TracChangeset for help on using the changeset viewer.