VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp@74157

Last change on this file since 74157 was 73606, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 Various bits:

  • IEM: Started VMXON, VMXOFF implementation, use IEM_OPCODE_GET_NEXT_RM.
  • IEM: Fixed INVPCID C impl, removed unused IEMExecDecodedInvpcid.
  • IEM: Updated iemCImpl_load_CrX to check for CR0/CR4 fixed bits in VMX.
  • IEM: Update offModRm to reset/re-initialize where needed.
  • CPUM: Added VMX root, non-root mode and other bits and updated a few places where they're used.
  • HM: Started adding fine-grained VMX instruction failure diagnostics.
  • HM: Made VM instruction error an enum.
  • HM: Added HMVMXAll.cpp for all context VMX code.
  • Ensure building with VBOX_WITH_NESTED_HWVIRT_[SVM|VMX] does the right thing based on host CPU.
  • CPUM: Added dumping of nested-VMX CPUMCTX state.
  • HMVMXR0: Added memory operand decoding.
  • HMVMXR0: VMX instr. privilege checks (CR0/CR4 read shadows are not consulted, so we need to do them)
  • HM: Added some more bit-field representations.
  • Recompiler: Refuse to run when in nested-VMX guest code.
Property svn:eol-style set to native
Property svn:keywords set to Author Date Id Revision
File size: 22.5 KB
/* $Id: HMSVMAll.cpp 73606 2018-08-10 07:38:56Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vm.h>


#ifndef IN_RC

/**
 * Emulates a simple MOV TPR (CR8) instruction.
 *
 * Used for TPR patching on 32-bit guests. This simply looks up the patch record
 * at EIP and performs the required emulation.
 *
 * This VMMCALL is used as a fallback mechanism when the mov to/from cr8 isn't
 * exactly in the form we expect (e.g. not followed by shr 4 as is usually done
 * for TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if the access was handled successfully, RIP + RFLAGS updated.
 * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
int hmSvmEmulateMovTpr(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fPatchFound = false;
    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;
        fPatchFound = true;

        uint8_t u8Tpr;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                bool fPending;
                int  rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t u32Val;
                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                    AssertRC(rc);
                    u8Tpr = u32Val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR
                                                         | HM_CHANGED_GUEST_RIP
                                                         | HM_CHANGED_GUEST_RFLAGS);
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    return fPatchFound ? VINF_SUCCESS : VERR_NOT_FOUND;
}

# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
 * in IEM).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @sa      hmR0SvmVmRunCacheVmcb.
 */
VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        /*
         * Restore fields as our own code might look at the VMCB controls as part
         * of the #VMEXIT handling in IEM. Otherwise, strictly speaking we don't need to
         * restore these fields because currently none of them are written back to memory
         * by a physical CPU on #VMEXIT.
         */
        PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
        pVmcbNstGstCache->fCacheValid = false;
    }

    /*
     * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
     * in response to a physical CPU interrupt as no changes to the guest-CPU state are
     * expected (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
     *
     * However, with nested-guests, the state -can- change on trips to ring-3 for we might
     * try to inject a nested-guest physical interrupt and cause a SVM_EXIT_INTR #VMEXIT for
     * the nested-guest from ring-3. Import the complete state here as we will be swapping
     * to the guest VMCB after the #VMEXIT.
     */
    CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL);
    AssertMsg(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL),
              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", pVCpu->cpum.GstCtx.fExtrn, CPUMCTX_EXTRN_ALL));
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
}
# endif

/**
 * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
 * enabled for the VM.
 *
 * @returns @c true if VGIF is enabled, @c false otherwise.
 * @param   pVM   The cross context VM structure.
 *
 * @remarks The value returned by this function is expected by the callers not
 *          to change throughout the lifetime of the VM.
 */
VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM)
{
    bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;

    return HMIsEnabled(pVM) && fVGif && fUseVGif;
}


/**
 * Applies the TSC offset of an SVM nested-guest, if any, and returns the new
 * TSC value for the nested-guest.
 *
 * @returns The TSC offset after applying any nested-guest TSC offset.
 * @param   pVCpu    The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks   The guest TSC.
 *
 * @remarks This function looks at the VMCB cache rather than directly at the
 *          nested-guest VMCB. The latter may have been modified for execution
 *          using hardware-assisted SVM.
 *
 * @note    If you make any changes to this function, please check if
 *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
 *
 * @sa      CPUMApplyNestedGuestTscOffset(), hmR0SvmNstGstUndoTscOffset().
 */
VMM_INT_DECL(uint64_t) HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    Assert(pVmcbNstGstCache->fCacheValid);
    return uTicks + pVmcbNstGstCache->u64TSCOffset;
}
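
/*
 * Worked example (illustrative, not part of the original source): with a
 * cached nested-guest TSC offset of 0xffffffffffff0000 (i.e. -0x10000), a raw
 * guest TSC of 0x80000000 is reported to the nested-guest as 0x7fff0000; the
 * unsigned 64-bit addition wraps, which is exactly how the CPU applies the
 * signed VMCB TSC offset.
 */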


/**
 * Interface used by IEM to handle patched TPR accesses.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the hypercall was handled, RIP + RFLAGS all dealt with.
 * @retval  VERR_NOT_FOUND if the hypercall was _not_ handled.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE on IPE.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMHCSvmMaybeMovTprHypercall(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmSvmEmulateMovTpr(pVCpu);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        return rc;
    }
    return VERR_NOT_FOUND;
}
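
/*
 * Illustrative use (a sketch, not part of this file): a VMMCALL handler can
 * first offer the call to the TPR patching code and only treat it as a real
 * hypercall if no patch record matches, roughly:
 *
 *     int rc = HMHCSvmMaybeMovTprHypercall(pVCpu);
 *     if (rc == VERR_NOT_FOUND)
 *         ... hand the VMMCALL on as an ordinary hypercall ...
 */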


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @returns true if the erratum applies, false otherwise.
 * @param   pu32Family     Where to store the CPU family (can be NULL).
 * @param   pu32Model      Where to store the CPU model (can be NULL).
 * @param   pu32Stepping   Where to store the CPU stepping (can be NULL).
 */
VMM_INT_DECL(int) HMSvmIsSubjectToErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 requires a forced TLB flush for each world switch:
     * see AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *   Athlon X2:    0x6b  1/2
     *                 0x68  1/2
     *   Athlon 64:    0x7f  1
     *                 0x6f  2
     *   Sempron:      0x7f  1/2
     *                 0x6f  2
     *                 0x6c  2
     *                 0x7c  2
     *   Turion 64:    0x68  2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = (u32Version >> 4) & 0xf;
    u32Model     |= (u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4;
    u32Stepping   = u32Version & 0xf;
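
    /*
     * Worked example (illustrative, not part of the original source): a leaf-1
     * EAX of 0x00060fb1 decodes to family 0xf (base family 0xf plus an
     * extended-family field of 0), model 0x6b (extended model 6, base model
     * 0xb) and stepping 1; the table above lists that Athlon X2 part as fixed,
     * so the erratum does not apply to it.
     */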

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}
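
/*
 * Illustrative call (a sketch, not part of this file): initialization code can
 * use this to decide whether to force a TLB flush on every world switch:
 *
 *     uint32_t uFamily, uModel, uStepping;
 *     if (HMSvmIsSubjectToErratum170(&uFamily, &uModel, &uStepping))
 *         ... enable the forced TLB flush workaround ...
 */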

#endif /* !IN_RC */


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent   Pointer to the SVM event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_EXCEPTION:
        case SVM_EVENT_NMI:             return TRPM_TRAP;
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}
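
/*
 * For instance (illustrative, not part of the original source): an event entry
 * with u3Type = SVM_EVENT_NMI (vector 2) is reported to TRPM as TRPM_TRAP,
 * the same class TRPM uses for hardware exceptions.
 */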


/**
 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
 *
 * @returns VBox status code.
 * @param   idMsr        The MSR being requested.
 * @param   pbOffMsrpm   Where to store the byte offset in the MSR permission
 *                       bitmap for @a idMsr.
 * @param   puMsrpmBit   Where to store the bit offset starting at the byte
 *                       returned in @a pbOffMsrpm.
 */
VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
{
    Assert(pbOffMsrpm);
    Assert(puMsrpmBit);

    /*
     * MSRPM Layout:
     *   Byte offset       MSR range
     *   0x000  - 0x7ff    0x00000000 - 0x00001fff
     *   0x800  - 0xfff    0xc0000000 - 0xc0001fff
     *   0x1000 - 0x17ff   0xc0010000 - 0xc0011fff
     *   0x1800 - 0x1fff   Reserved
     *
     * Each MSR is represented by 2 permission bits (read and write).
     */
    if (idMsr <= 0x00001fff)
    {
        /* Pentium-compatible MSRs. */
        uint32_t const bitoffMsr = idMsr << 1;
        *pbOffMsrpm = bitoffMsr >> 3;
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0000000
        && idMsr <= 0xc0001fff)
    {
        /* AMD Sixth Generation x86 Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
        *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0010000
        && idMsr <= 0xc0011fff)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
        *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    *pbOffMsrpm = 0;
    *puMsrpmBit = 0;
    return VERR_OUT_OF_RANGE;
}
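
/*
 * Worked example (illustrative, not part of the original source): for
 * idMsr = 0xc0000080 (EFER) the code above computes bitoffMsr = 0x80 << 1
 * = 0x100, so *pbOffMsrpm = 0x800 + (0x100 >> 3) = 0x820 and *puMsrpmBit = 0;
 * the read permission is bit 0 of that byte and the write permission bit 1.
 */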


/**
 * Determines whether an IOIO intercept is active for the nested-guest or not.
 *
 * @returns @c true if the IO access is intercepted, @c false otherwise.
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO struct to be filled.
 *                          Optional, can be NULL.
 */
VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo)
{
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    /*
     * The IOPM layout:
     * Each bit represents one 8-bit port, covering ports 0..65535. That makes
     * a total of 65536 bits, or two 4K pages.
     *
     * For IO instructions that access more than a single byte, the permission bits
     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     *
     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     * we need 3 extra bits beyond the second 4K page.
     */
    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };

    uint16_t const offIopm   = u16Port >> 3;
    uint16_t const fSizeMask = s_auSizeMasks[cbReg & 7];
    uint8_t  const cShift    = u16Port - (offIopm << 3);
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);
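
    /*
     * Worked example (illustrative, not part of the original source): a 2-byte
     * access to port 0x1f7 yields offIopm = 0x3e, cShift = 7, fSizeMask = 3
     * and fIopmMask = 0x180. The mask straddles a byte boundary, which is why
     * the bitmap is read below with a 16-bit load rather than a byte load.
     */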

    uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
    Assert(pbIopm);
    pbIopm += offIopm;
    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
    if (u16Iopm & fIopmMask)
    {
        if (pIoExitInfo)
        {
            static const uint32_t s_auIoOpSize[] =
            { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };

            static const uint32_t s_auIoAddrSize[] =
            { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };

            pIoExitInfo->u  = s_auIoOpSize[cbReg & 7];
            pIoExitInfo->u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
            pIoExitInfo->n.u1Str   = fStrIo;
            pIoExitInfo->n.u1Rep   = fRep;
            pIoExitInfo->n.u3Seg   = iEffSeg & 7;
            pIoExitInfo->n.u1Type  = enmIoType;
            pIoExitInfo->n.u16Port = u16Port;
        }
        return true;
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("HMSvmIsIOInterceptActive: We expect an IO intercept here!\n"));
    return false;
}


/**
 * Returns whether HM has cached the nested-guest VMCB.
 *
 * If the VMCB is cached by HM, it means HM may have potentially modified the
 * VMCB for execution using hardware-assisted SVM.
 *
 * @returns true if HM has cached the nested-guest VMCB, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu)
{
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fCacheValid;
}


/**
 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
 * active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu        The cross context virtual CPU structure of the calling EMT.
 * @param   fIntercept   The SVM control/instruction intercept, see
 *                       SVM_CTRL_INTERCEPT_*.
 */
VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
}


/**
 * Checks if the nested-guest VMCB has the specified CR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified CR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified exception intercept active.
 *
 * @returns @c true if the intercept is active, @c false otherwise.
 * @param   pVCpu     The cross context virtual CPU structure of the calling EMT.
 * @param   uVector   The exception / interrupt vector.
 */
VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector)
{
    Assert(uVector < 32);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
}


/**
 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
 *
 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fVIntrMasking;
}


/**
 * Checks if the nested-guest VMCB has nested-paging enabled.
 *
 * @returns @c true if nested-paging is enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fNestedPaging;
}


/**
 * Returns the nested-guest VMCB pause-filter count.
 *
 * @returns The pause-filter count.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->u16PauseFilterCount;
}