
source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp@ 72306

Last change on this file since 72306 was 72079, checked in by vboxsync, 7 years ago

VMM/HMSVM: Added hmR0SvmNstGstUndoTscOffset as counterpart of HMSvmNstGstApplyTscOffset.

/* $Id: HMSVMAll.cpp 72079 2018-05-02 05:15:20Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/hm_svm.h>


#ifndef IN_RC
/**
 * Emulates a simple MOV TPR (CR8) instruction.
 *
 * Used for TPR patching on 32-bit guests. This simply looks up the patch record
 * at EIP and performs the required operation.
 *
 * This VMMCALL is used as a fallback mechanism when the mov to/from cr8 isn't
 * exactly how we want it to be (e.g. not followed by a shr 4, as is usually
 * done for TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval VINF_SUCCESS if the access was handled successfully.
 * @retval VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdateRipAndRF    Where to store whether the guest RIP/EIP has
 *                              been updated as part of the TPR patch operation.
 */
static int hmSvmEmulateMovTpr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdateRipAndRF)
{
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fUpdateRipAndRF = false;
    bool fPatchFound     = false;
    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        bool    fPending;
        uint8_t u8Tpr;

        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;

        fPatchFound = true;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                int rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                fUpdateRipAndRF = true;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t u32Val;
                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                    AssertRC(rc);
                    u8Tpr = u32Val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);

                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                fUpdateRipAndRF = true;
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                *pfUpdateRipAndRF = fUpdateRipAndRF;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    *pfUpdateRipAndRF = fUpdateRipAndRF;
    if (fPatchFound)
        return VINF_SUCCESS;
    return VERR_NOT_FOUND;
}
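
/*
 * Illustrative sketch (not part of the original file): the TPR patching above
 * exists because the local APIC TPR keeps the priority class in bits 7:4 while
 * CR8 holds only the 4-bit priority class, so unpatched guest code typically
 * shifts by 4 when moving between the two (the "shr 4" mentioned in the doc
 * comment). A minimal model of that relationship, assuming plain uint8_t
 * values and hypothetical names:
 *
 *     static uint8_t ExampleTprToCr8(uint8_t u8Tpr) { return u8Tpr >> 4; }
 *     static uint8_t ExampleCr8ToTpr(uint8_t u8Cr8) { return (uint8_t)(u8Cr8 << 4); }
 *
 * The patch records handled above cover guest code that omits such shifts.
 */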


/**
 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
 * in IEM).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @sa hmR0SvmVmRunCacheVmcb.
 */
VMM_INT_DECL(void) HMSvmNstGstVmExitNotify(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    if (pCtx->hwvirt.svm.fHMCachedVmcb)
    {
        PSVMVMCBCTRL        pVmcbNstGstCtrl  = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;

        /*
         * Restore fields as our own code might look at the VMCB controls as part
         * of the #VMEXIT handling in IEM. Otherwise, strictly speaking we don't need to
         * restore these fields because currently none of them are written back to memory
         * by a physical CPU on #VMEXIT.
         */
        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
        pCtx->hwvirt.svm.fHMCachedVmcb = false;
    }

    /*
     * Currently, VMRUN / #VMEXIT transitions involve trips to ring-3 that would flag a full
     * CPU state change. However, if we exit to ring-3 in response to receiving a physical
     * interrupt, we skip signaling any CPU state change as normally no change is done to the
     * execution state (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
     *
     * With nested-guests, the state can change on the trip to ring-3, e.g. we might perform
     * an SVM_EXIT_INTR #VMEXIT for the nested-guest in ring-3. Hence we signal a full CPU
     * state change here.
     */
    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
}


/**
 * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
 * enabled for the VM.
 *
 * @returns @c true if VGIF is enabled, @c false otherwise.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks The value returned by this function is expected by the callers not
 *          to change throughout the lifetime of the VM.
 */
VMM_INT_DECL(bool) HMSvmIsVGifActive(PVM pVM)
{
    bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;

    return HMIsEnabled(pVM) && fVGif && fUseVGif;
}


/**
 * Applies the TSC offset of an SVM nested-guest, if any, and returns the new
 * TSC value for the nested-guest.
 *
 * @returns The nested-guest TSC after applying any nested-guest TSC offset.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks  The guest TSC.
 *
 * @remarks This function looks at the VMCB cache rather than directly at the
 *          nested-guest VMCB. The latter may have been modified for executing
 *          using hardware-assisted SVM.
 *
 * @note    If you make any changes to this function, please check if
 *          hmR0SvmNstGstUndoTscOffset() needs adjusting.
 *
 * @sa      CPUMApplyNestedGuestTscOffset(), hmR0SvmNstGstUndoTscOffset().
 */
VMM_INT_DECL(uint64_t) HMSvmNstGstApplyTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb);
    NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return uTicks + pVmcbNstGstCache->u64TSCOffset;
}
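
/*
 * Illustrative sketch (not part of the original file): the apply/undo pair is
 * plain arithmetic on the cached nested-guest TSC offset. A minimal model of
 * both directions, using hypothetical names and a plain uint64_t offset:
 *
 *     static uint64_t ExampleApplyTscOffset(uint64_t uTicks, uint64_t uTscOffset)
 *     {
 *         return uTicks + uTscOffset;   // what HMSvmNstGstApplyTscOffset does above
 *     }
 *
 *     static uint64_t ExampleUndoTscOffset(uint64_t uTicks, uint64_t uTscOffset)
 *     {
 *         return uTicks - uTscOffset;   // what the hmR0SvmNstGstUndoTscOffset counterpart is expected to do
 *     }
 */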
#endif /* !IN_RC */


/**
 * Performs the operations that are necessary as part of vmmcall instruction
 * execution in the guest.
 *
 * @returns Strict VBox status code (i.e. informational status codes too).
 * @retval  VINF_SUCCESS on successful handling, no \#UD needs to be thrown,
 *          update RIP and eflags.RF depending on @a pfUpdatedRipAndRF and
 *          continue guest execution.
 * @retval  VINF_GIM_HYPERCALL_CONTINUING continue hypercall without updating
 *          RIP.
 * @retval  VINF_GIM_R3_HYPERCALL re-start the hypercall from ring-3.
 *
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   pCtx                Pointer to the guest-CPU context.
 * @param   pfUpdatedRipAndRF   Where to store whether the guest RIP/EIP has
 *                              been updated as part of handling the VMMCALL
 *                              operation.
 */
VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmmcall(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfUpdatedRipAndRF)
{
#ifndef IN_RC
    /*
     * TPR patched instruction emulation for 32-bit guests.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmSvmEmulateMovTpr(pVCpu, pCtx, pfUpdatedRipAndRF);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;

        if (rc != VERR_NOT_FOUND)
        {
            Log(("hmSvmExitVmmCall: hmSvmEmulateMovTpr returns %Rrc\n", rc));
            return rc;
        }
    }
#endif

    /*
     * Paravirtualized hypercalls.
     */
    *pfUpdatedRipAndRF = false;
    if (pVCpu->hm.s.fHypercallsEnabled)
        return GIMHypercall(pVCpu, pCtx);

    return VERR_NOT_AVAILABLE;
}


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent  Pointer to the SVM event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_EXCEPTION:
        case SVM_EVENT_NMI:             return TRPM_TRAP;
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}


/**
 * Gets the MSR permission bitmap byte and bit offset for the specified MSR.
 *
 * @returns VBox status code.
 * @param   idMsr       The MSR being requested.
 * @param   pbOffMsrpm  Where to store the byte offset in the MSR permission
 *                      bitmap for @a idMsr.
 * @param   puMsrpmBit  Where to store the bit offset starting at the byte
 *                      returned in @a pbOffMsrpm.
 */
VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint8_t *puMsrpmBit)
{
    Assert(pbOffMsrpm);
    Assert(puMsrpmBit);

    /*
     * MSRPM Layout:
     * Byte offset          MSR range
     * 0x000  - 0x7ff       0x00000000 - 0x00001fff
     * 0x800  - 0xfff       0xc0000000 - 0xc0001fff
     * 0x1000 - 0x17ff      0xc0010000 - 0xc0011fff
     * 0x1800 - 0x1fff      Reserved
     *
     * Each MSR is represented by 2 permission bits (read and write).
     */
    if (idMsr <= 0x00001fff)
    {
        /* Pentium-compatible MSRs. */
        uint32_t const bitoffMsr = idMsr << 1;
        *pbOffMsrpm = bitoffMsr >> 3;
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0000000
        && idMsr <= 0xc0001fff)
    {
        /* AMD Sixth Generation x86 Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0000000) << 1;
        *pbOffMsrpm = 0x800 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    if (   idMsr >= 0xc0010000
        && idMsr <= 0xc0011fff)
    {
        /* AMD Seventh and Eighth Generation Processor MSRs. */
        uint32_t const bitoffMsr = (idMsr - 0xc0010000) << 1;
        *pbOffMsrpm = 0x1000 + (bitoffMsr >> 3);
        *puMsrpmBit = bitoffMsr & 7;
        return VINF_SUCCESS;
    }

    *pbOffMsrpm = 0;
    *puMsrpmBit = 0;
    return VERR_OUT_OF_RANGE;
}
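
/*
 * Illustrative usage sketch (not part of the original file): a worked example
 * of the MSRPM math above for MSR 0xc0000080 (EFER). The bit offset within
 * the bitmap is (0x80 << 1) = 0x100, so the byte offset is
 * 0x800 + (0x100 >> 3) = 0x820 and the returned bit is bit 0 of that byte
 * (the matching write-permission bit would typically follow at bit 1).
 * A minimal caller might look like this:
 *
 *     uint16_t offMsrpm;
 *     uint8_t  uMsrpmBit;
 *     int rc = HMSvmGetMsrpmOffsetAndBit(0xc0000080, &offMsrpm, &uMsrpmBit);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // offMsrpm == 0x820, uMsrpmBit == 0 for this MSR.
 *         uint8_t const *pbMsrpm = ...;   // the MSR permission bitmap (elided here)
 *         bool fInterceptRead = RT_BOOL(pbMsrpm[offMsrpm] & RT_BIT(uMsrpmBit));
 *     }
 */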


/**
 * Determines whether an IOIO intercept is active for the nested-guest or not.
 *
 * @returns @c true if the IO access is intercepted, @c false otherwise.
 * @param   pvIoBitmap      Pointer to the nested-guest IO bitmap.
 * @param   u16Port         The IO port being accessed.
 * @param   enmIoType       The type of IO access.
 * @param   cbReg           The IO operand size in bytes.
 * @param   cAddrSizeBits   The address size bits (16, 32 or 64).
 * @param   iEffSeg         The effective segment number.
 * @param   fRep            Whether this is a repeating IO instruction (REP prefix).
 * @param   fStrIo          Whether this is a string IO instruction.
 * @param   pIoExitInfo     Pointer to the SVMIOIOEXITINFO struct to be filled.
 *                          Optional, can be NULL.
 */
VMM_INT_DECL(bool) HMSvmIsIOInterceptActive(void *pvIoBitmap, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
                                            uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo,
                                            PSVMIOIOEXITINFO pIoExitInfo)
{
    Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
    Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);

    /*
     * The IOPM layout:
     * Each bit represents one 8-bit port. That makes a total of 0..65535 bits or
     * two 4K pages.
     *
     * For IO instructions that access more than a single byte, the permission bits
     * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.
     *
     * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),
     * we need 3 extra bits beyond the second 4K page.
     */
    static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };

    uint16_t const offIopm   = u16Port >> 3;
    uint16_t const fSizeMask = s_auSizeMasks[(cAddrSizeBits >> SVM_IOIO_OP_SIZE_SHIFT) & 7];
    uint8_t  const cShift    = u16Port - (offIopm << 3);
    uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);

    uint8_t const *pbIopm = (uint8_t *)pvIoBitmap;
    Assert(pbIopm);
    pbIopm += offIopm;
    uint16_t const u16Iopm = *(uint16_t *)pbIopm;
    if (u16Iopm & fIopmMask)
    {
        if (pIoExitInfo)
        {
            static const uint32_t s_auIoOpSize[] =
            { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };

            static const uint32_t s_auIoAddrSize[] =
            { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };

            pIoExitInfo->u         = s_auIoOpSize[cbReg & 7];
            pIoExitInfo->u        |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
            pIoExitInfo->n.u1Str   = fStrIo;
            pIoExitInfo->n.u1Rep   = fRep;
            pIoExitInfo->n.u3Seg   = iEffSeg & 7;
            pIoExitInfo->n.u1Type  = enmIoType;
            pIoExitInfo->n.u16Port = u16Port;
        }
        return true;
    }

    /** @todo remove later (for debugging as VirtualBox always traps all IO
     *        intercepts). */
    AssertMsgFailed(("iemSvmHandleIOIntercept: We expect an IO intercept here!\n"));
    return false;
}
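
/*
 * Illustrative sketch (not part of the original file): a worked example of the
 * IOPM indexing above for a 2-byte access at port 0x71. The byte offset into
 * the bitmap is 0x71 >> 3 = 0x0e and the shift within that byte is
 * 0x71 - (0x0e << 3) = 1, so with a two-port size mask of 3 the probe mask is
 * (1 << 1) | (3 << 1) = 0x6, i.e. bits 1 and 2 of the 16-bit word read at
 * byte offset 0x0e. If either of those bits is set in the nested-guest IO
 * bitmap, the access is reported as intercepted and pIoExitInfo (when
 * provided) is filled for the eventual SVM_EXIT_IOIO \#VMEXIT.
 */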


/**
 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
 * active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx        Pointer to the context.
 * @param   fIntercept  The SVM control/instruction intercept, see
 *                      SVM_CTRL_INTERCEPT_*.
 */
VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fIntercept)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
}


/**
 * Checks if the nested-guest VMCB has the specified CR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified CR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified exception intercept active.
 *
 * @returns @c true if the intercept is active, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx        Pointer to the context.
 * @param   uVector     The exception / interrupt vector.
 */
VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uVector)
{
    Assert(uVector < 32);
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
}


/**
 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
 *
 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 */
VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fVIntrMasking;
}


/**
 * Checks if the nested-guest VMCB has nested-paging enabled.
 *
 * @returns @c true if nested-paging is enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 */
VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fNestedPaging;
}


/**
 * Returns the nested-guest VMCB pause-filter count.
 *
 * @returns The pause-filter count.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx    Pointer to the context.
 */
VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu, PCCPUMCTX pCtx)
{
    Assert(pCtx->hwvirt.svm.fHMCachedVmcb); NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->u16PauseFilterCount;
}
