VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp@78404

Last change on this file since 78404 was 78371, checked in by vboxsync, 6 years ago:

VMM: Move VT-x/AMD-V helpers that are based on CPU specs to CPUM in preparation of upcoming changes. They are better placed in CPUM if, say, NEM needs to implement nested VT-x/AMD-V in the future.
/* $Id: HMSVMAll.cpp 78371 2019-05-03 08:21:44Z vboxsync $ */
/** @file
 * HM SVM (AMD-V) - All contexts.
 */

/*
 * Copyright (C) 2017-2019 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#define VMCPU_INCL_CPUM_GST_CTX
#include "HMInternal.h"
#include <VBox/vmm/apic.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/vm.h>

#include <VBox/err.h>


#ifndef IN_RC

/**
 * Emulates a simple MOV TPR (CR8) instruction.
 *
 * Used for TPR patching on 32-bit guests. This simply looks up the patch record
 * at EIP and performs the required emulation.
 *
 * This VMMCALL is used as a fallback mechanism when mov to/from cr8 isn't exactly
 * like how we want it to be (e.g. not followed by shr 4 as is usually done for
 * TPR). See hmR3ReplaceTprInstr() for the details.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the access was handled successfully, RIP + RFLAGS updated.
 * @retval  VERR_NOT_FOUND if no patch record for this RIP could be found.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE if the found patch type is invalid.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) hmEmulateSvmMovTpr(PVMCPU pVCpu)
{
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Log4(("Emulated VMMCall TPR access replacement at RIP=%RGv\n", pCtx->rip));

    /*
     * We do this in a loop as we increment the RIP after a successful emulation
     * and the new RIP may be a patched instruction which needs emulation as well.
     */
    bool fPatchFound = false;
    PVM  pVM = pVCpu->CTX_SUFF(pVM);
    for (;;)
    {
        PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
        if (!pPatch)
            break;
        fPatchFound = true;

        uint8_t u8Tpr;
        switch (pPatch->enmType)
        {
            case HMTPRINSTR_READ:
            {
                bool fPending;
                int  rc = APICGetTpr(pVCpu, &u8Tpr, &fPending, NULL /* pu8PendingIrq */);
                AssertRC(rc);

                rc = DISWriteReg32(CPUMCTX2CORE(pCtx), pPatch->uDstOperand, u8Tpr);
                AssertRC(rc);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                break;
            }

            case HMTPRINSTR_WRITE_REG:
            case HMTPRINSTR_WRITE_IMM:
            {
                if (pPatch->enmType == HMTPRINSTR_WRITE_REG)
                {
                    uint32_t u32Val;
                    int rc = DISFetchReg32(CPUMCTX2CORE(pCtx), pPatch->uSrcOperand, &u32Val);
                    AssertRC(rc);
                    u8Tpr = u32Val;
                }
                else
                    u8Tpr = (uint8_t)pPatch->uSrcOperand;

                int rc2 = APICSetTpr(pVCpu, u8Tpr);
                AssertRC(rc2);
                pCtx->rip += pPatch->cbOp;
                pCtx->eflags.Bits.u1RF = 0;
                ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged,   HM_CHANGED_GUEST_APIC_TPR
                                                           | HM_CHANGED_GUEST_RIP
                                                           | HM_CHANGED_GUEST_RFLAGS);
                break;
            }

            default:
            {
                AssertMsgFailed(("Unexpected patch type %d\n", pPatch->enmType));
                pVCpu->hm.s.u32HMError = pPatch->enmType;
                return VERR_SVM_UNEXPECTED_PATCH_TYPE;
            }
        }
    }

    return fPatchFound ? VINF_SUCCESS : VERR_NOT_FOUND;
}
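
/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * kind of situation the fallback above handles.  The instruction size and the
 * operand encoding below are hypothetical; see hmR3ReplaceTprInstr() for how
 * patch records are really created.
 */
#if 0
    /* A 32-bit guest reads the TPR without the usual 'shr eax, 4' following
       it, so the cheaper in-place patch variants don't fit:
            mov eax, cr8            ; original instruction
       After patching, the guest executes a VMMCALL instead, and
       hmEmulateSvmMovTpr() finds a patch record along these lines: */
    HMTPRPATCH Patch;
    Patch.enmType     = HMTPRINSTR_READ;    /* emulate a TPR read            */
    Patch.uDstOperand = 0;                  /* hypothetical encoding of eax  */
    Patch.cbOp        = 3;                  /* hypothetical instruction size */
#endif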

# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
 * in IEM).
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest-CPU context.
 *
 * @sa      hmR0SvmVmRunCacheVmcb.
 */
VMM_INT_DECL(void) HMNotifySvmNstGstVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    PSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    if (pVmcbNstGstCache->fCacheValid)
    {
        /*
         * Restore fields as our own code might look at the VMCB controls as part
         * of the #VMEXIT handling in IEM. Otherwise, strictly speaking we don't need to
         * restore these fields because currently none of them are written back to memory
         * by a physical CPU on #VMEXIT.
         */
        PSVMVMCBCTRL pVmcbNstGstCtrl = &pCtx->hwvirt.svm.CTX_SUFF(pVmcb)->ctrl;
        pVmcbNstGstCtrl->u16InterceptRdCRx                 = pVmcbNstGstCache->u16InterceptRdCRx;
        pVmcbNstGstCtrl->u16InterceptWrCRx                 = pVmcbNstGstCache->u16InterceptWrCRx;
        pVmcbNstGstCtrl->u16InterceptRdDRx                 = pVmcbNstGstCache->u16InterceptRdDRx;
        pVmcbNstGstCtrl->u16InterceptWrDRx                 = pVmcbNstGstCache->u16InterceptWrDRx;
        pVmcbNstGstCtrl->u16PauseFilterThreshold           = pVmcbNstGstCache->u16PauseFilterThreshold;
        pVmcbNstGstCtrl->u16PauseFilterCount               = pVmcbNstGstCache->u16PauseFilterCount;
        pVmcbNstGstCtrl->u32InterceptXcpt                  = pVmcbNstGstCache->u32InterceptXcpt;
        pVmcbNstGstCtrl->u64InterceptCtrl                  = pVmcbNstGstCache->u64InterceptCtrl;
        pVmcbNstGstCtrl->u64TSCOffset                      = pVmcbNstGstCache->u64TSCOffset;
        pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking          = pVmcbNstGstCache->fVIntrMasking;
        pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging = pVmcbNstGstCache->fNestedPaging;
        pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt               = pVmcbNstGstCache->fLbrVirt;
        pVmcbNstGstCache->fCacheValid = false;
    }

    /*
     * Transitions to ring-3 flag a full CPU-state change except if we transition to ring-3
     * in response to a physical CPU interrupt as no changes to the guest-CPU state are
     * expected (see VINF_EM_RAW_INTERRUPT handling in hmR0SvmExitToRing3).
     *
     * However, with nested-guests, the state -can- change on trips to ring-3 because we
     * might try to inject a nested-guest physical interrupt and cause an SVM_EXIT_INTR
     * #VMEXIT for the nested-guest from ring-3. Import the complete state here as we will
     * be swapping to the guest VMCB after the #VMEXIT.
     */
    CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_ALL);
    AssertMsg(!(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_ALL),
              ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", pVCpu->cpum.GstCtx.fExtrn, CPUMCTX_EXTRN_ALL));
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
}
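
/*
 * Editor's note -- illustrative sketch, not part of the original file: how a
 * #VMEXIT path outside ring-0 SVM code might use the notification above.
 * iemSvmHandleVmexit() is a hypothetical name for wherever IEM finishes
 * emulating the #VMEXIT.
 */
#if 0
static void iemSvmHandleVmexit(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /* ... IEM writes the #VMEXIT information back to the guest VMCB ... */

    /* Let HM undo its VMCB-control modifications and import the complete
       guest state before we switch back to the outer guest. */
    HMNotifySvmNstGstVmexit(pVCpu, pCtx);
}
#endif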
# endif

/**
 * Checks if the Virtual GIF (Global Interrupt Flag) feature is supported and
 * enabled for the VM.
 *
 * @returns @c true if VGIF is enabled, @c false otherwise.
 * @param   pVM     The cross context VM structure.
 *
 * @remarks The value returned by this function is expected by the callers not
 *          to change throughout the lifetime of the VM.
 */
VMM_INT_DECL(bool) HMIsSvmVGifActive(PVM pVM)
{
    bool const fVGif    = RT_BOOL(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    bool const fUseVGif = fVGif && pVM->hm.s.svm.fVGif;
    return fVGif && fUseVGif;
}
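
/*
 * Editor's note -- illustrative sketch, not part of the original file: since
 * the result is constant for the VM's lifetime, callers typically query it
 * once when deciding how to virtualize GIF.
 */
#if 0
    if (HMIsSvmVGifActive(pVM))
    {   /* Hardware tracks the nested-guest GIF via the VMCB int-control. */ }
    else
    {   /* GIF has to be tracked in software instead. */ }
#endif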


/**
 * Applies the TSC offset of an SVM nested-guest if any and returns the new TSC
 * value for the nested-guest.
 *
 * @returns The TSC value after applying any nested-guest TSC offset.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks  The guest TSC.
 *
 * @remarks This function looks at the VMCB cache rather than directly at the
 *          nested-guest VMCB. The latter may have been modified for executing
 *          using hardware-assisted SVM.
 *
 * @sa      CPUMRemoveNestedGuestTscOffset, HMRemoveSvmNstGstTscOffset.
 */
VMM_INT_DECL(uint64_t) HMApplySvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    Assert(pVmcbNstGstCache->fCacheValid);
    return uTicks + pVmcbNstGstCache->u64TSCOffset;
}


/**
 * Removes the TSC offset of an SVM nested-guest if any and returns the new TSC
 * value for the guest.
 *
 * @returns The TSC value after removing any nested-guest TSC offset.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uTicks  The nested-guest TSC.
 *
 * @remarks This function looks at the VMCB cache rather than directly at the
 *          nested-guest VMCB. The latter may have been modified for executing
 *          using hardware-assisted SVM.
 *
 * @sa      CPUMApplyNestedGuestTscOffset, HMApplySvmNstGstTscOffset.
 */
VMM_INT_DECL(uint64_t) HMRemoveSvmNstGstTscOffset(PVMCPU pVCpu, uint64_t uTicks)
{
    PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx)); RT_NOREF(pCtx);
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    Assert(pVmcbNstGstCache->fCacheValid);
    return uTicks - pVmcbNstGstCache->u64TSCOffset;
}
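
/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * two helpers above are inverses of each other.  A hedged round-trip,
 * assuming TMCpuTickGet() as the TSC source:
 */
#if 0
    uint64_t const uGuestTsc  = TMCpuTickGet(pVCpu);                          /* outer guest TSC */
    uint64_t const uNstGstTsc = HMApplySvmNstGstTscOffset(pVCpu, uGuestTsc);  /* + u64TSCOffset  */
    Assert(HMRemoveSvmNstGstTscOffset(pVCpu, uNstGstTsc) == uGuestTsc);       /* - u64TSCOffset  */
#endif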


/**
 * Interface used by IEM to handle patched TPR accesses.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if the hypercall was handled, RIP + RFLAGS all dealt with.
 * @retval  VERR_NOT_FOUND if the hypercall was _not_ handled.
 * @retval  VERR_SVM_UNEXPECTED_PATCH_TYPE on IPE.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMM_INT_DECL(int) HMHCMaybeMovTprSvmHypercall(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (pVM->hm.s.fTprPatchingAllowed)
    {
        int rc = hmEmulateSvmMovTpr(pVCpu);
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        return rc;
    }
    return VERR_NOT_FOUND;
}
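
/*
 * Editor's note -- illustrative sketch, not part of the original file: a
 * VMMCALL intercept handler giving the TPR-patch hypercall a chance before
 * falling back to ordinary hypercall handling.  The GIMHypercall() call site
 * is an assumption, simplified from what the real #VMEXIT handlers do.
 */
#if 0
    int rc = HMHCMaybeMovTprSvmHypercall(pVCpu);
    if (rc == VERR_NOT_FOUND)
    {
        /* Not a patched TPR access; treat it as a regular hypercall. */
        VBOXSTRICTRC rcStrict = GIMHypercall(pVCpu, &pVCpu->cpum.GstCtx);
        /* ... handle rcStrict ... */
    }
#endif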


/**
 * Checks if the current AMD CPU is subject to erratum 170 "In SVM mode,
 * incorrect code bytes may be fetched after a world-switch".
 *
 * @param   pu32Family      Where to store the CPU family (can be NULL).
 * @param   pu32Model       Where to store the CPU model (can be NULL).
 * @param   pu32Stepping    Where to store the CPU stepping (can be NULL).
 * @returns true if the erratum applies, false otherwise.
 */
VMM_INT_DECL(int) HMIsSubjectToSvmErratum170(uint32_t *pu32Family, uint32_t *pu32Model, uint32_t *pu32Stepping)
{
    /*
     * Erratum 170 which requires a forced TLB flush for each world switch:
     * See AMD spec. "Revision Guide for AMD NPT Family 0Fh Processors".
     *
     * All BH-G1/2 and DH-G1/2 models include a fix:
     *      Athlon X2:      0x6b    1/2
     *                      0x68    1/2
     *      Athlon 64:      0x7f    1
     *                      0x6f    2
     *      Sempron:        0x7f    1/2
     *                      0x6f    2
     *                      0x6c    2
     *                      0x7c    2
     *      Turion 64:      0x68    2
     */
    uint32_t u32Dummy;
    uint32_t u32Version, u32Family, u32Model, u32Stepping, u32BaseFamily;
    ASMCpuId(1, &u32Version, &u32Dummy, &u32Dummy, &u32Dummy);
    u32BaseFamily = (u32Version >> 8) & 0xf;
    u32Family     = u32BaseFamily + (u32BaseFamily == 0xf ? ((u32Version >> 20) & 0x7f) : 0);
    u32Model      = ((u32Version >> 4) & 0xf);
    u32Model      = u32Model | ((u32BaseFamily == 0xf ? (u32Version >> 16) & 0x0f : 0) << 4);
    u32Stepping   = u32Version & 0xf;

    bool fErratumApplies = false;
    if (   u32Family == 0xf
        && !((u32Model == 0x68 || u32Model == 0x6b || u32Model == 0x7f) && u32Stepping >= 1)
        && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2))
    {
        fErratumApplies = true;
    }

    if (pu32Family)
        *pu32Family   = u32Family;
    if (pu32Model)
        *pu32Model    = u32Model;
    if (pu32Stepping)
        *pu32Stepping = u32Stepping;

    return fErratumApplies;
}
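
/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * typical use is during HM initialization, forcing a TLB flush on every world
 * switch when the erratum applies.  The fAlwaysFlushTLB flag name is an
 * assumption.
 */
#if 0
    uint32_t u32Family, u32Model, u32Stepping;
    if (HMIsSubjectToSvmErratum170(&u32Family, &u32Model, &u32Stepping))
    {
        LogRel(("HM: AMD erratum 170 applies (family %#x model %#x stepping %#x)\n",
                u32Family, u32Model, u32Stepping));
        pVM->hm.s.svm.fAlwaysFlushTLB = true;   /* assumed flag name */
    }
#endif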

#endif /* !IN_RC */


/**
 * Converts an SVM event type to a TRPM event type.
 *
 * @returns The TRPM event type.
 * @retval  TRPM_32BIT_HACK if the specified type of event isn't among the set
 *          of recognized trap types.
 *
 * @param   pEvent      Pointer to the SVM event.
 * @param   uVector     The vector associated with the event.
 */
VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent, uint8_t uVector)
{
    uint8_t const uType = pEvent->n.u3Type;
    switch (uType)
    {
        case SVM_EVENT_EXTERNAL_IRQ:    return TRPM_HARDWARE_INT;
        case SVM_EVENT_SOFTWARE_INT:    return TRPM_SOFTWARE_INT;
        case SVM_EVENT_NMI:             return TRPM_TRAP;
        case SVM_EVENT_EXCEPTION:
        {
            if (   uVector == X86_XCPT_BP
                || uVector == X86_XCPT_OF)
                return TRPM_SOFTWARE_INT;
            return TRPM_TRAP;
        }
        default:
            break;
    }
    AssertMsgFailed(("HMSvmEventToTrpmEvent: Invalid pending-event type %#x\n", uType));
    return TRPM_32BIT_HACK;
}
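
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * handing a pending SVM event over to TRPM, e.g. when leaving HM.  The
 * pVCpu->hm.s.Event.u64IntInfo source is an assumption.
 */
#if 0
    SVMEVENT Event;
    Event.u = pVCpu->hm.s.Event.u64IntInfo;     /* assumed pending-event source */
    uint8_t const   uVector  = Event.n.u8Vector;
    TRPMEVENT const enmEvent = HMSvmEventToTrpmEventType(&Event, uVector);
    if (enmEvent != TRPM_32BIT_HACK)
    {
        int rc = TRPMAssertTrap(pVCpu, uVector, enmEvent);
        AssertRC(rc);
    }
#endif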


/**
 * Returns whether HM has cached the nested-guest VMCB.
 *
 * If the VMCB is cached by HM, it means HM may have potentially modified the
 * VMCB for execution using hardware-assisted SVM.
 *
 * @returns true if HM has cached the nested-guest VMCB, false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMHasGuestSvmVmcbCached(PVMCPU pVCpu)
{
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fCacheValid;
}


/**
 * Checks if the nested-guest VMCB has the specified ctrl/instruction intercept
 * active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   fIntercept  The SVM control/instruction intercept, see
 *                      SVM_CTRL_INTERCEPT_*.
 */
VMM_INT_DECL(bool) HMIsGuestSvmCtrlInterceptSet(PVMCPU pVCpu, uint64_t fIntercept)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u64InterceptCtrl & fIntercept);
}


/**
 * Checks if the nested-guest VMCB has the specified CR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified CR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uCr     The CR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteCRxInterceptSet(PVMCPU pVCpu, uint8_t uCr)
{
    Assert(uCr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrCRx & (1 << uCr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR read intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmReadDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptRdDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified DR write intercept active.
 *
 * @returns @c true if the intercept is set, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   uDr     The DR register number (0 to 15).
 */
VMM_INT_DECL(bool) HMIsGuestSvmWriteDRxInterceptSet(PVMCPU pVCpu, uint8_t uDr)
{
    Assert(uDr < 16);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u16InterceptWrDRx & (1 << uDr));
}


/**
 * Checks if the nested-guest VMCB has the specified exception intercept active.
 *
 * @returns @c true if the intercept is active, @c false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uVector     The exception / interrupt vector.
 */
VMM_INT_DECL(bool) HMIsGuestSvmXcptInterceptSet(PVMCPU pVCpu, uint8_t uVector)
{
    Assert(uVector < 32);
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return RT_BOOL(pVmcbNstGstCache->u32InterceptXcpt & (1 << uVector));
}
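
/*
 * Editor's note -- illustrative sketch, not part of the original file: these
 * cache-based queries let instruction emulation decide whether an event
 * belongs to the nested hypervisor.  A hedged example for a #PF raised while
 * executing the nested-guest (simplified; the real logic also delivers the
 * exit information):
 */
#if 0
    if (   CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx)
        && HMIsGuestSvmXcptInterceptSet(pVCpu, X86_XCPT_PF))
    {
        /* The nested hypervisor intercepts #PF: emulate a #VMEXIT instead of
           delivering the exception to the nested-guest. */
    }
#endif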


/**
 * Checks if the nested-guest VMCB has virtual-interrupt masking enabled.
 *
 * @returns @c true if virtual-interrupts are masked, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMIsGuestSvmVirtIntrMasking(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fVIntrMasking;
}


/**
 * Checks if the nested-guest VMCB has nested-paging enabled.
 *
 * @returns @c true if nested-paging is enabled, @c false otherwise.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(bool) HMIsGuestSvmNestedPagingEnabled(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->fNestedPaging;
}


/**
 * Returns the nested-guest VMCB pause-filter count.
 *
 * @returns The pause-filter count.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 */
VMM_INT_DECL(uint16_t) HMGetGuestSvmPauseFilterCount(PVMCPU pVCpu)
{
    Assert(HMHasGuestSvmVmcbCached(pVCpu));
    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
    return pVmcbNstGstCache->u16PauseFilterCount;
}
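
/*
 * Editor's note -- illustrative sketch, not part of the original file: the
 * pause-filter count is what PAUSE intercept logic would compare against.
 * A hedged sketch, assuming a hypothetical per-VCPU cPauses counter:
 */
#if 0
    if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE))
    {
        uint16_t const cMaxPausesBeforeExit = HMGetGuestSvmPauseFilterCount(pVCpu);
        if (++pVCpu->hm.s.cPauses >= cMaxPausesBeforeExit)  /* hypothetical counter */
        {
            /* Filter exhausted: emulate an SVM_EXIT_PAUSE #VMEXIT. */
        }
    }
#endif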