/* $Id: NEMAllNativeTemplate-win.cpp.h 72522 2018-06-12 08:45:27Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from Hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
    do { \
        (a_Dst).u64Base  = (a_Src).Base; \
        (a_Dst).u32Limit = (a_Src).Limit; \
        (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
        (a_Dst).Attr.u   = (a_Src).Attributes; \
        (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
    } while (0)
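/* Illustrative only (a sketch, not an additional code path in this file):
   NEM_WIN_COPY_BACK_SEG() is what the GET_SEG() macro further down uses to
   copy a WHV segment register value back into the CPUM context, e.g.:
        NEM_WIN_COPY_BACK_SEG(pCtx->es, aValues[iReg].Segment);
   mirroring the GET_SEG() usage in nemHCWinCopyStateFromHyperV() below. */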


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);


#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}
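/* Illustrative only (a sketch, not part of the original file): a caller that
   wants guest page GCPhys mapped read/write via the hypercall path would do
   something along the lines of
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE);
   and later drop it again with nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys).
   The HV_MAP_GPA_XXX flag names here follow the @param docs above. */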


#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    NOREF(pCtx);
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
    AssertLogRelRCReturn(rc, rc);
    return rc;

# else
    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

# define ADD_REG64(a_enmName, a_uValue) do { \
        aenmNames[iReg]             = (a_enmName); \
        aValues[iReg].Reg128.High64 = 0; \
        aValues[iReg].Reg64         = (a_uValue); \
        iReg++; \
    } while (0)
# define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
        aenmNames[iReg]             = (a_enmName); \
        aValues[iReg].Reg128.Low64  = (a_uValueLo); \
        aValues[iReg].Reg128.High64 = (a_uValueHi); \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pCtx->rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pCtx->rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pCtx->rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pCtx->rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pCtx->rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pCtx->rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pCtx->rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pCtx->rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8, pCtx->r8);
            ADD_REG64(WHvX64RegisterR9, pCtx->r9);
            ADD_REG64(WHvX64RegisterR10, pCtx->r10);
            ADD_REG64(WHvX64RegisterR11, pCtx->r11);
            ADD_REG64(WHvX64RegisterR12, pCtx->r12);
            ADD_REG64(WHvX64RegisterR13, pCtx->r13);
            ADD_REG64(WHvX64RegisterR14, pCtx->r14);
            ADD_REG64(WHvX64RegisterR15, pCtx->r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pCtx->rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pCtx->rflags.u);

    /* Segments */
# define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pCtx->es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pCtx->cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pCtx->ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pCtx->ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pCtx->fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pCtx->gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pCtx->ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pCtx->tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg] = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
            aValues[iReg].Table.Base  = pCtx->idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg] = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
            aValues[iReg].Table.Base  = pCtx->gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pCtx->cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pCtx->cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pCtx->cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pCtx->cr4);
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pCtx->dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pCtx->dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pCtx->dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pCtx->dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pCtx->dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pCtx->dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1]);

        aenmNames[iReg] = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pCtx->pXStateR3->x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pCtx->pXStateR3->x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pCtx->pXStateR3->x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pCtx->pXStateR3->x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
                                                | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
                                                | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp            = (pCtx->pXStateR3->x87.FPUDP)
                                                            | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
                                                            | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pCtx->pXStateR3->x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pCtx->msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pCtx->msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pCtx->SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pCtx->SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pCtx->SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pCtx->msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pCtx->msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pCtx->msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pCtx->msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pCtx->msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* Event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows.  Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
# endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

# undef ADD_REG64
# undef ADD_REG128
# undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
{
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* See NEMR0ImportState */
    NOREF(pCtx);
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
    if (RT_SUCCESS(rc))
        return rc;
    if (rc == VERR_NEM_FLUSH_TLB)
        return PGMFlushTLB(pVCpu, pCtx->cr3, true /*fGlobal*/);
    if (rc == VERR_NEM_CHANGE_PGM_MODE)
        return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    AssertLogRelRCReturn(rc, rc);
    return rc;

# else
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pCtx->fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0;
    aenmNames[iReg++] = WHvRegisterPendingEvent1;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
# endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
# define GET_REG64(a_DstVar, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
# define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        if ((a_DstVar) != aValues[iReg].Reg64) \
            Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
        (a_DstVar) = aValues[iReg].Reg64; \
        iReg++; \
    } while (0)
# define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
        Assert(aenmNames[iReg] == a_enmName); \
        (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
        (a_DstVarHi) = aValues[iReg].Reg128.High64; \
        iReg++; \
    } while (0)
# define GET_SEG(a_SReg, a_enmName) do { \
        Assert(aenmNames[iReg] == (a_enmName)); \
        NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
        iReg++; \
    } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pCtx->rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pCtx->rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pCtx->rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pCtx->rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pCtx->rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pCtx->rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pCtx->rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pCtx->rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pCtx->r8, WHvX64RegisterR8);
            GET_REG64(pCtx->r9, WHvX64RegisterR9);
            GET_REG64(pCtx->r10, WHvX64RegisterR10);
            GET_REG64(pCtx->r11, WHvX64RegisterR11);
            GET_REG64(pCtx->r12, WHvX64RegisterR12);
            GET_REG64(pCtx->r13, WHvX64RegisterR13);
            GET_REG64(pCtx->r14, WHvX64RegisterR14);
            GET_REG64(pCtx->r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pCtx->rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pCtx->rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pCtx->es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pCtx->cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pCtx->ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pCtx->ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pCtx->fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pCtx->gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pCtx->ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  To
               avoid triggering sanity assertions around the code, always fix this up. */
            GET_SEG(pCtx->tr, WHvX64RegisterTr);
            switch (pCtx->tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pCtx->idtr.cbIdt = aValues[iReg].Table.Limit;
            pCtx->idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pCtx->gdtr.cbGdt = aValues[iReg].Table.Limit;
            pCtx->gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fFlushTlb         = false;
    bool fFlushGlobalTlb   = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pCtx->cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pCtx->cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pCtx->cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fFlushTlb = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pCtx->cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    /** @todo fixme */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg]   == WHvX64RegisterDr0);
        Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
        if (pCtx->dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pCtx->dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pCtx->dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pCtx->pXStateR3->x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pCtx->pXStateR3->x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pCtx->pXStateR3->x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                      /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pCtx->pXStateR3->x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pCtx->pXStateR3->x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pCtx->pXStateR3->x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pCtx->pXStateR3->x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pCtx->pXStateR3->x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pCtx->pXStateR3->x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pCtx->pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pCtx->msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pCtx->msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pCtx->msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pCtx->SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pCtx->SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pCtx->SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pCtx->msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pCtx->msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pCtx->msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pCtx->msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", VBOXSTRICTRC_VAL(rc2), aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP");
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux,           WHvX64RegisterTscAux,             "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg]     == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pCtx->fExtrn &= ~fWhat;
    if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pCtx->fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fFlushTlb)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    }

    if (fFlushTlb)
    {
        int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pCtx        The target CPU context.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], pCtx, fWhat);
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
#endif
}
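/* Illustrative only (a sketch, not part of the original file): IEM would ask
   for just the bits it needs before interpreting an instruction, e.g.:
        int rc = NEMImportStateOnDemand(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
   Only the CPUMCTX_EXTRN_XXX bits still set in pCtx->fExtrn are actually
   fetched from Hyper-V (the import path masks fWhat with pCtx->fExtrn);
   everything else is already in the CPUM context. */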


/**
 * Query the CPU tick counter and optionally the TSC_AUX MSR value.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pcTicks     Where to return the CPU tick count.
 * @param   puAux       Where to return the TSC_AUX register value.
 */
VMM_INT_DECL(int) NEMHCQueryCpuTick(PVMCPU pVCpu, uint64_t *pcTicks, uint32_t *puAux)
{
#ifdef IN_RING3
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    AssertReturn(VM_IS_NEM_ENABLED(pVM), VERR_NEM_IPE_9);

# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* Call ring-0 and get the values. */
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_QUERY_CPU_TICK, 0, NULL);
    AssertLogRelRCReturn(rc, rc);
    *pcTicks = pVCpu->nem.s.Hypercall.QueryCpuTick.cTicks;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX
               ? pVCpu->nem.s.Hypercall.QueryCpuTick.uAux : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;

# else
    /* Call the official API. */
    WHV_REGISTER_NAME  aenmNames[2] = { WHvX64RegisterTsc, WHvX64RegisterTscAux };
    WHV_REGISTER_VALUE aValues[2]   = { {0, 0}, {0, 0} };
    Assert(RT_ELEMENTS(aenmNames) == RT_ELEMENTS(aValues));
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, 2, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,{tsc,tsc_aux},2,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);
    *pcTicks = aValues[0].Reg64;
    if (puAux)
        *puAux = pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX ? aValues[1].Reg64 : CPUMGetGuestTscAux(pVCpu);
    return VINF_SUCCESS;
# endif
#else  /* IN_RING0 */
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    int rc = nemR0WinQueryCpuTick(pGVM, &pGVM->aCpus[idCpu], pcTicks, puAux);
    if (RT_SUCCESS(rc) && puAux && !(pVCpu->cpum.GstCtx.fExtrn & CPUMCTX_EXTRN_TSC_AUX))
        *puAux = CPUMGetGuestTscAux(pVCpu);
    return rc;
#endif /* IN_RING0 */
}
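/* Illustrative only (a sketch, not part of the original file): typical use by
   a caller wanting the tick count and, when available, the TSC_AUX value:
        uint64_t cTicks = 0;
        uint32_t uAux   = 0;
        int rc = NEMHCQueryCpuTick(pVCpu, &cTicks, &uAux);
   Passing NULL for puAux is also fine when TSC_AUX isn't needed, since the
   function only dereferences it conditionally. */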


#ifdef LOG_ENABLED
/**
 * Get the virtual processor running status.
 */
DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
{
# ifdef IN_RING0
    NOREF(pVCpu);
    return VidProcessorStatusUndefined;
# else
    RTERRVARS Saved;
    RTErrVarsSave(&Saved);

    /*
     * This API is disabled in release builds, it seems.  On build 17101 it requires
     * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
     */
    VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
    NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
    AssertRC(rcNt);

    RTErrVarsRestore(&Saved);
    return enmCpuStatus;
# endif
}
#endif


#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
/**
 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
 *
 * This is an experiment only.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 */
NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Work the state.
     *
     * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
     * So, we just need to modify the state and kick the EMT if it's waiting on
     * messages.  For the latter we use QueueUserAPC / KeAlertThread.
     */
    for (;;)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
        switch (enmState)
        {
            case VMCPUSTATE_STARTED_EXEC_NEM:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
                {
                    Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
                    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
                    return VINF_SUCCESS;
                }
                break;

            case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
                {
# ifdef IN_RING0
                    NTSTATUS rcNt = KeAlertThread(??);
# else
                    NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
# endif
                    Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
                    Assert(rcNt == STATUS_SUCCESS);
                    if (NT_SUCCESS(rcNt))
                    {
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
                        return VINF_SUCCESS;
                    }
                    AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
                }
                break;

            default:
                return VINF_SUCCESS;
        }

        ASMNopPause();
        RT_NOREF(pVM);
    }
}
# endif /* IN_RING3 */
#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
{
    if (LogIs3Enabled())
    {
# ifdef IN_RING3
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                        "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                        "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                        "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                        "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                        "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                        "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                        "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                        "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                        "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                        "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                        "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                        "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                        "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
                        "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                        "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                        "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                        "        efer=%016VR{efer}\n"
                        "         pat=%016VR{pat}\n"
                        "     sf_mask=%016VR{sf_mask}\n"
                        "krnl_gs_base=%016VR{krnl_gs_base}\n"
                        "       lstar=%016VR{lstar}\n"
                        "        star=%016VR{star} cstar=%016VR{cstar}\n"
                        "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                        );

        char szInstr[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
# else
        /** @todo stat logging in ring-0 */
        RT_NOREF(pVM, pVCpu);
# endif
    }
}
#endif /* LOG_ENABLED */


/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
#define SWITCH_IT(a_szPrefix) \
    do \
        switch (u) \
        { \
            case 0x00: return a_szPrefix ""; \
            case 0x01: return a_szPrefix ",Pnd"; \
            case 0x02: return a_szPrefix ",Dbg"; \
            case 0x03: return a_szPrefix ",Pnd,Dbg"; \
            case 0x04: return a_szPrefix ",Shw"; \
            case 0x05: return a_szPrefix ",Pnd,Shw"; \
            case 0x06: return a_szPrefix ",Shw,Dbg"; \
            case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
            default: AssertFailedReturn("WTF?"); \
        } \
    while (0)

#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
/**
 * Translates the execution state bitfield into a short log string, VID version.
 *
 * @returns Read-only log string.
 * @param   pMsgHdr     The header whose state to summarize.
 */
static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
{
    unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
               | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
               | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
    if (pMsgHdr->ExecutionState.EferLma)
        SWITCH_IT("LM");
    else if (pMsgHdr->ExecutionState.Cr0Pe)
        SWITCH_IT("PM");
    else
        SWITCH_IT("RM");
}
#elif defined(IN_RING3)
/**
 * Translates the execution state bitfield into a short log string, WinHv version.
 *
 * @returns Read-only log string.
 * @param   pExitCtx    The exit context whose state to summarize.
 */
static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
{
    unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
               | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
               | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
    if (pExitCtx->ExecutionState.EferLma)
        SWITCH_IT("LM");
    else if (pExitCtx->ExecutionState.Cr0Pe)
        SWITCH_IT("PM");
    else
        SWITCH_IT("RM");
}
#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
#undef SWITCH_IT

1357#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1358/**
1359 * Advances the guest RIP and clear EFLAGS.RF, VID version.
1360 *
1361 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1362 *
1363 * @param pVCpu The cross context virtual CPU structure.
1364 * @param pCtx The CPU context to update.
1365 * @param pExitCtx The exit context.
1366 */
1367DECLINLINE(void) nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
1368{
1369 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1370
1371 /* Advance the RIP. */
1372 Assert(pMsgHdr->InstructionLength > 0 && pMsgHdr->InstructionLength < 16);
1373 pCtx->rip += pMsgHdr->InstructionLength;
1374 pCtx->rflags.Bits.u1RF = 0;
1375
1376 /* Update interrupt inhibition. */
1377 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1378 { /* likely */ }
1379 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1380 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1381}
1382#elif defined(IN_RING3)
1383/**
1384 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
1385 *
1386 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
1387 *
1388 * @param pVCpu The cross context virtual CPU structure.
1389 * @param pCtx The CPU context to update.
1390 * @param pExitCtx The exit context.
1391 */
1392DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1393{
1394 Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));
1395
1396 /* Advance the RIP. */
1397 Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
1398 pCtx->rip += pExitCtx->InstructionLength;
1399 pCtx->rflags.Bits.u1RF = 0;
1400
1401 /* Update interrupt inhibition. */
1402 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1403 { /* likely */ }
1404 else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
1405 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1406}
1407#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1408
1409
1410
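/**
 * Worker callback for PGMPhysNemEnumPagesByState, used by the unmap-all
 * fallback further down: unmaps a single page and updates its NEM state.
 *
 * @returns VINF_SUCCESS (failures are logged, not propagated).
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   pu2NemState The NEM state of the page, updated on return.
 * @param   pvUser      Unused.
 */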
1411NEM_TMPL_STATIC DECLCALLBACK(int)
1412nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1413{
1414 RT_NOREF_PV(pvUser);
1415#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1416 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1417 AssertRC(rc);
1418 if (RT_SUCCESS(rc))
1419#else
1420 RT_NOREF_PV(pVCpu);
1421 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1422 if (SUCCEEDED(hrc))
1423#endif
1424 {
1425 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1426 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1427 }
1428 else
1429 {
1430#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1431 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1432#else
1433 LogRel(("nemHCWinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1434 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1435 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1436#endif
1437 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1438 }
1439 if (pVM->nem.s.cMappedPages > 0)
1440 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1441 return VINF_SUCCESS;
1442}
1443
1444
1445/**
1446 * State to pass between nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory
1447 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1448 */
1449typedef struct NEMHCWINHMACPCCSTATE
1450{
1451 /** Input: Write access. */
1452 bool fWriteAccess;
1453 /** Output: Set if we did something. */
1454 bool fDidSomething;
1455 /** Output: Set if we should resume. */
1456 bool fCanResume;
1457} NEMHCWINHMACPCCSTATE;
1458
1459/**
1460 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1461 * Worker for nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory; pvUser points to a
1462 * NEMHCWINHMACPCCSTATE structure. }
1463 */
1464NEM_TMPL_STATIC DECLCALLBACK(int)
1465nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1466{
1467 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1468 pState->fDidSomething = false;
1469 pState->fCanResume = false;
1470
1471 /* If A20 is disabled, we may need to make another query on the masked
1472 page to get the correct protection information. */
1473 uint8_t u2State = pInfo->u2NemState;
1474 RTGCPHYS GCPhysSrc;
1475 if ( pVM->nem.s.fA20Enabled
1476 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1477 GCPhysSrc = GCPhys;
1478 else
1479 {
1480 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1481 PGMPHYSNEMPAGEINFO Info2;
1482 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1483 AssertRCReturn(rc, rc);
1484
1485 *pInfo = Info2;
1486 pInfo->u2NemState = u2State;
1487 }
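    /* (Example: with A20 masked off, an access at GCPhys 0x00100000 is backed
       by GCPhysSrc 0x00000000, since bit 20 is forced to zero above.) */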
1488
1489 /*
1490 * Consolidate current page state with actual page protection and access type.
1491 * We don't really consider downgrades here, as they shouldn't happen.
1492 */
1493#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1494 /** @todo Someone at Microsoft please explain:
1495 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1496 * readonly page as writable (unmap, then map again). Specifically, this was an
1497 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1498 * the hope of working around that we no longer pre-map anything, just unmap
1499 * stuff and do it lazily here. And here we will first unmap, restart, and then
1500 * remap with the new protection or backing.
1501 */
1502#endif
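    /* In short, the state handling below: UNMAPPED/NOT_SET pages are mapped
       lazily with the current protection; a READABLE page may be upgraded to
       writable (hypercall API only); everything else falls through to the
       unmap-and-restart logic at the bottom of the function. */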
1503 int rc;
1504 switch (u2State)
1505 {
1506 case NEM_WIN_PAGE_STATE_UNMAPPED:
1507 case NEM_WIN_PAGE_STATE_NOT_SET:
1508 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1509 {
1510 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1511 return VINF_SUCCESS;
1512 }
1513
1514 /* Don't bother remapping it if it's a write request to a non-writable page. */
1515 if ( pState->fWriteAccess
1516 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1517 {
1518 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1519 return VINF_SUCCESS;
1520 }
1521
1522 /* Map the page. */
1523 rc = nemHCNativeSetPhysPage(pVM,
1524 pVCpu,
1525 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1526 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1527 pInfo->fNemProt,
1528 &u2State,
1529 true /*fBackingChanged*/);
1530 pInfo->u2NemState = u2State;
1531 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1532 GCPhys, g_apszPageStates[u2State], rc));
1533 pState->fDidSomething = true;
1534 pState->fCanResume = true;
1535 return rc;
1536
1537 case NEM_WIN_PAGE_STATE_READABLE:
1538 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1539 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1540 {
1541 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1542 return VINF_SUCCESS;
1543 }
1544
1545#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1546 /* Upgrade page to writable. */
1547 /** @todo test this */
1548 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1549 && pState->fWriteAccess)
1550 {
1551 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1552 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1553 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1554 AssertRC(rc);
1555 if (RT_SUCCESS(rc))
1556 {
1557 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1558 pState->fDidSomething = true;
1559 pState->fCanResume = true;
1560 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1561 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1562 }
1563 }
1564 else
1565 {
1566 /* Need to emulate the access. */
1567 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1568 rc = VINF_SUCCESS;
1569 }
1570 return rc;
1571#else
1572 break;
1573#endif
1574
1575 case NEM_WIN_PAGE_STATE_WRITABLE:
1576 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1577 {
1578 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1579 return VINF_SUCCESS;
1580 }
1581#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1582 AssertFailed(); /* There should be no downgrades. */
1583#endif
1584 break;
1585
1586 default:
1587 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1588 }
1589
1590 /*
1591 * Unmap and restart the instruction.
1592 * If this fails, which it does every so often, just unmap everything for now.
1593 */
1594#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1595 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1596 AssertRC(rc);
1597 if (RT_SUCCESS(rc))
1598#else
1599 /** @todo figure out whether we mess up the state or if it's WHv. */
1600 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1601 if (SUCCEEDED(hrc))
1602#endif
1603 {
1604 pState->fDidSomething = true;
1605 pState->fCanResume = true;
1606 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1607 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1608 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1609 return VINF_SUCCESS;
1610 }
1611#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1612 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1613 return rc;
1614#else
1615 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1616 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1617 pVM->nem.s.cMappedPages));
1618
1619 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1620 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1621
1622 pState->fDidSomething = true;
1623 pState->fCanResume = true;
1624 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1625 return VINF_SUCCESS;
1626#endif
1627}
1628
1629
1630
1631#if defined(IN_RING0) && defined(NEM_WIN_USE_OUR_OWN_RUN_API)
1632/**
1633 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE, VERR_NEM_FLUSH_TLB
1634 * and VERR_NEM_UPDATE_APIC_BASE into informational status codes and logs+asserts statuses.
1635 *
1636 * @returns VBox strict status code.
1637 * @param pGVM The global (ring-0) VM structure.
1638 * @param pGVCpu The global (ring-0) per CPU structure.
1639 * @param pCtx The CPU context to import into.
1640 * @param fWhat What to import.
1641 * @param pszCaller Who is doing the importing.
1642 */
1643DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller)
1644{
1645 int rc = nemR0WinImportState(pGVM, pGVCpu, pCtx, fWhat);
1646 if (RT_SUCCESS(rc))
1647 {
1648 Assert(rc == VINF_SUCCESS);
1649 return VINF_SUCCESS;
1650 }
1651
1652 if (rc == VERR_NEM_CHANGE_PGM_MODE || rc == VERR_NEM_FLUSH_TLB || rc == VERR_NEM_UPDATE_APIC_BASE)
1653 {
1654 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1655 return -rc;
1656 }
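    /* Note: the negation above relies on these informational VINF_NEM_XXX codes
       having the same absolute value as their VERR_NEM_XXX counterparts. */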
1657 RT_NOREF(pszCaller);
1658 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1659}
1660#endif /* IN_RING0 && NEM_WIN_USE_OUR_OWN_RUN_API*/
1661
1662#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
1663/**
1664 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1665 *
1666 * Unlike the wrapped APIs, this checks whether it's necessary.
1667 *
1668 * @returns VBox strict status code.
1669 * @param pVCpu The cross context per CPU structure.
1670 * @param pGVCpu The global (ring-0) per CPU structure (NULL in ring-3).
1671 * @param pCtx The CPU context to import into.
1672 * @param fWhat What to import.
1673 * @param pszCaller Who is doing the importing.
1674 */
1675DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx,
1676 uint64_t fWhat, const char *pszCaller)
1677{
1678 if (pCtx->fExtrn & fWhat)
1679 {
1680#ifdef IN_RING0
1681 RT_NOREF(pVCpu);
1682 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller);
1683#else
1684 RT_NOREF(pGVCpu, pszCaller);
1685 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1686 AssertRCReturn(rc, rc);
1687#endif
1688 }
1689 return VINF_SUCCESS;
1690}
1691#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || IN_RING3 */
1692
1693#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1694/**
1695 * Copies register state from the X64 intercept message header.
1696 *
1697 * ASSUMES no state copied yet.
1698 *
1699 * @param pVCpu The cross context per CPU structure.
1700 * @param pCtx The register context.
1701 * @param pHdr The X64 intercept message header.
1702 * @sa nemR3WinCopyStateFromX64Header
1703 */
1704DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1705{
1706 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1707 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1708 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pHdr->CsSegment);
1709 pCtx->rip = pHdr->Rip;
1710 pCtx->rflags.u = pHdr->Rflags;
1711
1712 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1713 if (!pHdr->ExecutionState.InterruptShadow)
1714 {
1715 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1716 { /* likely */ }
1717 else
1718 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1719 }
1720 else
1721 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1722
1723 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1724}
1725#elif defined(IN_RING3)
1726/**
1727 * Copies register state from the (common) exit context.
1728 *
1729 * ASSUMES no state copied yet.
1730 *
1731 * @param pVCpu The cross context per CPU structure.
1732 * @param pCtx The register context.
1733 * @param pExitCtx The common exit context.
1734 * @sa nemHCWinCopyStateFromX64Header
1735 */
1736DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1737{
1738 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1739 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1740 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pExitCtx->Cs);
1741 pCtx->rip = pExitCtx->Rip;
1742 pCtx->rflags.u = pExitCtx->Rflags;
1743
1744 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1745 if (!pExitCtx->ExecutionState.InterruptShadow)
1746 {
1747 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1748 { /* likely */ }
1749 else
1750 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1751 }
1752 else
1753 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1754
1755 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1756}
1757#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1758
1759
1760#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1761/**
1762 * Deals with memory intercept message.
1763 *
1764 * @returns Strict VBox status code.
1765 * @param pVM The cross context VM structure.
1766 * @param pVCpu The cross context per CPU structure.
1767 * @param pMsg The message.
1768 * @param pCtx The register context.
1769 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1770 * @sa nemR3WinHandleExitMemory
1771 */
1772NEM_TMPL_STATIC VBOXSTRICTRC
1773nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1774{
1775 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1776 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1777 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1778 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1779
1780 /*
1781 * Whatever we do, we must clear pending event injection upon resume.
1782 */
1783 if (pMsg->Header.ExecutionState.InterruptionPending)
1784 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1785
1786#if 0 /* Experiment: 20K -> 34K exit/s. */
1787 if ( pMsg->Header.ExecutionState.EferLma
1788 && pMsg->Header.CsSegment.Long
1789 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1790 {
1791 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1792 && pMsg->InstructionBytes[0] == 0x89
1793 && pMsg->InstructionBytes[1] == 0x03)
1794 {
1795 pCtx->rip = pMsg->Header.Rip + 2;
1796 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
1797 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1798 //Log(("%RX64 msg:\n%.80Rhxd\n", pCtx->rip, pMsg));
1799 return VINF_SUCCESS;
1800 }
1801 }
1802#endif
1803
1804 /*
1805 * Ask PGM for information about the given GCPhys. We need to check if we're
1806 * out of sync first.
1807 */
1808 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
1809 PGMPHYSNEMPAGEINFO Info;
1810 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
1811 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1812 if (RT_SUCCESS(rc))
1813 {
1814 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1815 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1816 {
1817 if (State.fCanResume)
1818 {
1819 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1820 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1821 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1822 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1823 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1824 return VINF_SUCCESS;
1825 }
1826 }
1827 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1828 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1829 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1830 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1831 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1832 }
1833 else
1834 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1835 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1836 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
1837 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1838
1839 /*
1840 * Emulate the memory access, either access handler or special memory.
1841 */
1842 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1843 VBOXSTRICTRC rcStrict;
1844# ifdef IN_RING0
1845 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx,
1846 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
1847 if (rcStrict != VINF_SUCCESS)
1848 return rcStrict;
1849# else
1850 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
1851 AssertRCReturn(rc, rc);
1852 NOREF(pGVCpu);
1853# endif
1854
1855 if (pMsg->Reserved1)
1856 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
1857 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
1858 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
1859 //if (pMsg->InstructionByteCount > 0)
1860 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
1861
1862 if (pMsg->InstructionByteCount > 0)
1863 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip,
1864 pMsg->InstructionBytes, pMsg->InstructionByteCount);
1865 else
1866 rcStrict = IEMExecOne(pVCpu);
1867 /** @todo do we need to do anything wrt debugging here? */
1868 return rcStrict;
1869}
1870#elif defined(IN_RING3)
1871/**
1872 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1873 *
1874 * @returns Strict VBox status code.
1875 * @param pVM The cross context VM structure.
1876 * @param pVCpu The cross context per CPU structure.
1877 * @param pExit The VM exit information to handle.
1878 * @param pCtx The register context.
1879 * @sa nemHCWinHandleMessageMemory
1880 */
1881NEM_TMPL_STATIC VBOXSTRICTRC
1882nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
1883{
1884 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
1885 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
1886
1887 /*
1888 * Whatever we do, we must clear pending event injection upon resume.
1889 */
1890 if (pExit->VpContext.ExecutionState.InterruptionPending)
1891 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1892
1893 /*
1894 * Ask PGM for information about the given GCPhys. We need to check if we're
1895 * out of sync first.
1896 */
1897 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
1898 PGMPHYSNEMPAGEINFO Info;
1899 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
1900 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1901 if (RT_SUCCESS(rc))
1902 {
1903 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
1904 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1905 {
1906 if (State.fCanResume)
1907 {
1908 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1909 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1910 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1911 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1912 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1913 return VINF_SUCCESS;
1914 }
1915 }
1916 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1917 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1918 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1919 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1920 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1921 }
1922 else
1923 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1924 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1925 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
1926 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1927
1928 /*
1929 * Emulate the memory access, either access handler or special memory.
1930 */
1931 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
1932 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
1933 AssertRCReturn(rc, rc);
1934
1935 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
1936 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
1937 //if (pExit->MemoryAccess.InstructionByteCount > 0)
1938 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
1939
1940 VBOXSTRICTRC rcStrict;
1941 if (pExit->MemoryAccess.InstructionByteCount > 0)
1942 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
1943 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
1944 else
1945 rcStrict = IEMExecOne(pVCpu);
1946 /** @todo do we need to do anything wrt debugging here? */
1947 return rcStrict;
1948}
1949#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1950
1951
1952#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1953/**
1954 * Deals with I/O port intercept message.
1955 *
1956 * @returns Strict VBox status code.
1957 * @param pVM The cross context VM structure.
1958 * @param pVCpu The cross context per CPU structure.
1959 * @param pMsg The message.
1960 * @param pCtx The register context.
1961 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1962 */
1963NEM_TMPL_STATIC VBOXSTRICTRC
1964nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1965{
1966 Assert( pMsg->AccessInfo.AccessSize == 1
1967 || pMsg->AccessInfo.AccessSize == 2
1968 || pMsg->AccessInfo.AccessSize == 4);
1969 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1970 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
1971 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1972
1973 /*
1974 * Whatever we do, we must clear pending event injection upon resume.
1975 */
1976 if (pMsg->Header.ExecutionState.InterruptionPending)
1977 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1978
1979 VBOXSTRICTRC rcStrict;
1980 if (!pMsg->AccessInfo.StringOp)
1981 {
1982 /*
1983 * Simple port I/O.
1984 */
1985 static uint32_t const s_fAndMask[8] =
1986 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
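        /* (Indexed by AccessInfo.AccessSize, which the assertion at the top
           limits to 1, 2 or 4; the UINT32_MAX entries are defensive.) */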
1987 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
1988
1989 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1990 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1991 {
1992 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
1993 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
1994 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1995 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
1996 if (IOM_SUCCESS(rcStrict))
1997 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
1998# ifdef IN_RING0
1999 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
2000 && !pCtx->rflags.Bits.u1TF
2001 /** @todo check for debug breakpoints */ )
2002 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2003 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
2004# endif
2005 else
2006 {
2007 pCtx->rax = pMsg->Rax;
2008 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2009 }
2010 }
2011 else
2012 {
2013 uint32_t uValue = 0;
2014 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
2015 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2016 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2017 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2018 if (IOM_SUCCESS(rcStrict))
2019 {
2020 if (pMsg->AccessInfo.AccessSize != 4)
2021 pCtx->rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2022 else
2023 pCtx->rax = uValue;
2024 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2025 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax));
2026 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2027 }
2028 else
2029 {
2030 pCtx->rax = pMsg->Rax;
2031 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2032# ifdef IN_RING0
2033 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
2034 && !pCtx->rflags.Bits.u1TF
2035 /** @todo check for debug breakpoints */ )
2036 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
2037 pMsg->AccessInfo.AccessSize);
2038# endif
2039 }
2040 }
2041 }
2042 else
2043 {
2044 /*
2045 * String port I/O.
2046 */
2047 /** @todo Someone at Microsoft please explain how we can get the address mode
2048 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2049 * getting the default mode, it can always be overridden by a prefix. This
2050 * forces us to interpret the instruction from opcodes, which is suboptimal.
2051 * Both AMD-V and VT-x include the address size in the exit info, at least on
2052 * CPUs that are reasonably new.
2053 *
2054 * Of course, it's possible this is undocumented and we just need to do some
2055 * experiments to figure out how it's communicated. Alternatively, we can scan
2056 * the opcode bytes for possible evil prefixes.
2057 */
2058 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2059 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2060 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2061 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2062 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
2063 pCtx->rax = pMsg->Rax;
2064 pCtx->rcx = pMsg->Rcx;
2065 pCtx->rdi = pMsg->Rdi;
2066 pCtx->rsi = pMsg->Rsi;
2067# ifdef IN_RING0
2068 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2069 if (rcStrict != VINF_SUCCESS)
2070 return rcStrict;
2071# else
2072 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2073 AssertRCReturn(rc, rc);
2074 RT_NOREF(pGVCpu);
2075# endif
2076
2077 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2078 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2079 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2080 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2081 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2082 rcStrict = IEMExecOne(pVCpu);
2083 }
2084 if (IOM_SUCCESS(rcStrict))
2085 {
2086 /*
2087 * Do debug checks.
2088 */
2089 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2090 || (pMsg->Header.Rflags & X86_EFL_TF)
2091 || DBGFBpIsHwIoArmed(pVM) )
2092 {
2093 /** @todo Debugging. */
2094 }
2095 }
2096 return rcStrict;
2097}
2098#elif defined(IN_RING3)
2099/**
2100 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2101 *
2102 * @returns Strict VBox status code.
2103 * @param pVM The cross context VM structure.
2104 * @param pVCpu The cross context per CPU structure.
2105 * @param pExit The VM exit information to handle.
2106 * @param pCtx The register context.
2107 * @sa nemHCWinHandleMessageIoPort
2108 */
2109NEM_TMPL_STATIC VBOXSTRICTRC
2110nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2111{
2112 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2113 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2114 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2115 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2116
2117 /*
2118 * Whatever we do, we must clear pending event injection upon resume.
2119 */
2120 if (pExit->VpContext.ExecutionState.InterruptionPending)
2121 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2122
2123 VBOXSTRICTRC rcStrict;
2124 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2125 {
2126 /*
2127 * Simple port I/O.
2128 */
2129 static uint32_t const s_fAndMask[8] =
2130 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2131 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2132 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2133 {
2134 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2135 pExit->IoPortAccess.AccessInfo.AccessSize);
2136 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2137 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2138 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2139 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2140 if (IOM_SUCCESS(rcStrict))
2141 {
2142 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2143 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2144 }
2145 }
2146 else
2147 {
2148 uint32_t uValue = 0;
2149 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue, pExit->IoPortAccess.AccessInfo.AccessSize);
2150 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2151 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2152 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2153 if (IOM_SUCCESS(rcStrict))
2154 {
2155 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2156 pCtx->rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2157 else
2158 pCtx->rax = uValue;
2159 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2160 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pCtx->rax));
2161 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2162 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2163 }
2164 }
2165 }
2166 else
2167 {
2168 /*
2169 * String port I/O.
2170 */
2171 /** @todo Someone at Microsoft please explain how we can get the address mode
2172 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2173 * getting the default mode, it can always be overridden by a prefix. This
2174 * forces us to interpret the instruction from opcodes, which is suboptimal.
2175 * Both AMD-V and VT-x include the address size in the exit info, at least on
2176 * CPUs that are reasonably new.
2177 *
2178 * Of course, it's possible this is undocumented and we just need to do some
2179 * experiments to figure out how it's communicated. Alternatively, we can scan
2180 * the opcode bytes for possible evil prefixes.
2181 */
2182 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2183 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2184 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2185 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2186 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2187 pCtx->rax = pExit->IoPortAccess.Rax;
2188 pCtx->rcx = pExit->IoPortAccess.Rcx;
2189 pCtx->rdi = pExit->IoPortAccess.Rdi;
2190 pCtx->rsi = pExit->IoPortAccess.Rsi;
2191# ifdef IN_RING0
2192 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2193 if (rcStrict != VINF_SUCCESS)
2194 return rcStrict;
2195# else
2196 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2197 AssertRCReturn(rc, rc);
2198# endif
2199
2200 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2201 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2202 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2203 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2204 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2205 rcStrict = IEMExecOne(pVCpu);
2206 }
2207 if (IOM_SUCCESS(rcStrict))
2208 {
2209 /*
2210 * Do debug checks.
2211 */
2212 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2213 || (pExit->VpContext.Rflags & X86_EFL_TF)
2214 || DBGFBpIsHwIoArmed(pVM) )
2215 {
2216 /** @todo Debugging. */
2217 }
2218 }
2219 return rcStrict;
2220
2221}
2222#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2223
2224
2225#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2226/**
2227 * Deals with interrupt window message.
2228 *
2229 * @returns Strict VBox status code.
2230 * @param pVM The cross context VM structure.
2231 * @param pVCpu The cross context per CPU structure.
2232 * @param pMsg The message.
2233 * @param pCtx The register context.
2234 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2235 * @sa nemR3WinHandleExitInterruptWindow
2236 */
2237NEM_TMPL_STATIC VBOXSTRICTRC
2238nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg,
2239 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2240{
2241 /*
2242 * Assert message sanity.
2243 */
2244 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2245 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2246 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2247 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2248 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2249
2250 /*
2251 * Just copy the state we've got and handle it in the loop for now.
2252 */
2253 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2254 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2255 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2256 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2257
2258 /** @todo call nemHCWinHandleInterruptFF */
2259 RT_NOREF(pVM, pGVCpu);
2260 return VINF_SUCCESS;
2261}
2262#elif defined(IN_RING3)
2263/**
2264 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2265 *
2266 * @returns Strict VBox status code.
2267 * @param pVM The cross context VM structure.
2268 * @param pVCpu The cross context per CPU structure.
2269 * @param pExit The VM exit information to handle.
2270 * @param pCtx The register context.
2271 * @sa nemHCWinHandleMessageInterruptWindow
2272 */
2273NEM_TMPL_STATIC VBOXSTRICTRC
2274nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2275{
2276 /*
2277 * Assert message sanity.
2278 */
2279 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2280 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2281 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2282 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2283
2284 /*
2285 * Just copy the state we've got and handle it in the loop for now.
2286 */
2287 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2288 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2289 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2290 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2291 pExit->VpContext.ExecutionState.InterruptShadow));
2292
2293 /** @todo call nemHCWinHandleInterruptFF */
2294 RT_NOREF(pVM);
2295 return VINF_SUCCESS;
2296}
2297#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2298
2299#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2300/**
2301 * Deals with CPUID intercept message.
2302 *
2303 * @returns Strict VBox status code.
2304 * @param pVCpu The cross context per CPU structure.
2305 * @param pMsg The message.
2306 * @param pCtx The register context.
2307 */
2308NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx)
2309{
2310 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2311
2312 /*
2313 * Soak up state and execute the instruction.
2314 *
2315 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2316 * function and make everyone use it.
2317 */
2318 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2319 * only get weirder with nested VT-x and AMD-V support. */
2320 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2321
2322 /* Copy in the low register values (top is always cleared). */
2323 pCtx->rax = (uint32_t)pMsg->Rax;
2324 pCtx->rcx = (uint32_t)pMsg->Rcx;
2325 pCtx->rdx = (uint32_t)pMsg->Rdx;
2326 pCtx->rbx = (uint32_t)pMsg->Rbx;
2327 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2328
2329 /* Get the correct values. */
2330 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2331
2332 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2333 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2334 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2335 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx,
2336 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2337
2338 /* Move RIP and we're done. */
2339 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2340
2341 return VINF_SUCCESS;
2342}
2343#elif defined(IN_RING3)
2344/**
2345 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2346 *
2347 * @returns Strict VBox status code.
2348 * @param pVM The cross context VM structure.
2349 * @param pVCpu The cross context per CPU structure.
2350 * @param pExit The VM exit information to handle.
2351 * @param pCtx The register context.
2352 * @sa nemHCWinHandleMessageCpuId
2353 */
2354NEM_TMPL_STATIC VBOXSTRICTRC
2355nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2356{
2357 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2358
2359 /*
2360 * Soak up state and execute the instruction.
2361 *
2362 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2363 * function and make everyone use it.
2364 */
2365 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2366 * only get weirder with nested VT-x and AMD-V support. */
2367 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2368
2369 /* Copy in the low register values (top is always cleared). */
2370 pCtx->rax = (uint32_t)pExit->CpuidAccess.Rax;
2371 pCtx->rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2372 pCtx->rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2373 pCtx->rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2374 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2375
2376 /* Get the correct values. */
2377 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2378
2379 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2380 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2381 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2382 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx,
2383 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2384
2385 /* Move RIP and we're done. */
2386 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2387
2388 RT_NOREF_PV(pVM);
2389 return VINF_SUCCESS;
2390}
2391#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2392
2393#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2394/**
2395 * Deals with MSR intercept message.
2396 *
2397 * @returns Strict VBox status code.
2398 * @param pVCpu The cross context per CPU structure.
2399 * @param pMsg The message.
2400 * @param pCtx The register context.
2401 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2402 * @sa nemR3WinHandleExitMsr
2403 */
2404NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg,
2405 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2406{
2407 /*
2408 * A wee bit of sanity first.
2409 */
2410 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2411 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2412 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2413
2414 /*
2415 * Check CPL as that's common to both RDMSR and WRMSR.
2416 */
2417 VBOXSTRICTRC rcStrict;
2418 if (pMsg->Header.ExecutionState.Cpl == 0)
2419 {
2420 /*
2421 * Get all the MSR state. Since we're getting EFER, we also need to
2422 * get CR0, CR4 and CR3.
2423 */
2424 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2425 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2426 CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2427 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2428 "MSRs");
2429 if (rcStrict == VINF_SUCCESS)
2430 {
2431
2432 /*
2433 * Handle writes.
2434 */
2435 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2436 {
2437 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2438 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2439 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2440 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2441 if (rcStrict == VINF_SUCCESS)
2442 {
2443 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2444 return VINF_SUCCESS;
2445 }
2446# ifndef IN_RING3
2447 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2448 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2449 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2450 return rcStrict;
2451# else
2452 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2453 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2454 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2455# endif
2456 }
2457 /*
2458 * Handle reads.
2459 */
2460 else
2461 {
2462 uint64_t uValue = 0;
2463 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2464 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2465 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2466 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2467 if (rcStrict == VINF_SUCCESS)
2468 {
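            /* RDMSR returns the value in EDX:EAX; the high halves of RAX and
               RDX end up zero here as both assignments are 32-bit values. */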
2469 pCtx->rax = (uint32_t)uValue;
2470 pCtx->rdx = uValue >> 32;
2471 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2472 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2473 return VINF_SUCCESS;
2474 }
2475# ifndef IN_RING3
2476 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2477 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2478 rcStrict = VINF_CPUM_R3_MSR_READ;
2479 return rcStrict;
2480# else
2481 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2482 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2483 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2484# endif
2485 }
2486 }
2487 else
2488 {
2489 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2490 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2491 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2492 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2493 return rcStrict;
2494 }
2495 }
2496 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2497 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2498 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2499 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2500 else
2501 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2502 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2503 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2504
2505 /*
2506 * If we get down here, we're supposed to #GP(0).
2507 */
2508 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2509 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2510 if (rcStrict == VINF_SUCCESS)
2511 {
2512 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2513 if (rcStrict == VINF_IEM_RAISED_XCPT)
2514 rcStrict = VINF_SUCCESS;
2515 else if (rcStrict != VINF_SUCCESS)
2516 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2517 }
2518 return rcStrict;
2519}
2520#elif defined(IN_RING3)
2521/**
2522 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2523 *
2524 * @returns Strict VBox status code.
2525 * @param pVM The cross context VM structure.
2526 * @param pVCpu The cross context per CPU structure.
2527 * @param pExit The VM exit information to handle.
2528 * @param pCtx The register context.
2529 * @sa nemHCWinHandleMessageMsr
2530 */
2531NEM_TMPL_STATIC VBOXSTRICTRC
2532nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2533{
2534 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2535
2536 /*
2537 * Check CPL as that's common to both RDMSR and WRMSR.
2538 */
2539 VBOXSTRICTRC rcStrict;
2540 if (pExit->VpContext.ExecutionState.Cpl == 0)
2541 {
2542 /*
2543 * Get all the MSR state. Since we're getting EFER, we also need to
2544 * get CR0, CR4 and CR3.
2545 */
2546 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2547 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2548 CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2549 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2550 "MSRs");
2551 if (rcStrict == VINF_SUCCESS)
2552 {
2553 /*
2554 * Handle writes.
2555 */
2556 if (pExit->MsrAccess.AccessInfo.IsWrite)
2557 {
2558 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
2559 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
2560 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2561 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2562 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2563 if (rcStrict == VINF_SUCCESS)
2564 {
2565 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2566 return VINF_SUCCESS;
2567 }
2568 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
2569 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2570 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
2571 VBOXSTRICTRC_VAL(rcStrict) ));
2572 }
2573 /*
2574 * Handle reads.
2575 */
2576 else
2577 {
2578 uint64_t uValue = 0;
2579 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
2580 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
2581 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2582 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2583 if (rcStrict == VINF_SUCCESS)
2584 {
2585 pCtx->rax = (uint32_t)uValue;
2586 pCtx->rdx = uValue >> 32;
2587 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2588 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2589 return VINF_SUCCESS;
2590 }
2591 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2592 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2593 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2594 }
2595 }
2596 else
2597 {
2598 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2599 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2600 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2601 return rcStrict;
2602 }
2603 }
2604 else if (pExit->MsrAccess.AccessInfo.IsWrite)
2605 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2606 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2607 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
2608 else
2609 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2610 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2611 pExit->MsrAccess.MsrNumber));
2612
2613 /*
2614 * If we get down here, we're supposed to #GP(0).
2615 */
2616 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2617 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2618 if (rcStrict == VINF_SUCCESS)
2619 {
2620 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2621 if (rcStrict == VINF_IEM_RAISED_XCPT)
2622 rcStrict = VINF_SUCCESS;
2623 else if (rcStrict != VINF_SUCCESS)
2624 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2625 }
2626
2627 RT_NOREF_PV(pVM);
2628 return rcStrict;
2629}
2630#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2631
2632
2633/**
2634 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
2635 * checks if the given opcodes are of interest at all.
2636 *
2637 * @returns true if interesting, false if not.
2638 * @param cbOpcodes Number of opcode bytes available.
2639 * @param pbOpcodes The opcode bytes.
2640 * @param f64BitMode Whether we're in 64-bit mode.
2641 */
2642DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
2643{
2644 /*
2645 * Currently only interested in VMCALL and VMMCALL.
2646 */
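    /* Both encodings are three bytes (VMCALL = 0f 01 c1, VMMCALL = 0f 01 d9),
       hence the three byte minimum in the loop condition. */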
2647 while (cbOpcodes >= 3)
2648 {
2649 switch (pbOpcodes[0])
2650 {
2651 case 0x0f:
2652 switch (pbOpcodes[1])
2653 {
2654 case 0x01:
2655 switch (pbOpcodes[2])
2656 {
2657 case 0xc1: /* 0f 01 c1 VMCALL */
2658 return true;
2659 case 0xd9: /* 0f 01 d9 VMMCALL */
2660 return true;
2661 default:
2662 break;
2663 }
2664 break;
2665 }
2666 break;
2667
2668 default:
2669 return false;
2670
2671 /* prefixes */
2672 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
2673 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
2674 if (!f64BitMode)
2675 return false;
2676 RT_FALL_THRU();
2677 case X86_OP_PRF_CS:
2678 case X86_OP_PRF_SS:
2679 case X86_OP_PRF_DS:
2680 case X86_OP_PRF_ES:
2681 case X86_OP_PRF_FS:
2682 case X86_OP_PRF_GS:
2683 case X86_OP_PRF_SIZE_OP:
2684 case X86_OP_PRF_SIZE_ADDR:
2685 case X86_OP_PRF_LOCK:
2686 case X86_OP_PRF_REPZ:
2687 case X86_OP_PRF_REPNZ:
2688 cbOpcodes--;
2689 pbOpcodes++;
2690 continue;
2691 }
2692 break;
2693 }
2694 return false;
2695}
2696
2697
2698#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2699/**
2700 * Copies state included in an exception intercept message.
2701 *
2702 * @param pVCpu The cross context per CPU structure.
2703 * @param pMsg The message.
2704 * @param pCtx The register context.
2705 * @param fClearXcpt Clear pending exception.
2706 */
2707DECLINLINE(void) nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg,
2708 PCPUMCTX pCtx, bool fClearXcpt)
2709{
2710 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2711 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
2712 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
2713 pCtx->rax = pMsg->Rax;
2714 pCtx->rcx = pMsg->Rcx;
2715 pCtx->rdx = pMsg->Rdx;
2716 pCtx->rbx = pMsg->Rbx;
2717 pCtx->rsp = pMsg->Rsp;
2718 pCtx->rbp = pMsg->Rbp;
2719 pCtx->rsi = pMsg->Rsi;
2720 pCtx->rdi = pMsg->Rdi;
2721 pCtx->r8 = pMsg->R8;
2722 pCtx->r9 = pMsg->R9;
2723 pCtx->r10 = pMsg->R10;
2724 pCtx->r11 = pMsg->R11;
2725 pCtx->r12 = pMsg->R12;
2726 pCtx->r13 = pMsg->R13;
2727 pCtx->r14 = pMsg->R14;
2728 pCtx->r15 = pMsg->R15;
2729 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2730 NEM_WIN_COPY_BACK_SEG(pCtx->ss, pMsg->SsSegment);
2731}
2732#elif defined(IN_RING3)
2733/**
2734 * Copies state included in an exception intercept exit.
2735 *
2736 * @param pVCpu The cross context per CPU structure.
2737 * @param pExit The VM exit information.
2738 * @param pCtx The register context.
2739 * @param fClearXcpt Clear pending exception.
2740 */
2741DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit,
2742 PCPUMCTX pCtx, bool fClearXcpt)
2743{
2744 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2745 if (fClearXcpt)
2746 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2747}
2748#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2749
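/*
 * The helpers above rely on the fExtrn convention: a set bit means that piece
 * of guest state still lives in Hyper-V and must be imported before use, and
 * copying a field out of an exit message clears its bit.  A tiny sketch with
 * made-up bit values (not the real CPUMCTX_EXTRN_XXX constants):
 */
#if 0
#include <stdint.h>

#define SKETCH_EXTRN_RIP    UINT64_C(0x0001)
#define SKETCH_EXTRN_GPRS   UINT64_C(0x0002)
#define SKETCH_EXTRN_SS     UINT64_C(0x0004)

typedef struct SKETCHCTX { uint64_t fExtrn, rip, rax; } SKETCHCTX;

static void sketchCopyFromExit(SKETCHCTX *pCtx, uint64_t uExitRip, uint64_t uExitRax)
{
    pCtx->rip = uExitRip;
    pCtx->rax = uExitRax;
    pCtx->fExtrn &= ~(SKETCH_EXTRN_RIP | SKETCH_EXTRN_GPRS);  /* now valid locally */
    /* SKETCH_EXTRN_SS stays set: whoever needs SS must import it first. */
}
#endif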
2750
2751#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2752/**
2753 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
2754 *
2755 * @returns Strict VBox status code.
2756 * @param pVCpu The cross context per CPU structure.
2757 * @param pMsg The message.
2758 * @param pCtx The register context.
2759 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2760 * @sa      nemR3WinHandleExitException
2761 */
2762NEM_TMPL_STATIC VBOXSTRICTRC
2763nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
2764{
2765 /*
2766 * Assert sanity.
2767 */
2768 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2769 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2770 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2771 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
2772
2773 /*
2774 * Get most of the register state since we'll end up making IEM inject the
2775     * event. The exception isn't normally flagged as a pending event, so duh.
2776 *
2777 * Note! We can optimize this later with event injection.
2778 */
2779 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
2780 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2781 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
2782 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, pCtx, true /*fClearXcpt*/);
2783 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2784 if (pMsg->ExceptionVector == X86_XCPT_DB)
2785 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
2786 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, fWhat, "Xcpt");
2787 if (rcStrict != VINF_SUCCESS)
2788 return rcStrict;
2789
2790 /*
2791 * Handle the intercept.
2792 */
2793 TRPMEVENT enmEvtType = TRPM_TRAP;
2794 switch (pMsg->ExceptionVector)
2795 {
2796 /*
2797 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
2798 * and need to turn them over to GIM.
2799 *
2800 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
2801 * #UD for handling non-native hypercall instructions. (IEM will
2802 * decode both and let the GIM provider decide whether to accept it.)
2803 */
2804 case X86_XCPT_UD:
2805 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
2806 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
2807 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
2808 {
2809 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip, pMsg->InstructionBytes,
2810 pMsg->InstructionByteCount);
2811 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
2812 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
2813 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
2814 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
2815 return rcStrict;
2816 }
2817 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
2818 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
2819 break;
2820
2821 /*
2822 * Filter debug exceptions.
2823 */
2824 case X86_XCPT_DB:
2825 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
2826 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
2827 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
2828 break;
2829
2830 case X86_XCPT_BP:
2831 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
2832 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
2833 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
2834 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
2835 break;
2836
2837 /* This shouldn't happen. */
2838 default:
2839 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
2840 }
2841
2842 /*
2843 * Inject it.
2844 */
2845 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
2846 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
2847    Log4(("XcptExit/%u: %04x:%08RX64/%s: %#x -> injected -> %Rrc\n",
2848 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
2849 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
2850 return rcStrict;
2851}
2852#elif defined(IN_RING3)
2853/**
2854 * Deals with exception intercept exits (WHvRunVpExitReasonException).
2855 *
2856 * @returns Strict VBox status code.
2857 * @param pVM The cross context VM structure.
2858 * @param pVCpu The cross context per CPU structure.
2859 * @param pExit The VM exit information to handle.
2860 * @param pCtx The register context.
2861 * @sa      nemHCWinHandleMessageException
2862 */
2863NEM_TMPL_STATIC VBOXSTRICTRC
2864nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2865{
2866 /*
2867 * Assert sanity.
2868 */
2869 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2870
2871 /*
2872 * Get most of the register state since we'll end up making IEM inject the
2873     * event. The exception isn't normally flagged as a pending event, so duh.
2874 *
2875 * Note! We can optimize this later with event injection.
2876 */
2877 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2878 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
2879 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
2880 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
2881 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2882 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
2883 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
2884 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, fWhat, "Xcpt");
2885 if (rcStrict != VINF_SUCCESS)
2886 return rcStrict;
2887
2888 /*
2889 * Handle the intercept.
2890 */
2891 TRPMEVENT enmEvtType = TRPM_TRAP;
2892 switch (pExit->VpException.ExceptionType)
2893 {
2894 /*
2895 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
2896 * and need to turn them over to GIM.
2897 *
2898 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
2899 * #UD for handling non-native hypercall instructions. (IEM will
2900 * decode both and let the GIM provider decide whether to accept it.)
2901 */
2902 case X86_XCPT_UD:
2903 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
2904 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
2905 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
2906 {
2907 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
2908 pExit->VpException.InstructionBytes,
2909 pExit->VpException.InstructionByteCount);
2910 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
2911 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
2912 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
2913 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
2914 return rcStrict;
2915 }
2916
2917 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
2918 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2919 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
2920 break;
2921
2922 /*
2923 * Filter debug exceptions.
2924 */
2925 case X86_XCPT_DB:
2926 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
2927 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
2928 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
2929 break;
2930
2931 case X86_XCPT_BP:
2932 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
2933 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2934 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
2935 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
2936 break;
2937
2938 /* This shouldn't happen. */
2939 default:
2940 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
2941 }
2942
2943 /*
2944 * Inject it.
2945 */
2946 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
2947 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
2948    Log4(("XcptExit/%u: %04x:%08RX64/%s: %#x -> injected -> %Rrc\n",
2949 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
2950 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
2951
2952 RT_NOREF_PV(pVM);
2953 return rcStrict;
2954}
2955#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2956
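/*
 * Why #BP is injected as TRPM_SOFTWARE_INT in both handlers above: the
 * intercept reports RIP still pointing at the INT3 instruction, whereas the
 * other intercepted vectors behave like ordinary traps.  Sketch (names made
 * up, excluded from the build):
 */
#if 0
#include <stdint.h>

typedef enum { SKETCHEVT_TRAP, SKETCHEVT_SOFTWARE_INT } SKETCHEVTTYPE;

static SKETCHEVTTYPE sketchEventTypeForVector(uint8_t bVector)
{
    return bVector == 3 /* #BP */ ? SKETCHEVT_SOFTWARE_INT : SKETCHEVT_TRAP;
}
#endif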
2957
2958#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2959/**
2960 * Deals with unrecoverable exception (triple fault).
2961 *
2962 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
2963 * here too. So we'll leave it to IEM to decide.
2964 *
2965 * @returns Strict VBox status code.
2966 * @param pVCpu The cross context per CPU structure.
2967 * @param pMsgHdr The message header.
2968 * @param pCtx The register context.
2969 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2970 * @sa nemR3WinHandleExitUnrecoverableException
2971 */
2972NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu,
2973 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr,
2974 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2975{
2976 AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength));
2977
2978# if 0
2979 /*
2980 * Just copy the state we've got and handle it in the loop for now.
2981 */
2982 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
2983 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
2984         pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
2985 return VINF_EM_TRIPLE_FAULT;
2986# else
2987 /*
2988 * Let IEM decide whether this is really it.
2989 */
2990 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
2991 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2992 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
2993 if (rcStrict == VINF_SUCCESS)
2994 {
2995 rcStrict = IEMExecOne(pVCpu);
2996 if (rcStrict == VINF_SUCCESS)
2997 {
2998 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2999 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3000 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3001 return VINF_SUCCESS;
3002 }
3003 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3004 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3005                 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
3006 else
3007 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3008 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3009 }
3010 else
3011 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
3012 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3013 return rcStrict;
3014# endif
3015}
3016#elif defined(IN_RING3)
3017/**
3018 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
3019 *
3020 * @returns Strict VBox status code.
3021 * @param pVM The cross context VM structure.
3022 * @param pVCpu The cross context per CPU structure.
3023 * @param pExit The VM exit information to handle.
3024 * @param pCtx The register context.
3025 * @sa nemHCWinHandleMessageUnrecoverableException
3026 */
3027NEM_TMPL_STATIC VBOXSTRICTRC
3028nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3029{
3030 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
3031
3032# if 0
3033 /*
3034 * Just copy the state we've got and handle it in the loop for now.
3035 */
3036 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3037 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3038 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3039 RT_NOREF_PV(pVM);
3040 return VINF_EM_TRIPLE_FAULT;
3041# else
3042 /*
3043 * Let IEM decide whether this is really it.
3044 */
3045 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
3046 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
3047 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
3048 if (rcStrict == VINF_SUCCESS)
3049 {
3050 rcStrict = IEMExecOne(pVCpu);
3051 if (rcStrict == VINF_SUCCESS)
3052 {
3053 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3054 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
3055 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3056 return VINF_SUCCESS;
3057 }
3058 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3059 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3060                 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
3061 else
3062 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3063 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3064 }
3065 else
3066 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3067 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3068 RT_NOREF_PV(pVM);
3069 return rcStrict;
3070# endif
3072}
3073#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3074
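/*
 * On the WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes mentioned in the handlers
 * above: the variable-range MTRRs are base/mask MSR pairs starting at 0x200,
 * so 0x201 is the very first mask register, which grub pokes early during
 * boot.  Sketch of the pair numbering:
 */
#if 0
#include <stdint.h>

static uint32_t sketchMtrrPhysBaseN(unsigned iPair) { return 0x200 + 2 * iPair; }     /* IA32_MTRR_PHYSBASEn */
static uint32_t sketchMtrrPhysMaskN(unsigned iPair) { return 0x200 + 2 * iPair + 1; } /* IA32_MTRR_PHYSMASKn */
#endif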
3075
3076#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3077/**
3078 * Handles messages (VM exits).
3079 *
3080 * @returns Strict VBox status code.
3081 * @param pVM The cross context VM structure.
3082 * @param pVCpu The cross context per CPU structure.
3083 * @param pMappingHeader The message slot mapping.
3084 * @param pCtx The register context.
3085 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3086 * @sa nemR3WinHandleExit
3087 */
3088NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3089 PCPUMCTX pCtx, PGVMCPU pGVCpu)
3090{
3091 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3092 {
3093 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
3094 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3095 switch (pMsg->Header.MessageType)
3096 {
3097 case HvMessageTypeUnmappedGpa:
3098 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3099 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3100 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3101
3102 case HvMessageTypeGpaIntercept:
3103 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3104 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3105 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3106
3107 case HvMessageTypeX64IoPortIntercept:
3108 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3109 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3110 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx, pGVCpu);
3111
3112 case HvMessageTypeX64Halt:
3113 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3114 Log4(("HaltExit\n"));
3115 return VINF_EM_HALT;
3116
3117 case HvMessageTypeX64InterruptWindow:
3118 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3119 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3120 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pCtx, pGVCpu);
3121
3122 case HvMessageTypeX64CpuidIntercept:
3123 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3124 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3125 return nemHCWinHandleMessageCpuId(pVCpu, &pMsg->X64CpuIdIntercept, pCtx);
3126
3127 case HvMessageTypeX64MsrIntercept:
3128 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3129 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3130 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pCtx, pGVCpu);
3131
3132 case HvMessageTypeX64ExceptionIntercept:
3133 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3134 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3135 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pCtx, pGVCpu);
3136
3137 case HvMessageTypeUnrecoverableException:
3138 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3139 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3140 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu);
3141
3142 case HvMessageTypeInvalidVpRegisterValue:
3143 case HvMessageTypeUnsupportedFeature:
3144 case HvMessageTypeTlbPageSizeMismatch:
3145 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3146 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3147 VERR_NEM_IPE_3);
3148
3149 case HvMessageTypeX64ApicEoi:
3150 case HvMessageTypeX64LegacyFpError:
3151 case HvMessageTypeX64RegisterIntercept:
3152 case HvMessageTypeApicEoi:
3153 case HvMessageTypeFerrAsserted:
3154 case HvMessageTypeEventLogBufferComplete:
3155 case HvMessageTimerExpired:
3156 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3157 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3158 VERR_NEM_IPE_3);
3159
3160 default:
3161 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3162 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3163 VERR_NEM_IPE_3);
3164 }
3165 }
3166 else
3167 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3168 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3169 VERR_NEM_IPE_4);
3170}
3171#elif defined(IN_RING3)
3172/**
3173 * Handles VM exits.
3174 *
3175 * @returns Strict VBox status code.
3176 * @param pVM The cross context VM structure.
3177 * @param pVCpu The cross context per CPU structure.
3178 * @param pExit The VM exit information to handle.
3179 * @param pCtx The register context.
3180 * @sa nemHCWinHandleMessage
3181 */
3182NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3183{
3184 switch (pExit->ExitReason)
3185 {
3186 case WHvRunVpExitReasonMemoryAccess:
3187 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3188 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit, pCtx);
3189
3190 case WHvRunVpExitReasonX64IoPortAccess:
3191 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3192 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit, pCtx);
3193
3194 case WHvRunVpExitReasonX64Halt:
3195 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3196 Log4(("HaltExit\n"));
3197 return VINF_EM_HALT;
3198
3199 case WHvRunVpExitReasonCanceled:
3200 return VINF_SUCCESS;
3201
3202 case WHvRunVpExitReasonX64InterruptWindow:
3203 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3204 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit, pCtx);
3205
3206 case WHvRunVpExitReasonX64Cpuid:
3207 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3208 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit, pCtx);
3209
3210 case WHvRunVpExitReasonX64MsrAccess:
3211 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3212 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit, pCtx);
3213
3214 case WHvRunVpExitReasonException:
3215 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3216 return nemR3WinHandleExitException(pVM, pVCpu, pExit, pCtx);
3217
3218 case WHvRunVpExitReasonUnrecoverableException:
3219 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3220 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit, pCtx);
3221
3222 case WHvRunVpExitReasonUnsupportedFeature:
3223 case WHvRunVpExitReasonInvalidVpRegisterValue:
3224 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3225 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3226 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3227
3228 /* Undesired exits: */
3229 case WHvRunVpExitReasonNone:
3230 default:
3231 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3232 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3233 }
3234}
3235#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3236
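/*
 * For reference, a bare-bones Windows Hypervisor Platform run loop as the
 * ring-3 path above consumes it; partition setup, state syncing and most exit
 * reasons are elided, so this is a sketch rather than working code.
 */
#if 0
#include <WinHvPlatform.h> /* link against WinHvPlatform.lib */

static void sketchWHvRunLoop(WHV_PARTITION_HANDLE hPartition, UINT32 iVp)
{
    for (;;)
    {
        WHV_RUN_VP_EXIT_CONTEXT ExitCtx;
        HRESULT hrc = WHvRunVirtualProcessor(hPartition, iVp, &ExitCtx, sizeof(ExitCtx));
        if (FAILED(hrc))
            break;
        switch (ExitCtx.ExitReason)
        {
            case WHvRunVpExitReasonX64Halt:   /* guest executed HLT */
            case WHvRunVpExitReasonCanceled:  /* WHvCancelRunVirtualProcessor poked us */
                return;
            default:
                /* dispatch to per-reason handlers, as nemR3WinHandleExit does above */
                break;
        }
    }
}
#endif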
3237
3238#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3239/**
3240 * Worker for nemHCWinRunGC that stops the execution on the way out.
3241 *
3242 * The CPU was running the last time we checked, so there are no messages that
3243 * need marking as handled/whatever. The caller checks this.
3244 *
3245 * @returns rcStrict on success, error status on failure.
3246 * @param pVM The cross context VM structure.
3247 * @param pVCpu The cross context per CPU structure.
3248 * @param rcStrict The nemHCWinRunGC return status. This is a little
3249 * bit unnecessary, except in internal error cases,
3250 * since we won't need to stop the CPU if we took an
3251 * exit.
3252 * @param pMappingHeader The message slot mapping.
3253 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3254 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3255 */
3256NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3257 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3258 PGVM pGVM, PGVMCPU pGVCpu)
3259{
3260 /*
3261 * Try stopping the processor. If we're lucky we manage to do this before it
3262 * does another VM exit.
3263 */
3264# ifdef IN_RING0
3265 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3266 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3267 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3268 NULL, 0);
3269 if (NT_SUCCESS(rcNt))
3270 {
3271 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3272 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3273 return rcStrict;
3274 }
3275# else
3276 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3277 if (fRet)
3278 {
3279 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3280 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3281 return rcStrict;
3282 }
3283 RT_NOREF(pGVM, pGVCpu);
3284# endif
3285
3286 /*
3287     * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3288 */
3289# ifdef IN_RING0
3290 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3291 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3292# else
3293 DWORD dwErr = RTNtLastErrorValue();
3294 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3295 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3296# endif
3297 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3298 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3299
3300 /*
3301 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3302     * Note! We can safely ASSUME that rcStrict isn't carrying any important information.
3303 */
3304# ifdef IN_RING0
3305 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3306 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3307 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3308 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3309 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3310 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3311 NULL, 0);
3312 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3313 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3314# else
3315 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3316 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3317 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3318 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3319# endif
3320
3321 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3322 if (enmVidMsgType != VidMessageStopRequestComplete)
3323 {
3324 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu);
3325 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3326 rcStrict = rcStrict2;
3327
3328 /*
3329 * Mark it as handled and get the stop request completed message, then mark
3330         * that as handled too. The CPU is then back in the fully stopped state.
3331 */
3332# ifdef IN_RING0
3333 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3334 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE;
3335 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3336 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3337 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3338 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3339 NULL, 0);
3340 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3341 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3342# else
3343 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3344 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3345 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3346 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3347# endif
3348
3349 /* It should be a stop request completed message. */
3350 enmVidMsgType = pMappingHeader->enmVidMsgType;
3351 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3352 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3353 enmVidMsgType, pMappingHeader->cbMessage),
3354 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3355
3356 /*
3357 * Mark the VidMessageStopRequestComplete message as handled.
3358 */
3359# ifdef IN_RING0
3360 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3361 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE;
3362 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3363 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3364 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3365 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3366 NULL, 0);
3367 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3368 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3369# else
3370 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3371 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3372 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3373# endif
3374 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3375 }
3376 else
3377 {
3378 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
3379 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
3380 VBOXSTRICTRC_VAL(rcStrict) ));
3381 }
3382 return rcStrict;
3383}
3384#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
3385
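/*
 * Condensed flow of nemHCWinStopCpu above (ring-3 flavour), since the VID
 * handshake is easy to lose in the #ifdef forest; the Vid* semantics are
 * inferred from observation, not documented:
 *
 *   1. VidStopVirtualProcessor succeeds -> the CPU stopped cleanly, done.
 *   2. Otherwise GetLastError() == ERROR_VID_STOP_PENDING: the CPU produced
 *      an exit of its own first.  Fetch it (GET_NEXT) and, unless it already
 *      is VidMessageStopRequestComplete, handle it.
 *   3. Ack the exit and fetch the next message (HANDLE | GET_NEXT), which
 *      must be VidMessageStopRequestComplete.
 *   4. Ack that one too (HANDLE); the CPU is now fully stopped.
 */
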
3386#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
3387
3388/**
3389 * Deals with pending interrupt related force flags, may inject interrupt.
3390 *
3391 * @returns VBox strict status code.
3392 * @param pVM The cross context VM structure.
3393 * @param pVCpu The cross context per CPU structure.
3394 * @param pGVCpu The global (ring-0) per CPU structure.
3395 * @param pCtx The register context.
3396 * @param pfInterruptWindows Where to return interrupt window flags.
3397 */
3398NEM_TMPL_STATIC VBOXSTRICTRC
3399nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows)
3400{
3401 Assert(!TRPMHasTrap(pVCpu));
3402 RT_NOREF_PV(pVM);
3403
3404 /*
3405 * First update APIC. We ASSUME this won't need TPR/CR8.
3406 */
3407 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3408 {
3409 APICUpdatePendingInterrupts(pVCpu);
3410 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3411 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3412 return VINF_SUCCESS;
3413 }
3414
3415 /*
3416 * We don't currently implement SMIs.
3417 */
3418 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3419
3420 /*
3421 * Check if we've got the minimum of state required for deciding whether we
3422 * can inject interrupts and NMIs. If we don't have it, get all we might require
3423 * for injection via IEM.
3424 */
3425 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3426 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3427 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3428 if (pCtx->fExtrn & fNeedExtrn)
3429 {
3430 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3431 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3432 if (rcStrict != VINF_SUCCESS)
3433 return rcStrict;
3434 }
3435 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3436 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
3437
3438 /*
3439 * NMI? Try deliver it first.
3440 */
3441 if (fPendingNmi)
3442 {
3443 if ( !fInhibitInterrupts
3444 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3445 {
3446 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3447 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
3448 if (rcStrict == VINF_SUCCESS)
3449 {
3450 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3451 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
3452 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3453 }
3454 return rcStrict;
3455 }
3456 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
3457 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
3458 }
3459
3460 /*
3461 * APIC or PIC interrupt?
3462 */
3463 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3464 {
3465 if ( !fInhibitInterrupts
3466 && pCtx->rflags.Bits.u1IF)
3467 {
3468 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
3469 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3470                                                                        NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "Int");
3471 if (rcStrict == VINF_SUCCESS)
3472 {
3473 uint8_t bInterrupt;
3474 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
3475 if (RT_SUCCESS(rc))
3476 {
3477 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
3478 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3479 }
3480 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3481 {
3482 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
3483 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
3484 }
3485 else
3486 Log8(("PDMGetInterrupt failed -> %d\n", rc));
3487 }
3488 return rcStrict;
3489 }
3490 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
3491 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
3492 }
3493
3494 return VINF_SUCCESS;
3495}
3496
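/*
 * On the '(bInterrupt >> 4)' above: the top nibble of a vector is its APIC
 * priority class, and an interrupt is masked while its class is <= the TPR's
 * class (TPR bits 7:4).  The NEM_WIN_INTW_F_PRIO_SHIFT packing itself is
 * VBox-specific; this build-excluded sketch only shows the class arithmetic.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int sketchIsMaskedByTpr(uint8_t bVector, uint8_t bTpr)
{
    return (bVector >> 4) <= (bTpr >> 4);
}

static void sketchTprTest(void)
{
    assert( sketchIsMaskedByTpr(0x41, 0x50)); /* class 4 <= class 5: masked */
    assert(!sketchIsMaskedByTpr(0x61, 0x50)); /* class 6 >  class 5: deliverable */
}
#endif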
3497
3498/**
3499 * Inner NEM runloop for windows.
3500 *
3501 * @returns Strict VBox status code.
3502 * @param pVM The cross context VM structure.
3503 * @param pVCpu The cross context per CPU structure.
3504 * @param pGVM The ring-0 VM structure (NULL in ring-3).
3505 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
3506 */
3507NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
3508{
3509 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3510 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags));
3511# ifdef LOG_ENABLED
3512 if (LogIs3Enabled())
3513 nemHCWinLogState(pVM, pVCpu);
3514# endif
3515# ifdef IN_RING0
3516 Assert(pVCpu->idCpu == pGVCpu->idCpu);
3517# endif
3518
3519 /*
3520 * Try switch to NEM runloop state.
3521 */
3522 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
3523 { /* likely */ }
3524 else
3525 {
3526 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3527 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
3528 return VINF_SUCCESS;
3529 }
3530
3531 /*
3532 * The run loop.
3533 *
3534     * The current approach to state updating is to use the sledgehammer and sync
3535     * everything every time. This will be optimized later.
3536 */
3537# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3538 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
3539 uint32_t cMillies = 5000; /** @todo lower this later... */
3540# endif
3541 const bool fSingleStepping = DBGFIsStepping(pVCpu);
3542// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
3543// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
3544// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
3545 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3546 for (unsigned iLoop = 0;; iLoop++)
3547 {
3548# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3549 /*
3550 * Hack alert!
3551 */
3552 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
3553 if (cMappedPages >= 4000)
3554 {
3555 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
3556 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
3557 }
3558# endif
3559
3560 /*
3561 * Pending interrupts or such? Need to check and deal with this prior
3562 * to the state syncing.
3563 */
3564 pVCpu->nem.s.fDesiredInterruptWindows = 0;
3565 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
3566 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3567 {
3568# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3569 /* Make sure the CPU isn't executing. */
3570 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3571 {
3572 pVCpu->nem.s.fHandleAndGetFlags = 0;
3573 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3574 if (rcStrict == VINF_SUCCESS)
3575 { /* likely */ }
3576 else
3577 {
3578 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3579 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3580 break;
3581 }
3582 }
3583# endif
3584
3585 /* Try inject interrupt. */
3586 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, pCtx, &pVCpu->nem.s.fDesiredInterruptWindows);
3587 if (rcStrict == VINF_SUCCESS)
3588 { /* likely */ }
3589 else
3590 {
3591 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3592 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3593 break;
3594 }
3595 }
3596
3597 /*
3598 * Ensure that hyper-V has the whole state.
3599         * (We always update the interrupt window settings when active as hyper-V seems
3600 * to forget about it after an exit.)
3601 */
3602 if ( (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
3603 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
3604 || pVCpu->nem.s.fDesiredInterruptWindows
3605 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
3606 {
3607# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3608 Assert(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */);
3609# endif
3610# ifdef IN_RING0
3611 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx);
3612# else
3613 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx);
3614 RT_NOREF(pGVM, pGVCpu);
3615# endif
3616 AssertRCReturn(rc2, rc2);
3617 }
3618
3619 /*
3620 * Run a bit.
3621 */
3622 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
3623 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3624 {
3625# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3626 if (pVCpu->nem.s.fHandleAndGetFlags)
3627 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
3628 else
3629 {
3630# ifdef IN_RING0
3631 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3632 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
3633 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3634 NULL, 0);
3635 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
3636 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
3637 VERR_NEM_IPE_5);
3638# else
3639 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
3640 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
3641 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
3642 VERR_NEM_IPE_5);
3643# endif
3644 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3645 }
3646# endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
3647
3648 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
3649 {
3650# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3651# ifdef IN_RING0
3652 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3653 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
3654 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3655 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3656 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3657 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3658 NULL, 0);
3659 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3660 if (rcNt == STATUS_SUCCESS)
3661# else
3662 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3663 pVCpu->nem.s.fHandleAndGetFlags, cMillies);
3664 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3665 if (fRet)
3666# endif
3667# else
3668 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
3669 RT_ZERO(ExitReason);
3670 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
3671 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3672 if (SUCCEEDED(hrc))
3673# endif
3674 {
3675 /*
3676 * Deal with the message.
3677 */
3678# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3679 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu);
3680 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
3681# else
3682 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason, pCtx);
3683# endif
3684 if (rcStrict == VINF_SUCCESS)
3685 { /* hopefully likely */ }
3686 else
3687 {
3688 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3689 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3690 break;
3691 }
3692 }
3693 else
3694 {
3695# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3696
3697 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
3698 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
3699 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
3700# ifndef IN_RING0
3701 DWORD rcNt = GetLastError();
3702# endif
3703 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
3704 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
3705 || rcNt == STATUS_ALERTED /* just in case */
3706 || rcNt == STATUS_USER_APC /* ditto */
3707 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
3708 pVCpu->idCpu, rcNt, rcNt),
3709 VERR_NEM_IPE_0);
3710 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3711 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
3712# else
3713 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
3714 pVCpu->idCpu, hrc, GetLastError()),
3715 VERR_NEM_IPE_0);
3716
3717# endif
3718 }
3719
3720 /*
3721 * If no relevant FFs are pending, loop.
3722 */
3723 if ( !VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3724 && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3725 continue;
3726
3727 /** @todo Try handle pending flags, not just return to EM loops. Take care
3728 * not to set important RCs here unless we've handled a message. */
3729 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
3730 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
3731 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
3732 }
3733 else
3734 {
3735 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
3736 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
3737 }
3738 }
3739 else
3740 {
3741 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
3742 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
3743 }
3744 break;
3745 } /* the run loop */
3746
3747
3748 /*
3749 * If the CPU is running, make sure to stop it before we try sync back the
3750 * state and return to EM. We don't sync back the whole state if we can help it.
3751 */
3752# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3753 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3754 {
3755 pVCpu->nem.s.fHandleAndGetFlags = 0;
3756 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3757 }
3758# endif
3759
3760 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
3761 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3762
3763 if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
3764 {
3765 /* Try anticipate what we might need. */
3766 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
3767 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
3768 || RT_FAILURE(rcStrict))
3769 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
3770# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
3771 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
3772 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
3773 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
3774 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
3775 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
3776# endif
3777 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
3778 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3779 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
3780
3781 if (pCtx->fExtrn & fImport)
3782 {
3783# ifdef IN_RING0
3784 int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
3785 if (RT_SUCCESS(rc2))
3786 pCtx->fExtrn &= ~fImport;
3787 else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
3788 {
3789 pCtx->fExtrn &= ~fImport;
3790 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
3791 rcStrict = -rc2;
3792 else
3793 {
3794 pVCpu->nem.s.rcPending = -rc2;
3795 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
3796 }
3797 }
3798# else
3799 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
3800 if (RT_SUCCESS(rc2))
3801 pCtx->fExtrn &= ~fImport;
3802# endif
3803 else if (RT_SUCCESS(rcStrict))
3804 rcStrict = rc2;
3805 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
3806 pCtx->fExtrn = 0;
3807 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
3808 }
3809 else
3810 {
3811 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
3812 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3813 }
3814 }
3815 else
3816 {
3817 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
3818 pCtx->fExtrn = 0;
3819 }
3820
3821 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
3822 pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3823 return rcStrict;
3824}
3825
3826#endif /* defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) */
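
/*
 * The VMCPUSTATE compare-exchange bracketing in nemHCWinRunGC above is what
 * lets another EMT cancel us.  A hypothetical mirror using C11 atomics; the
 * real states and helpers live in the VMM, this is illustration only.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

enum { SK_STARTED, SK_EXEC_NEM, SK_EXEC_NEM_WAIT, SK_EXEC_NEM_CANCELED };

static bool sketchTryEnterNem(_Atomic int *penmState)
{
    int enmExpected = SK_STARTED;
    return atomic_compare_exchange_strong(penmState, &enmExpected, SK_EXEC_NEM);
}

static void sketchCancelNem(_Atomic int *penmState)
{
    int enmExpected = SK_EXEC_NEM;
    atomic_compare_exchange_strong(penmState, &enmExpected, SK_EXEC_NEM_CANCELED);
    /* the real code additionally pokes the CPU out of its VID/WHv wait */
}
#endif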
3827
3828/**
3829 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
3830 */
3831NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
3832 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
3833{
3834 /* We'll just unmap the memory. */
3835 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
3836 {
3837#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3838 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
3839 AssertRC(rc);
3840 if (RT_SUCCESS(rc))
3841#else
3842 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
3843 if (SUCCEEDED(hrc))
3844#endif
3845 {
3846 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3847 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
3848 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
3849 }
3850 else
3851 {
3852#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3853 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
3854 return rc;
3855#else
3856 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3857 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3858 return VERR_NEM_IPE_2;
3859#endif
3860 }
3861 }
3862 RT_NOREF(pVCpu, pvUser);
3863 return VINF_SUCCESS;
3864}
3865
3866
3867/**
3868 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
3869 *
3870 * @returns The PGMPhysNemQueryPageInfo result.
3871 * @param pVM The cross context VM structure.
3872 * @param pVCpu The cross context virtual CPU structure.
3873 * @param GCPhys The page to unmap.
3874 */
3875NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
3876{
3877 PGMPHYSNEMPAGEINFO Info;
3878 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
3879 nemHCWinUnsetForA20CheckerCallback, NULL);
3880}
3881
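/*
 * A20 refresher for the helpers here: with the gate disabled, address line 20
 * is forced low, so 1MB aliases 0MB and GCPhysDst differs from GCPhysSrc by a
 * cleared bit 20.  Illustrative masking only; VBox routes the real decision
 * through PGM rather than a bare AND.
 */
#if 0
#include <stdint.h>

static uint64_t sketchApplyA20(uint64_t GCPhys, int fA20Enabled)
{
    return fA20Enabled ? GCPhys : GCPhys & ~(uint64_t)(1u << 20);
}
#endif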
3882
3883void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3884{
3885 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3886 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3887}
3888
3889
3890void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3891 int fRestoreAsRAM, bool fRestoreAsRAM2)
3892{
3893 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
3894 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
3895 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
3896}
3897
3898
3899void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3900 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3901{
3902 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3903 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3904 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3905}
3906
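/*
 * nemHCNativeSetPhysPage below first collapses the requested protection to one
 * of the three page states PGM lets us track (unmapped/readable/writable).
 * Sketch of that reduction with made-up flag values, excluded from the build:
 */
#if 0
#include <stdint.h>

#define SK_PROT_READ    UINT32_C(1)
#define SK_PROT_WRITE   UINT32_C(2)
enum { SK_PAGE_UNMAPPED = 1, SK_PAGE_READABLE, SK_PAGE_WRITABLE };

static uint8_t sketchTargetPageState(uint32_t fProt)
{
    return fProt & SK_PROT_WRITE ? SK_PAGE_WRITABLE
         : fProt & SK_PROT_READ  ? SK_PAGE_READABLE : SK_PAGE_UNMAPPED;
}
#endif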
3907
3908/**
3909 * Worker that maps pages into Hyper-V.
3910 *
3911 * This is used by the PGM physical page notifications as well as the memory
3912 * access VMEXIT handlers.
3913 *
3914 * @returns VBox status code.
3915 * @param pVM The cross context VM structure.
3916 * @param pVCpu The cross context virtual CPU structure of the
3917 * calling EMT.
3918 * @param GCPhysSrc The source page address.
3919 * @param GCPhysDst The hyper-V destination page. This may differ from
3920 * GCPhysSrc when A20 is disabled.
3921 * @param fPageProt NEM_PAGE_PROT_XXX.
3922 * @param pu2State Our page state (input/output).
3923 * @param fBackingChanged Set if the page backing is being changed.
3924 * @thread EMT(pVCpu)
3925 */
3926NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
3927 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
3928{
3929#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3930 /*
3931 * When using the hypercalls instead of the ring-3 APIs, we don't need to
3932 * unmap memory before modifying it. We still want to track the state though,
3933     * since unmap will fail when called on an unmapped page and we don't want to redo
3934 * upgrades/downgrades.
3935 */
3936 uint8_t const u2OldState = *pu2State;
3937 int rc;
3938 if (fPageProt == NEM_PAGE_PROT_NONE)
3939 {
3940 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3941 {
3942 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
3943 if (RT_SUCCESS(rc))
3944 {
3945 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3946 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3947 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3948 }
3949 else
3950 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3951 }
3952 else
3953 rc = VINF_SUCCESS;
3954 }
3955 else if (fPageProt & NEM_PAGE_PROT_WRITE)
3956 {
3957 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
3958 {
3959 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3960 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
3961 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3962 if (RT_SUCCESS(rc))
3963 {
3964 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3965 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
3966 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
3967 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3968 NOREF(cMappedPages);
3969 }
3970 else
3971 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3972 }
3973 else
3974 rc = VINF_SUCCESS;
3975 }
3976 else
3977 {
3978 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
3979 {
3980 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3981 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3982 if (RT_SUCCESS(rc))
3983 {
3984 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3985 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
3986 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
3987 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3988 NOREF(cMappedPages);
3989 }
3990 else
3991            AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3992 }
3993 else
3994 rc = VINF_SUCCESS;
3995 }
3996
3997    return rc;
3998
3999#else
4000 /*
4001 * Looks like we need to unmap a page before we can change the backing
4002 * or even modify the protection. This is going to be *REALLY* efficient.
4003 * PGM lends us two bits to keep track of the state here.
4004 */
4005 uint8_t const u2OldState = *pu2State;
4006 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
4007 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
4008 if ( fBackingChanged
4009 || u2NewState != u2OldState)
4010 {
4011 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
4012 {
4013# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
4014 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
4015 AssertRC(rc);
4016 if (RT_SUCCESS(rc))
4017 {
4018 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4019 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4020 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4021 {
4022 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4023 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4024 return VINF_SUCCESS;
4025 }
4026 }
4027 else
4028 {
4029 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
4030 return rc;
4031 }
4032# else
4033 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
4034 if (SUCCEEDED(hrc))
4035 {
4036 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
4037 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
4038 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
4039 {
4040 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
4041 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
4042 return VINF_SUCCESS;
4043 }
4044 }
4045 else
4046 {
4047 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
4048 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
4049 return VERR_NEM_INIT_FAILED;
4050 }
4051# endif
4052 }
4053 }
4054
    /*
     * Writeable mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    if (fPageProt & NEM_PAGE_PROT_READ)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_READABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    /* We already unmapped it above. */
    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    return VINF_SUCCESS;
#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
}


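/**
 * Unmaps a page from Hyper-V without remapping it, keeping the two-bit page
 * state up to date (set to NEM_WIN_PAGE_STATE_UNMAPPED on success).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The page to unmap.
 * @param   pu2State    The NEM page state of the page.
 */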
NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
    return rc;
#else
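    /* Page-align the GPA here: WHvUnmapGpaRange takes a page range, while the
       hypercall path above passes GCPhysDst through unmodified. */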
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
#endif
}


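/**
 * Called when a page has been allocated, i.e. its backing may have changed
 * and any existing hypervisor mapping has to be redone.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address backing the page (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    The NEM page state of the page.
 */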
int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

    int rc;
#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
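        /* Setting bit 20 gives the address of the alias in the 1 MB
           wrap-around window; for a page already inside that window it is
           the page itself. */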
        rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
            rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        rc = nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else
        rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
#endif
    return rc;
}


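/**
 * Called when the protection of a page has changed without the backing
 * changing (hence fBackingChanged=false below).
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address backing the page (unused here).
 * @param   fPageProt   The new NEM_PAGE_PROT_XXX protection.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    The NEM page state of the page.
 */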
void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                          PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}


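/**
 * Called when the backing of a page has been replaced (HCPhysPrev ->
 * HCPhysNew), so the mapping has to be redone (fBackingChanged=true below).
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhysPrev  The previous host physical backing (unused here).
 * @param   HCPhysNew   The new host physical backing (unused here).
 * @param   fPageProt   NEM_PAGE_PROT_XXX.
 * @param   enmType     The page type (unused here).
 * @param   pu2State    The NEM page state of the page.
 */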
void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                      uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemHCJustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}