/* $Id: NEMAllNativeTemplate-win.cpp.h 72490 2018-06-09 15:11:13Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
            do { \
                (a_Dst).u64Base  = (a_Src).Base; \
                (a_Dst).u32Limit = (a_Src).Limit; \
                (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
                (a_Dst).Attr.u   = (a_Src).Attributes; \
                (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
            } while (0)


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);


#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

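/**
 * Copies the guest state over to Hyper-V, either via the ring-0 hypercall
 * path (VMMR0_DO_NEM_EXPORT_STATE) or directly via
 * WHvSetVirtualProcessorRegisters, depending on
 * NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx        The guest CPU context to copy from.
 */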
NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    NOREF(pCtx);
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
    AssertLogRelRCReturn(rc, rc);
    return rc;

# else
    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

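    /* Helper macros for batching up register name/value pairs for the
       WHvSetVirtualProcessorRegisters call at the bottom of the function. */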
# define ADD_REG64(a_enmName, a_uValue) do { \
            aenmNames[iReg] = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64 = (a_uValue); \
            iReg++; \
        } while (0)
# define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
            aenmNames[iReg] = (a_enmName); \
            aValues[iReg].Reg128.Low64  = (a_uValueLo); \
            aValues[iReg].Reg128.High64 = (a_uValueHi); \
            iReg++; \
        } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pCtx->rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pCtx->rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pCtx->rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pCtx->rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pCtx->rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pCtx->rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pCtx->rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pCtx->rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8, pCtx->r8);
            ADD_REG64(WHvX64RegisterR9, pCtx->r9);
            ADD_REG64(WHvX64RegisterR10, pCtx->r10);
            ADD_REG64(WHvX64RegisterR11, pCtx->r11);
            ADD_REG64(WHvX64RegisterR12, pCtx->r12);
            ADD_REG64(WHvX64RegisterR13, pCtx->r13);
            ADD_REG64(WHvX64RegisterR14, pCtx->r14);
            ADD_REG64(WHvX64RegisterR15, pCtx->r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pCtx->rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pCtx->rflags.u);

    /* Segments */
# define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pCtx->es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pCtx->cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pCtx->ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pCtx->ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pCtx->fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pCtx->gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pCtx->ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pCtx->tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg] = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
            aValues[iReg].Table.Base  = pCtx->idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg] = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
            aValues[iReg].Table.Base  = pCtx->gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pCtx->cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pCtx->cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pCtx->cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pCtx->cr4);
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme. Figure out what the hyper-v version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pCtx->dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pCtx->dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pCtx->dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pCtx->dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pCtx->dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pCtx->dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1]);

        aenmNames[iReg] = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pCtx->pXStateR3->x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pCtx->pXStateR3->x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pCtx->pXStateR3->x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pCtx->pXStateR3->x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
                                                | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
                                                | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp            = (pCtx->pXStateR3->x87.FPUDP)
                                                            | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
                                                            | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pCtx->pXStateR3->x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pCtx->msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pCtx->msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pCtx->SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pCtx->SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pCtx->SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pCtx->msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pCtx->msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pCtx->msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pCtx->msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pCtx->msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows. Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
# endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

# undef ADD_REG64
# undef ADD_REG128
# undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


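/**
 * Copies (imports) guest state back from Hyper-V into the CPU context, either
 * via the ring-0 hypercall path (VMMR0_DO_NEM_IMPORT_STATE) or directly via
 * WHvGetVirtualProcessorRegisters, depending on
 * NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pCtx        The guest CPU context to update.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */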
NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
{
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* See NEMR0ImportState */
    NOREF(pCtx);
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
    if (RT_SUCCESS(rc))
        return rc;
    if (rc == VERR_NEM_FLUSH_TLB)
        return PGMFlushTLB(pVCpu, pCtx->cr3, true /*fGlobal*/);
    if (rc == VERR_NEM_CHANGE_PGM_MODE)
        return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    AssertLogRelRCReturn(rc, rc);
    return rc;

# else
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pCtx->fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
        aenmNames[iReg++] = WHvX64RegisterCr8;

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0;
    aenmNames[iReg++] = WHvRegisterPendingEvent1;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
# endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
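    /* Helper macros for unpacking the values in the same order the names
       were queued up above; each asserts the expected name before copying. */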
# define GET_REG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
# define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            if ((a_DstVar) != aValues[iReg].Reg64) \
                Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
# define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
            Assert(aenmNames[iReg] == a_enmName); \
            (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
            (a_DstVarHi) = aValues[iReg].Reg128.High64; \
            iReg++; \
        } while (0)
# define GET_SEG(a_SReg, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
            iReg++; \
        } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pCtx->rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pCtx->rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pCtx->rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pCtx->rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pCtx->rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pCtx->rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pCtx->rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pCtx->rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pCtx->r8, WHvX64RegisterR8);
            GET_REG64(pCtx->r9, WHvX64RegisterR9);
            GET_REG64(pCtx->r10, WHvX64RegisterR10);
            GET_REG64(pCtx->r11, WHvX64RegisterR11);
            GET_REG64(pCtx->r12, WHvX64RegisterR12);
            GET_REG64(pCtx->r13, WHvX64RegisterR13);
            GET_REG64(pCtx->r14, WHvX64RegisterR14);
            GET_REG64(pCtx->r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pCtx->rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pCtx->rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pCtx->es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pCtx->cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pCtx->ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pCtx->ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pCtx->fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pCtx->gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pCtx->ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR in the AVAIL state, whereas Intel insists on BUSY.  So,
               to avoid triggering sanity assertions around the code, always fix this up. */
            GET_SEG(pCtx->tr, WHvX64RegisterTr);
            switch (pCtx->tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pCtx->idtr.cbIdt = aValues[iReg].Table.Limit;
            pCtx->idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pCtx->gdtr.cbGdt = aValues[iReg].Table.Limit;
            pCtx->gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fFlushTlb         = false;
    bool fFlushGlobalTlb   = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pCtx->cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pCtx->cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pCtx->cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fFlushTlb = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pCtx->cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
    }
    if (fWhat & CPUMCTX_EXTRN_APIC_TPR)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterCr8);
        APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
        iReg++;
    }

    /* Debug registers. */
    /** @todo fixme */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg]   == WHvX64RegisterDr0);
        Assert(aenmNames[iReg+3] == WHvX64RegisterDr3);
        if (pCtx->dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pCtx->dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pCtx->dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pCtx->pXStateR3->x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pCtx->pXStateR3->x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pCtx->pXStateR3->x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                   /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pCtx->pXStateR3->x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pCtx->pXStateR3->x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pCtx->pXStateR3->x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pCtx->pXStateR3->x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pCtx->pXStateR3->x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pCtx->pXStateR3->x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pCtx->pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pCtx->msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pCtx->msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pCtx->msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pCtx->SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pCtx->SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pCtx->SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pCtx->msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pCtx->msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pCtx->msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pCtx->msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", VBOXSTRICTRC_VAL(rc2), aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP");
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux,           WHvX64RegisterTscAux,             "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg] == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pCtx->fExtrn &= ~fWhat;
    if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
        pCtx->fExtrn = 0;

    /* Typical. */
    if (!fMaybeChangedMode && !fFlushTlb)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    }

    if (fFlushTlb)
    {
        int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


/**
 * Interface for importing state on demand (used by IEM).
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context CPU structure.
 * @param   pCtx        The target CPU context.
 * @param   fWhat       What to import, CPUMCTX_EXTRN_XXX.
 */
VMM_INT_DECL(int) NEMImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
{
    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnDemand);

#ifdef IN_RING0
    /** @todo improve and secure this translation */
    PGVM pGVM = GVMMR0ByHandle(pVCpu->pVMR0->hSelf);
    AssertReturn(pGVM, VERR_INVALID_VMCPU_HANDLE);
    VMCPUID idCpu = pVCpu->idCpu;
    ASMCompilerBarrier();
    AssertReturn(idCpu < pGVM->cCpus, VERR_INVALID_VMCPU_HANDLE);

    return nemR0WinImportState(pGVM, &pGVM->aCpus[idCpu], pCtx, fWhat);
#else
    return nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
#endif
}


#ifdef LOG_ENABLED
/**
 * Get the virtual processor running status.
 */
DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
{
# ifdef IN_RING0
    NOREF(pVCpu);
    return VidProcessorStatusUndefined;
# else
    RTERRVARS Saved;
    RTErrVarsSave(&Saved);

    /*
     * This API is disabled in release builds, it seems.  On build 17101 it requires
     * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
     */
    VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
    NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
    AssertRC(rcNt);

    RTErrVarsRestore(&Saved);
    return enmCpuStatus;
# endif
}
#endif


#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
/**
 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
 *
 * This is an experiment only.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 */
NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Work the state.
     *
     * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
     * So, we just need to modify the state and kick the EMT if it's waiting on
     * messages.  For the latter we use QueueUserAPC / KeAlertThread.
     */
    for (;;)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
        switch (enmState)
        {
            case VMCPUSTATE_STARTED_EXEC_NEM:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
                {
                    Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
                    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
                    return VINF_SUCCESS;
                }
                break;

            case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
                {
# ifdef IN_RING0
                    NTSTATUS rcNt = KeAlertThread(??);
# else
                    NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
# endif
                    Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
                    Assert(rcNt == STATUS_SUCCESS);
                    if (NT_SUCCESS(rcNt))
                    {
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
                        return VINF_SUCCESS;
                    }
                    AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
                }
                break;

            default:
                return VINF_SUCCESS;
        }

        ASMNopPause();
        RT_NOREF(pVM);
    }
}
# endif /* IN_RING3 */
#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
{
    if (LogIs3Enabled())
    {
# ifdef IN_RING3
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                        "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                        "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                        "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                        "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                        "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                        "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                        "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                        "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                        "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                        "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                        "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                        "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                        "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim}  idtr=%016VR{idtr_base}:%04VR{idtr_lim}  rflags=%08VR{rflags}\n"
                        "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                        "tr  ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                        "    sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                        "        efer=%016VR{efer}\n"
                        "         pat=%016VR{pat}\n"
                        "     sf_mask=%016VR{sf_mask}\n"
                        "krnl_gs_base=%016VR{krnl_gs_base}\n"
                        "       lstar=%016VR{lstar}\n"
                        "        star=%016VR{star} cstar=%016VR{cstar}\n"
                        "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                        );

        char szInstr[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
# else
        /** @todo stat logging in ring-0 */
        RT_NOREF(pVM, pVCpu);
# endif
    }
}
#endif /* LOG_ENABLED */


/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
#define SWITCH_IT(a_szPrefix) \
    do \
        switch (u)\
        { \
            case 0x00: return a_szPrefix ""; \
            case 0x01: return a_szPrefix ",Pnd"; \
            case 0x02: return a_szPrefix ",Dbg"; \
            case 0x03: return a_szPrefix ",Pnd,Dbg"; \
            case 0x04: return a_szPrefix ",Shw"; \
            case 0x05: return a_szPrefix ",Pnd,Shw"; \
            case 0x06: return a_szPrefix ",Shw,Dbg"; \
            case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
            default: AssertFailedReturn("WTF?"); \
        } \
    while (0)

#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
/**
 * Translates the execution stat bitfield into a short log string, VID version.
 *
 * @returns Read-only log string.
 * @param   pMsgHdr     The header which state to summarize.
 */
static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
{
    unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
               | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
               | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
    if (pMsgHdr->ExecutionState.EferLma)
        SWITCH_IT("LM");
    else if (pMsgHdr->ExecutionState.Cr0Pe)
        SWITCH_IT("PM");
    else
        SWITCH_IT("RM");
}
#elif defined(IN_RING3)
/**
 * Translates the execution stat bitfield into a short log string, WinHv version.
 *
 * @returns Read-only log string.
 * @param   pExitCtx    The exit context which state to summarize.
 */
static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
{
    unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
               | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
               | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
    if (pExitCtx->ExecutionState.EferLma)
        SWITCH_IT("LM");
    else if (pExitCtx->ExecutionState.Cr0Pe)
        SWITCH_IT("PM");
    else
        SWITCH_IT("RM");
}
#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
#undef SWITCH_IT


#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
/**
 * Advances the guest RIP and clear EFLAGS.RF, VID version.
 *
 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The CPU context to update.
 * @param   pMsgHdr     The intercept message header.
 */
DECLINLINE(void) nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
{
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));

    /* Advance the RIP. */
    Assert(pMsgHdr->InstructionLength > 0 && pMsgHdr->InstructionLength < 16);
    pCtx->rip += pMsgHdr->InstructionLength;
    pCtx->rflags.Bits.u1RF = 0;

    /* Update interrupt inhibition. */
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    { /* likely */ }
    else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}
#elif defined(IN_RING3)
/**
 * Advances the guest RIP and clear EFLAGS.RF, WinHv version.
 *
 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The CPU context to update.
 * @param   pExitCtx    The exit context.
 */
DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
{
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));

    /* Advance the RIP. */
    Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
    pCtx->rip += pExitCtx->InstructionLength;
    pCtx->rflags.Bits.u1RF = 0;

    /* Update interrupt inhibition. */
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    { /* likely */ }
    else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}
#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */



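/**
 * Worker callback used when unmapping all guest pages: unmaps one page, either
 * via hypercall or WHvUnmapGpaRange, and updates its NEM page state and the
 * mapped page counter.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   GCPhys      The guest physical address of the page to unmap.
 * @param   pu2NemState The NEM page state variable to update.
 * @param   pvUser      Ignored.
 */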
1355NEM_TMPL_STATIC DECLCALLBACK(int)
1356nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
1357{
1358 RT_NOREF_PV(pvUser);
1359#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1360 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1361 AssertRC(rc);
1362 if (RT_SUCCESS(rc))
1363#else
1364 RT_NOREF_PV(pVCpu);
1365 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1366 if (SUCCEEDED(hrc))
1367#endif
1368 {
1369 Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
1370 *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1371 }
1372 else
1373 {
1374#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1375 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
1376#else
1377 LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1378 GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
1379 RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
1380#endif
1381 *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
1382 }
1383 if (pVM->nem.s.cMappedPages > 0)
1384 ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
1385 return VINF_SUCCESS;
1386}
1387
1388
1389/**
1390 * State to pass between nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory
1391 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
1392 */
1393typedef struct NEMHCWINHMACPCCSTATE
1394{
1395 /** Input: Write access. */
1396 bool fWriteAccess;
1397 /** Output: Set if we did something. */
1398 bool fDidSomething;
1399 /** Output: Set if we should resume. */
1400 bool fCanResume;
1401} NEMHCWINHMACPCCSTATE;
1402
1403/**
1404 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1405 * Worker for nemHCWinHandleMessageMemory / nemR3WinHandleExitMemory; pvUser points to a
1406 * NEMHCWINHMACPCCSTATE structure. }
1407 */
1408NEM_TMPL_STATIC DECLCALLBACK(int)
1409nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1410{
1411 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1412 pState->fDidSomething = false;
1413 pState->fCanResume = false;
1414
1415 /* If A20 is disabled, we may need to make another query on the masked
1416 page to get the correct protection information. */
1417 uint8_t u2State = pInfo->u2NemState;
1418 RTGCPHYS GCPhysSrc;
1419 if ( pVM->nem.s.fA20Enabled
1420 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1421 GCPhysSrc = GCPhys;
1422 else
1423 {
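        /* With A20 disabled, address bit 20 is forced to zero; mask it off so we
           query PGM about the page the access actually wraps around to. */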
1424 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1425 PGMPHYSNEMPAGEINFO Info2;
1426 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1427 AssertRCReturn(rc, rc);
1428
1429 *pInfo = Info2;
1430 pInfo->u2NemState = u2State;
1431 }
1432
1433 /*
1434 * Consolidate current page state with actual page protection and access type.
1435 * We don't really consider downgrades here, as they shouldn't happen.
1436 */
1437#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1438 /** @todo Someone at Microsoft please explain:
1439 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1440 * read-only page as writable (unmap, then map again). Specifically, this was
1441 * an issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1.
1442 * So, in the hope of working around that, we no longer pre-map anything, just
1443 * unmap stuff and do it lazily here. First we unmap and restart, and then we
1444 * remap with the new protection or backing.
1445 */
1446#endif
1447 int rc;
1448 switch (u2State)
1449 {
1450 case NEM_WIN_PAGE_STATE_UNMAPPED:
1451 case NEM_WIN_PAGE_STATE_NOT_SET:
1452 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1453 {
1454 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1455 return VINF_SUCCESS;
1456 }
1457
1458 /* Don't bother remapping it if it's a write request to a non-writable page. */
1459 if ( pState->fWriteAccess
1460 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1461 {
1462 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1463 return VINF_SUCCESS;
1464 }
1465
1466 /* Map the page. */
1467 rc = nemHCNativeSetPhysPage(pVM,
1468 pVCpu,
1469 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1470 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1471 pInfo->fNemProt,
1472 &u2State,
1473 true /*fBackingChanged*/);
1474 pInfo->u2NemState = u2State;
1475 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1476 GCPhys, g_apszPageStates[u2State], rc));
1477 pState->fDidSomething = true;
1478 pState->fCanResume = true;
1479 return rc;
1480
1481 case NEM_WIN_PAGE_STATE_READABLE:
1482 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1483 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1484 {
1485 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1486 return VINF_SUCCESS;
1487 }
1488
1489#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1490 /* Upgrade page to writable. */
1491/** @todo test this */
1492 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1493 && pState->fWriteAccess)
1494 {
1495 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1496 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1497 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1498 AssertRC(rc);
1499 if (RT_SUCCESS(rc))
1500 {
1501 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1502 pState->fDidSomething = true;
1503 pState->fCanResume = true;
1504 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1505 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1506 }
1507 }
1508 else
1509 {
1510 /* Need to emulate the access. */
1511 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1512 rc = VINF_SUCCESS;
1513 }
1514 return rc;
1515#else
1516 break;
1517#endif
1518
1519 case NEM_WIN_PAGE_STATE_WRITABLE:
1520 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1521 {
1522 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1523 return VINF_SUCCESS;
1524 }
1525#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1526 AssertFailed(); /* There should be no downgrades. */
1527#endif
1528 break;
1529
1530 default:
1531 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1532 }
1533
1534 /*
1535 * Unmap and restart the instruction.
1536 * If this fails, which it does every so often, just unmap everything for now.
1537 */
1538#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1539 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1540 AssertRC(rc);
1541 if (RT_SUCCESS(rc))
1542#else
1543 /** @todo figure out whether we mess up the state or if it's WHv. */
1544 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1545 if (SUCCEEDED(hrc))
1546#endif
1547 {
1548 pState->fDidSomething = true;
1549 pState->fCanResume = true;
1550 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1551 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1552 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1553 return VINF_SUCCESS;
1554 }
1555#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1556 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1557 return rc;
1558#else
1559 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1560 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1561 pVM->nem.s.cMappedPages));
1562
1563 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinUnmapOnePageCallback, NULL);
1564 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1565
1566 pState->fDidSomething = true;
1567 pState->fCanResume = true;
1568 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1569 return VINF_SUCCESS;
1570#endif
1571}
1572
1573
1574
1575#if defined(IN_RING0) && defined(NEM_WIN_USE_OUR_OWN_RUN_API)
1576/**
1577 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE, VERR_NEM_FLUSH_TLB
1578 * and VERR_NEM_UPDATE_APIC_BASE into informational status codes and logs+asserts statuses.
1579 *
1580 * @returns VBox strict status code.
1581 * @param pGVM The global (ring-0) VM structure.
1582 * @param pGVCpu The global (ring-0) per CPU structure.
1583 * @param pCtx The CPU context to import into.
1584 * @param fWhat What to import.
1585 * @param pszCaller Who is doing the importing.
1586 */
1587DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller)
1588{
1589 int rc = nemR0WinImportState(pGVM, pGVCpu, pCtx, fWhat);
1590 if (RT_SUCCESS(rc))
1591 {
1592 Assert(rc == VINF_SUCCESS);
1593 return VINF_SUCCESS;
1594 }
1595
1596 if (rc == VERR_NEM_CHANGE_PGM_MODE || rc == VERR_NEM_FLUSH_TLB || rc == VERR_NEM_UPDATE_APIC_BASE)
1597 {
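        /* These three statuses have informational (positive) twins; negating the
           VERR value yields the corresponding VINF code for the caller to act on. */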
1598 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1599 return -rc;
1600 }
1601 RT_NOREF(pszCaller);
1602 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1603}
1604#endif /* IN_RING0 && NEM_WIN_USE_OUR_OWN_RUN_API */
1605
1606#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
1607/**
1608 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1609 *
1610 * Unlike the wrapped APIs, this checks whether it's necessary.
1611 *
1612 * @returns VBox strict status code.
1613 * @param pVCpu The cross context per CPU structure.
1614 * @param pGVCpu The global (ring-0) per CPU structure (NULL in ring-3).
1615 * @param pCtx The CPU context to import into.
1616 * @param fWhat What to import.
1617 * @param pszCaller Who is doing the importing.
1618 */
1619DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx,
1620 uint64_t fWhat, const char *pszCaller)
1621{
1622 if (pCtx->fExtrn & fWhat)
1623 {
1624#ifdef IN_RING0
1625 RT_NOREF(pVCpu);
1626 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller);
1627#else
1628 RT_NOREF(pGVCpu, pszCaller);
1629 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1630 AssertRCReturn(rc, rc);
1631#endif
1632 }
1633 return VINF_SUCCESS;
1634}
1635#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || IN_RING3 */
1636
1637#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1638/**
1639 * Copies register state from the X64 intercept message header.
1640 *
1641 * ASSUMES no state copied yet.
1642 *
1643 * @param pVCpu The cross context per CPU structure.
1644 * @param pCtx The register context.
1645 * @param pHdr The X64 intercept message header.
1646 * @sa nemR3WinCopyStateFromX64Header
1647 */
1648DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1649{
1650 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1651 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
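    /* Note: a set fExtrn bit means that piece of state still lives only in
       Hyper-V; the bits are cleared again below once the values are copied in. */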
1652 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pHdr->CsSegment);
1653 pCtx->rip = pHdr->Rip;
1654 pCtx->rflags.u = pHdr->Rflags;
1655
1656 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1657 if (!pHdr->ExecutionState.InterruptShadow)
1658 {
1659 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1660 { /* likely */ }
1661 else
1662 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1663 }
1664 else
1665 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1666
1667 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1668}
1669#elif defined(IN_RING3)
1670/**
1671 * Copies register state from the (common) exit context.
1672 *
1673 * ASSUMES no state copied yet.
1674 *
1675 * @param pVCpu The cross context per CPU structure.
1676 * @param pCtx The register context.
1677 * @param pExitCtx The common exit context.
1678 * @sa nemHCWinCopyStateFromX64Header
1679 */
1680DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1681{
1682 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1683 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1684 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pExitCtx->Cs);
1685 pCtx->rip = pExitCtx->Rip;
1686 pCtx->rflags.u = pExitCtx->Rflags;
1687
1688 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1689 if (!pExitCtx->ExecutionState.InterruptShadow)
1690 {
1691 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1692 { /* likely */ }
1693 else
1694 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1695 }
1696 else
1697 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1698
1699 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1700}
1701#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1702
1703
1704#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1705/**
1706 * Deals with memory intercept message.
1707 *
1708 * @returns Strict VBox status code.
1709 * @param pVM The cross context VM structure.
1710 * @param pVCpu The cross context per CPU structure.
1711 * @param pMsg The message.
1712 * @param pCtx The register context.
1713 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1714 * @sa nemR3WinHandleExitMemory
1715 */
1716NEM_TMPL_STATIC VBOXSTRICTRC
1717nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1718{
1719 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1720 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1721 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1722 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1723
1724 /*
1725 * Whatever we do, we must clear pending event injection upon resume.
1726 */
1727 if (pMsg->Header.ExecutionState.InterruptionPending)
1728 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
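    /* (Clearing the EXTRN bit makes our clean local copy of the injection state
       authoritative, the idea presumably being that exporting it back to Hyper-V
       cancels the pending event.) */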
1729
1730#if 0 /* Experiment: 20K -> 34K exit/s. */
1731 if ( pMsg->Header.ExecutionState.EferLma
1732 && pMsg->Header.CsSegment.Long
1733 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1734 {
1735 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1736 && pMsg->InstructionBytes[0] == 0x89
1737 && pMsg->InstructionBytes[1] == 0x03)
1738 {
1739 pCtx->rip = pMsg->Header.Rip + 2;
1740 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
1741 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1742 //Log(("%RX64 msg:\n%.80Rhxd\n", pCtx->rip, pMsg));
1743 return VINF_SUCCESS;
1744 }
1745 }
1746#endif
1747
1748 /*
1749 * Ask PGM for information about the given GCPhys. We need to check if we're
1750 * out of sync first.
1751 */
1752 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
1753 PGMPHYSNEMPAGEINFO Info;
1754 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
1755 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1756 if (RT_SUCCESS(rc))
1757 {
1758 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1759 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1760 {
1761 if (State.fCanResume)
1762 {
1763 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1764 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1765 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1766 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1767 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1768 return VINF_SUCCESS;
1769 }
1770 }
1771 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1772 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1773 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1774 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1775 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1776 }
1777 else
1778 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1779 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1780 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
1781 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1782
1783 /*
1784 * Emulate the memory access, either access handler or special memory.
1785 */
1786 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1787 VBOXSTRICTRC rcStrict;
1788# ifdef IN_RING0
1789 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx,
1790 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES, "MemExit");
1791 if (rcStrict != VINF_SUCCESS)
1792 return rcStrict;
1793# else
1794 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
1795 AssertRCReturn(rc, rc);
1796 NOREF(pGVCpu);
1797# endif
1798
1799 if (pMsg->Reserved1)
1800 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
1801 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
1802 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
1803 //if (pMsg->InstructionByteCount > 0)
1804 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
1805
1806 if (pMsg->InstructionByteCount > 0)
1807 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip,
1808 pMsg->InstructionBytes, pMsg->InstructionByteCount);
1809 else
1810 rcStrict = IEMExecOne(pVCpu);
1811 /** @todo do we need to do anything wrt debugging here? */
1812 return rcStrict;
1813}
1814#elif defined(IN_RING3)
1815/**
1816 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1817 *
1818 * @returns Strict VBox status code.
1819 * @param pVM The cross context VM structure.
1820 * @param pVCpu The cross context per CPU structure.
1821 * @param pExit The VM exit information to handle.
1822 * @param pCtx The register context.
1823 * @sa nemHCWinHandleMessageMemory
1824 */
1825NEM_TMPL_STATIC VBOXSTRICTRC
1826nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
1827{
1828 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
1829 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
1830
1831 /*
1832 * Whatever we do, we must clear pending event injection upon resume.
1833 */
1834 if (pExit->VpContext.ExecutionState.InterruptionPending)
1835 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1836
1837 /*
1838 * Ask PGM for information about the given GCPhys. We need to check if we're
1839 * out of sync first.
1840 */
1841 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
1842 PGMPHYSNEMPAGEINFO Info;
1843 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
1844 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1845 if (RT_SUCCESS(rc))
1846 {
1847 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
1848 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1849 {
1850 if (State.fCanResume)
1851 {
1852 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1853 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1854 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1855 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1856 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1857 return VINF_SUCCESS;
1858 }
1859 }
1860 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1861 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1862 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1863 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1864 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1865 }
1866 else
1867 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1868 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1869 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
1870 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1871
1872 /*
1873 * Emulate the memory access, either access handler or special memory.
1874 */
1875 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
1876 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
1877 AssertRCReturn(rc, rc);
1878
1879 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
1880 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
1881 //if (pExit->MemoryAccess.InstructionByteCount > 0)
1882 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
1883
1884 VBOXSTRICTRC rcStrict;
1885 if (pExit->MemoryAccess.InstructionByteCount > 0)
1886 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
1887 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
1888 else
1889 rcStrict = IEMExecOne(pVCpu);
1890 /** @todo do we need to do anything wrt debugging here? */
1891 return rcStrict;
1892}
1893#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1894
1895
1896#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1897/**
1898 * Deals with I/O port intercept message.
1899 *
1900 * @returns Strict VBox status code.
1901 * @param pVM The cross context VM structure.
1902 * @param pVCpu The cross context per CPU structure.
1903 * @param pMsg The message.
1904 * @param pCtx The register context.
1905 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1906 */
1907NEM_TMPL_STATIC VBOXSTRICTRC
1908nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1909{
1910 Assert( pMsg->AccessInfo.AccessSize == 1
1911 || pMsg->AccessInfo.AccessSize == 2
1912 || pMsg->AccessInfo.AccessSize == 4);
1913 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1914 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
1915 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1916
1917 /*
1918 * Whatever we do, we must clear pending event injection upon resume.
1919 */
1920 if (pMsg->Header.ExecutionState.InterruptionPending)
1921 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1922
1923 VBOXSTRICTRC rcStrict;
1924 if (!pMsg->AccessInfo.StringOp)
1925 {
1926 /*
1927 * Simple port I/O.
1928 */
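        /* AND-mask table indexed by access size (1, 2 or 4 bytes); the unused
           slots default to pass-through (UINT32_MAX). */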
1929 static uint32_t const s_fAndMask[8] =
1930 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
1931 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
1932
1933 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1934 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1935 {
1936 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
1937 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
1938 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1939 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
1940 if (IOM_SUCCESS(rcStrict))
1941 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
1942# ifdef IN_RING0
1943 else if ( rcStrict == VINF_IOM_R3_IOPORT_WRITE
1944 && !pCtx->rflags.Bits.u1TF
1945 /** @todo check for debug breakpoints */ )
1946 return EMRZSetPendingIoPortWrite(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
1947 pMsg->AccessInfo.AccessSize, (uint32_t)pMsg->Rax & fAndMask);
1948# endif
1949 else
1950 {
1951 pCtx->rax = pMsg->Rax;
1952 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
1953 }
1954 }
1955 else
1956 {
1957 uint32_t uValue = 0;
1958 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
1959 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
1960 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1961 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
1962 if (IOM_SUCCESS(rcStrict))
1963 {
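                /* Follow x86 register semantics: an 8/16-bit IN merges into the low
                   bytes of RAX, while a 32-bit IN zero-extends into the full RAX. */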
1964 if (pMsg->AccessInfo.AccessSize != 4)
1965 pCtx->rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
1966 else
1967 pCtx->rax = uValue;
1968 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
1969 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax));
1970 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
1971 }
1972 else
1973 {
1974 pCtx->rax = pMsg->Rax;
1975 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
1976# ifdef IN_RING0
1977 if ( rcStrict == VINF_IOM_R3_IOPORT_READ
1978 && !pCtx->rflags.Bits.u1TF
1979 /** @todo check for debug breakpoints */ )
1980 return EMRZSetPendingIoPortRead(pVCpu, pMsg->PortNumber, pMsg->Header.InstructionLength,
1981 pMsg->AccessInfo.AccessSize);
1982# endif
1983 }
1984 }
1985 }
1986 else
1987 {
1988 /*
1989 * String port I/O.
1990 */
1991 /** @todo Someone at Microsoft please explain how we can get the address mode
1992 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
1993 * getting the default mode; it can always be overridden by a prefix. This
1994 * forces us to interpret the instruction from opcodes, which is suboptimal.
1995 * Both AMD-V and VT-x include the address size in the exit info, at least on
1996 * CPUs that are reasonably new.
1997 *
1998 * Of course, it's possible this is simply undocumented and we just need to do some
1999 * experiments to figure out how it's communicated. Alternatively, we can scan
2000 * the opcode bytes for possible evil prefixes.
2001 */
2002 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2003 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2004 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2005 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2006 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
2007 pCtx->rax = pMsg->Rax;
2008 pCtx->rcx = pMsg->Rcx;
2009 pCtx->rdi = pMsg->Rdi;
2010 pCtx->rsi = pMsg->Rsi;
2011# ifdef IN_RING0
2012 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2013 if (rcStrict != VINF_SUCCESS)
2014 return rcStrict;
2015# else
2016 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2017 AssertRCReturn(rc, rc);
2018 RT_NOREF(pGVCpu);
2019# endif
2020
2021 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2022 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2023 pMsg->AccessInfo.RepPrefix ? "REP " : "",
2024 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
2025 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
2026 rcStrict = IEMExecOne(pVCpu);
2027 }
2028 if (IOM_SUCCESS(rcStrict))
2029 {
2030 /*
2031 * Do debug checks.
2032 */
2033 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2034 || (pMsg->Header.Rflags & X86_EFL_TF)
2035 || DBGFBpIsHwIoArmed(pVM) )
2036 {
2037 /** @todo Debugging. */
2038 }
2039 }
2040 return rcStrict;
2041}
2042#elif defined(IN_RING3)
2043/**
2044 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
2045 *
2046 * @returns Strict VBox status code.
2047 * @param pVM The cross context VM structure.
2048 * @param pVCpu The cross context per CPU structure.
2049 * @param pExit The VM exit information to handle.
2050 * @param pCtx The register context.
2051 * @sa nemHCWinHandleMessageIoPort
2052 */
2053NEM_TMPL_STATIC VBOXSTRICTRC
2054nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2055{
2056 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2057 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2058 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2059 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2060
2061 /*
2062 * Whatever we do, we must clear pending event injection upon resume.
2063 */
2064 if (pExit->VpContext.ExecutionState.InterruptionPending)
2065 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2066
2067 VBOXSTRICTRC rcStrict;
2068 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2069 {
2070 /*
2071 * Simple port I/O.
2072 */
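        /* Same AND-mask table as in the VID-API variant above, indexed by access size. */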
2073 static uint32_t const s_fAndMask[8] =
2074 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2075 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2076 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2077 {
2078 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2079 pExit->IoPortAccess.AccessInfo.AccessSize);
2080 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2081 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2082 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2083 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2084 if (IOM_SUCCESS(rcStrict))
2085 {
2086 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2087 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2088 }
2089 }
2090 else
2091 {
2092 uint32_t uValue = 0;
2093 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue, pExit->IoPortAccess.AccessInfo.AccessSize);
2094 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2095 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2096 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2097 if (IOM_SUCCESS(rcStrict))
2098 {
2099 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2100 pCtx->rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2101 else
2102 pCtx->rax = uValue;
2103 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2104 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pCtx->rax));
2105 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2106 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2107 }
2108 }
2109 }
2110 else
2111 {
2112 /*
2113 * String port I/O.
2114 */
2115 /** @todo Someone at Microsoft please explain how we can get the address mode
2116 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2117 * getting the default mode; it can always be overridden by a prefix. This
2118 * forces us to interpret the instruction from opcodes, which is suboptimal.
2119 * Both AMD-V and VT-x include the address size in the exit info, at least on
2120 * CPUs that are reasonably new.
2121 *
2122 * Of course, it's possible this is simply undocumented and we just need to do some
2123 * experiments to figure out how it's communicated. Alternatively, we can scan
2124 * the opcode bytes for possible evil prefixes.
2125 */
2126 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2127 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2128 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2129 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2130 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2131 pCtx->rax = pExit->IoPortAccess.Rax;
2132 pCtx->rcx = pExit->IoPortAccess.Rcx;
2133 pCtx->rdi = pExit->IoPortAccess.Rdi;
2134 pCtx->rsi = pExit->IoPortAccess.Rsi;
2135 /* Note: this exit handler is only compiled for ring-3 (see the IN_RING3
2136 guard above), so we always fetch the remaining state via the WinHv API. */
2137 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2138 AssertRCReturn(rc, rc);
2143
2144 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2145 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2146 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2147 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2148 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2149 rcStrict = IEMExecOne(pVCpu);
2150 }
2151 if (IOM_SUCCESS(rcStrict))
2152 {
2153 /*
2154 * Do debug checks.
2155 */
2156 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2157 || (pExit->VpContext.Rflags & X86_EFL_TF)
2158 || DBGFBpIsHwIoArmed(pVM) )
2159 {
2160 /** @todo Debugging. */
2161 }
2162 }
2163 return rcStrict;
2164
2165}
2166#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2167
2168
2169#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2170/**
2171 * Deals with interrupt window message.
2172 *
2173 * @returns Strict VBox status code.
2174 * @param pVM The cross context VM structure.
2175 * @param pVCpu The cross context per CPU structure.
2176 * @param pMsg The message.
2177 * @param pCtx The register context.
2178 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2179 * @sa nemR3WinHandleExitInterruptWindow
2180 */
2181NEM_TMPL_STATIC VBOXSTRICTRC
2182nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg,
2183 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2184{
2185 /*
2186 * Assert message sanity.
2187 */
2188 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2189 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2190 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2191 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2192 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2193
2194 /*
2195 * Just copy the state we've got and handle it in the loop for now.
2196 */
2197 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2198 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2199 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2200 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2201
2202 /** @todo call nemHCWinHandleInterruptFF */
2203 RT_NOREF(pVM, pGVCpu);
2204 return VINF_SUCCESS;
2205}
2206#elif defined(IN_RING3)
2207/**
2208 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2209 *
2210 * @returns Strict VBox status code.
2211 * @param pVM The cross context VM structure.
2212 * @param pVCpu The cross context per CPU structure.
2213 * @param pExit The VM exit information to handle.
2214 * @param pCtx The register context.
2215 * @sa nemHCWinHandleMessageInterruptWindow
2216 */
2217NEM_TMPL_STATIC VBOXSTRICTRC
2218nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2219{
2220 /*
2221 * Assert message sanity.
2222 */
2223 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2224 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2225 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2226 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2227
2228 /*
2229 * Just copy the state we've got and handle it in the loop for now.
2230 */
2231 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2232 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2233 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2234 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2235 pExit->VpContext.ExecutionState.InterruptShadow));
2236
2237 /** @todo call nemHCWinHandleInterruptFF */
2238 RT_NOREF(pVM);
2239 return VINF_SUCCESS;
2240}
2241#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2242
2243#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2244/**
2245 * Deals with CPUID intercept message.
2246 *
2247 * @returns Strict VBox status code.
2248 * @param pVCpu The cross context per CPU structure.
2249 * @param pMsg The message.
2250 * @param pCtx The register context.
2251 */
2252NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx)
2253{
2254 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2255
2256 /*
2257 * Soak up state and execute the instruction.
2258 *
2259 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2260 * function and make everyone use it.
2261 */
2262 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2263 * only get weirder with nested VT-x and AMD-V support. */
2264 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2265
2266 /* Copy in the low register values (top is always cleared). */
2267 pCtx->rax = (uint32_t)pMsg->Rax;
2268 pCtx->rcx = (uint32_t)pMsg->Rcx;
2269 pCtx->rdx = (uint32_t)pMsg->Rdx;
2270 pCtx->rbx = (uint32_t)pMsg->Rbx;
2271 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2272
2273 /* Get the correct values. */
2274 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2275
2276 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2277 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2278 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2279 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx,
2280 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2281
2282 /* Move RIP and we're done. */
2283 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2284
2285 return VINF_SUCCESS;
2286}
2287#elif defined(IN_RING3)
2288/**
2289 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2290 *
2291 * @returns Strict VBox status code.
2292 * @param pVM The cross context VM structure.
2293 * @param pVCpu The cross context per CPU structure.
2294 * @param pExit The VM exit information to handle.
2295 * @param pCtx The register context.
2296 * @sa nemHCWinHandleMessageCpuId
2297 */
2298NEM_TMPL_STATIC VBOXSTRICTRC
2299nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2300{
2301 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2302
2303 /*
2304 * Soak up state and execute the instruction.
2305 *
2306 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2307 * function and make everyone use it.
2308 */
2309 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2310 * only get weirder with nested VT-x and AMD-V support. */
2311 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2312
2313 /* Copy in the low register values (top is always cleared). */
2314 pCtx->rax = (uint32_t)pExit->CpuidAccess.Rax;
2315 pCtx->rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2316 pCtx->rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2317 pCtx->rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2318 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2319
2320 /* Get the correct values. */
2321 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2322
2323 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2324 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2325 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2326 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx,
2327 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2328
2329 /* Move RIP and we're done. */
2330 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2331
2332 RT_NOREF_PV(pVM);
2333 return VINF_SUCCESS;
2334}
2335#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2336
2337#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2338/**
2339 * Deals with MSR intercept message.
2340 *
2341 * @returns Strict VBox status code.
2342 * @param pVCpu The cross context per CPU structure.
2343 * @param pMsg The message.
2344 * @param pCtx The register context.
2345 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2346 * @sa nemR3WinHandleExitMsr
2347 */
2348NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg,
2349 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2350{
2351 /*
2352 * A wee bit of sanity first.
2353 */
2354 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2355 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2356 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2357
2358 /*
2359 * Check CPL as that's common to both RDMSR and WRMSR.
2360 */
2361 VBOXSTRICTRC rcStrict;
2362 if (pMsg->Header.ExecutionState.Cpl == 0)
2363 {
2364 /*
2365 * Get all the MSR state. Since we're getting EFER, we also need to
2366 * get CR0, CR4 and CR3.
2367 */
2368 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2369 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2370 CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2371 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2372 "MSRs");
2373 if (rcStrict == VINF_SUCCESS)
2374 {
2375
2376 /*
2377 * Handle writes.
2378 */
2379 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2380 {
2381 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2382 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2383 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2384 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2385 if (rcStrict == VINF_SUCCESS)
2386 {
2387 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2388 return VINF_SUCCESS;
2389 }
2390# ifndef IN_RING3
2391 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2392 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2393 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2394 return rcStrict;
2395# else
2396 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2397 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2398 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2399# endif
2400 }
2401 /*
2402 * Handle reads.
2403 */
2404 else
2405 {
2406 uint64_t uValue = 0;
2407 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2408 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2409 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2410 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2411 if (rcStrict == VINF_SUCCESS)
2412 {
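                /* RDMSR returns the 64-bit value split across EDX:EAX. */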
2413 pCtx->rax = (uint32_t)uValue;
2414 pCtx->rdx = uValue >> 32;
2415 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2416 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2417 return VINF_SUCCESS;
2418 }
2419# ifndef IN_RING3
2420 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2421 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2422 rcStrict = VINF_CPUM_R3_MSR_READ;
2423 return rcStrict;
2424# else
2425 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2426 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2427 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2428# endif
2429 }
2430 }
2431 else
2432 {
2433 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2434 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2435 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2436 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2437 return rcStrict;
2438 }
2439 }
2440 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2441 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2442 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2443 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2444 else
2445 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2446 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2447 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2448
2449 /*
2450 * If we get down here, we're supposed to #GP(0).
2451 */
2452 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2453 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2454 if (rcStrict == VINF_SUCCESS)
2455 {
2456 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2457 if (rcStrict == VINF_IEM_RAISED_XCPT)
2458 rcStrict = VINF_SUCCESS;
2459 else if (rcStrict != VINF_SUCCESS)
2460 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2461 }
2462 return rcStrict;
2463}
2464#elif defined(IN_RING3)
2465/**
2466 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2467 *
2468 * @returns Strict VBox status code.
2469 * @param pVM The cross context VM structure.
2470 * @param pVCpu The cross context per CPU structure.
2471 * @param pExit The VM exit information to handle.
2472 * @param pCtx The register context.
2473 * @sa nemHCWinHandleMessageMsr
2474 */
2475NEM_TMPL_STATIC VBOXSTRICTRC
2476nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2477{
2478 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2479
2480 /*
2481 * Check CPL as that's common to both RDMSR and WRMSR.
2482 */
2483 VBOXSTRICTRC rcStrict;
2484 if (pExit->VpContext.ExecutionState.Cpl == 0)
2485 {
2486 /*
2487 * Get all the MSR state. Since we're getting EFER, we also need to
2488 * get CR0, CR4 and CR3.
2489 */
2490 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2491 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2492 CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2493 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2494 "MSRs");
2495 if (rcStrict == VINF_SUCCESS)
2496 {
2497 /*
2498 * Handle writes.
2499 */
2500 if (pExit->MsrAccess.AccessInfo.IsWrite)
2501 {
2502 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
2503 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
2504 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2505 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2506 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2507 if (rcStrict == VINF_SUCCESS)
2508 {
2509 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2510 return VINF_SUCCESS;
2511 }
2512 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
2513 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2514 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
2515 VBOXSTRICTRC_VAL(rcStrict) ));
2516 }
2517 /*
2518 * Handle reads.
2519 */
2520 else
2521 {
2522 uint64_t uValue = 0;
2523 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
2524 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
2525 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2526 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2527 if (rcStrict == VINF_SUCCESS)
2528 {
2529 pCtx->rax = (uint32_t)uValue;
2530 pCtx->rdx = uValue >> 32;
2531 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2532 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2533 return VINF_SUCCESS;
2534 }
2535 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2536 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2537 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2538 }
2539 }
2540 else
2541 {
2542 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2543 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2544 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2545 return rcStrict;
2546 }
2547 }
2548 else if (pExit->MsrAccess.AccessInfo.IsWrite)
2549 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2550 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2551 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
2552 else
2553 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2554 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2555 pExit->MsrAccess.MsrNumber));
2556
2557 /*
2558 * If we get down here, we're supposed to #GP(0).
2559 */
2560 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2561 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL_MSRS, "MSR");
2562 if (rcStrict == VINF_SUCCESS)
2563 {
2564 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2565 if (rcStrict == VINF_IEM_RAISED_XCPT)
2566 rcStrict = VINF_SUCCESS;
2567 else if (rcStrict != VINF_SUCCESS)
2568 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2569 }
2570
2571 RT_NOREF_PV(pVM);
2572 return rcStrict;
2573}
2574#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2575
2576
2577/**
2578 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
2579 * checks if the given opcodes are of interest at all.
2580 *
2581 * @returns true if interesting, false if not.
2582 * @param cbOpcodes Number of opcode bytes available.
2583 * @param pbOpcodes The opcode bytes.
2584 * @param f64BitMode Whether we're in 64-bit mode.
2585 */
2586DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
2587{
2588 /*
2589 * Currently only interested in VMCALL and VMMCALL.
2590 */
2591 while (cbOpcodes >= 3)
2592 {
2593 switch (pbOpcodes[0])
2594 {
2595 case 0x0f:
2596 switch (pbOpcodes[1])
2597 {
2598 case 0x01:
2599 switch (pbOpcodes[2])
2600 {
2601 case 0xc1: /* 0f 01 c1 VMCALL */
2602 return true;
2603 case 0xd9: /* 0f 01 d9 VMMCALL */
2604 return true;
2605 default:
2606 break;
2607 }
2608 break;
2609 }
2610 break;
2611
2612 default:
2613 return false;
2614
2615 /* prefixes */
2616 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
2617 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
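            /* REX bytes are only prefixes in 64-bit mode; in legacy modes
               0x40..0x4f decode as INC/DEC and cannot precede VMCALL/VMMCALL. */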
2618 if (!f64BitMode)
2619 return false;
2620 RT_FALL_THRU();
2621 case X86_OP_PRF_CS:
2622 case X86_OP_PRF_SS:
2623 case X86_OP_PRF_DS:
2624 case X86_OP_PRF_ES:
2625 case X86_OP_PRF_FS:
2626 case X86_OP_PRF_GS:
2627 case X86_OP_PRF_SIZE_OP:
2628 case X86_OP_PRF_SIZE_ADDR:
2629 case X86_OP_PRF_LOCK:
2630 case X86_OP_PRF_REPZ:
2631 case X86_OP_PRF_REPNZ:
2632 cbOpcodes--;
2633 pbOpcodes++;
2634 continue;
2635 }
2636 break;
2637 }
2638 return false;
2639}
2640
2641
2642#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2643/**
2644 * Copies state included in an exception intercept message.
2645 *
2646 * @param pVCpu The cross context per CPU structure.
2647 * @param pMsg The message.
2648 * @param pCtx The register context.
2649 * @param fClearXcpt Clear pending exception.
2650 */
2651DECLINLINE(void) nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg,
2652 PCPUMCTX pCtx, bool fClearXcpt)
2653{
2654 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2655 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
2656 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
2657 pCtx->rax = pMsg->Rax;
2658 pCtx->rcx = pMsg->Rcx;
2659 pCtx->rdx = pMsg->Rdx;
2660 pCtx->rbx = pMsg->Rbx;
2661 pCtx->rsp = pMsg->Rsp;
2662 pCtx->rbp = pMsg->Rbp;
2663 pCtx->rsi = pMsg->Rsi;
2664 pCtx->rdi = pMsg->Rdi;
2665 pCtx->r8 = pMsg->R8;
2666 pCtx->r9 = pMsg->R9;
2667 pCtx->r10 = pMsg->R10;
2668 pCtx->r11 = pMsg->R11;
2669 pCtx->r12 = pMsg->R12;
2670 pCtx->r13 = pMsg->R13;
2671 pCtx->r14 = pMsg->R14;
2672 pCtx->r15 = pMsg->R15;
2673 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2674 NEM_WIN_COPY_BACK_SEG(pCtx->ss, pMsg->SsSegment);
2675}
2676#elif defined(IN_RING3)
2677/**
2678 * Copies state included in an exception intercept exit.
2679 *
2680 * @param pVCpu The cross context per CPU structure.
2681 * @param pExit The VM exit information.
2682 * @param pCtx The register context.
2683 * @param fClearXcpt Clear pending exception.
2684 */
2685DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit,
2686 PCPUMCTX pCtx, bool fClearXcpt)
2687{
2688 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2689 if (fClearXcpt)
2690 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2691}
2692#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2693
2694
2695#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2696/**
2697 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
2698 *
2699 * @returns Strict VBox status code.
2700 * @param pVCpu The cross context per CPU structure.
2701 * @param pMsg The message.
2702 * @param pCtx The register context.
2703 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2704 * @sa nemR3WinHandleExitException
2705 */
2706NEM_TMPL_STATIC VBOXSTRICTRC
2707nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
2708{
2709 /*
2710 * Assert sanity.
2711 */
2712 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2713 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2714 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2715 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
2716
2717 /*
2718 * Get most of the register state since we'll end up making IEM inject the
2719 * event. The exception isn't normally flagged as a pending event, so we must inject it ourselves.
2720 *
2721 * Note! We can optimize this later with event injection.
2722 */
2723 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n",
2724 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2725 pMsg->ExceptionVector, pMsg->ErrorCode, pMsg->ExceptionParameter));
2726 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, pCtx, true /*fClearXcpt*/);
2727 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
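    /* A #DB also needs the debug registers so the code below (and IEM) can
       look at DR6/DR7 when handling it. */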
2728 if (pMsg->ExceptionVector == X86_XCPT_DB)
2729 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
2730 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, fWhat, "Xcpt");
2731 if (rcStrict != VINF_SUCCESS)
2732 return rcStrict;
2733
2734 /*
2735 * Handle the intercept.
2736 */
2737 TRPMEVENT enmEvtType = TRPM_TRAP;
2738 switch (pMsg->ExceptionVector)
2739 {
2740 /*
2741 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
2742 * and need to turn them over to GIM.
2743 *
2744 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
2745 * #UD for handling non-native hypercall instructions. (IEM will
2746 * decode both and let the GIM provider decide whether to accept it.)
2747 */
2748 case X86_XCPT_UD:
2749 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
2750 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
2751 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
2752 {
2753 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip, pMsg->InstructionBytes,
2754 pMsg->InstructionByteCount);
2755 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
2756 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
2757 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
2758 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
2759 return rcStrict;
2760 }
2761 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
2762 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
2763 break;
2764
2765 /*
2766 * Filter debug exceptions.
2767 */
2768 case X86_XCPT_DB:
2769 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
2770 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
2771 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header) ));
2772 break;
2773
2774 case X86_XCPT_BP:
2775 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
2776 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
2777 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->Header.InstructionLength));
2778 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
2779 break;
2780
2781 /* This shouldn't happen. */
2782 default:
2783 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
2784 }
2785
2786 /*
2787 * Inject it.
2788 */
2789 rcStrict = IEMInjectTrap(pVCpu, pMsg->ExceptionVector, enmEvtType, pMsg->ErrorCode,
2790 pMsg->ExceptionParameter /*??*/, pMsg->Header.InstructionLength);
2791 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
2792 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
2793 nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->ExceptionVector, VBOXSTRICTRC_VAL(rcStrict) ));
2794 return rcStrict;
2795}
2796#elif defined(IN_RING3)
2797/**
2798 * Deals with exception intercept exits (WHvRunVpExitReasonException).
2799 *
2800 * @returns Strict VBox status code.
2801 * @param pVM The cross context VM structure.
2802 * @param pVCpu The cross context per CPU structure.
2803 * @param pExit The VM exit information to handle.
2804 * @param pCtx The register context.
2805 * @sa nemHCWinHandleMessageException
2806 */
2807NEM_TMPL_STATIC VBOXSTRICTRC
2808nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2809{
2810 /*
2811 * Assert sanity.
2812 */
2813 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2814
2815 /*
2816 * Get most of the register state since we'll end up making IEM inject the
2817 * event. The exception isn't normally flagged as a pending event, so duh.
2818 *
2819 * Note! We can optimize this later with event injection.
2820 */
2821 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x errcd=%#x parm=%RX64\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2822 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType,
2823 pExit->VpException.ErrorCode, pExit->VpException.ExceptionParameter ));
2824 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
2825 uint64_t fWhat = NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM;
2826 if (pExit->VpException.ExceptionType == X86_XCPT_DB)
2827 fWhat |= CPUMCTX_EXTRN_DR0_DR3 | CPUMCTX_EXTRN_DR7 | CPUMCTX_EXTRN_DR6;
2828 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, fWhat, "Xcpt");
2829 if (rcStrict != VINF_SUCCESS)
2830 return rcStrict;
2831
2832 /*
2833 * Handle the intercept.
2834 */
2835 TRPMEVENT enmEvtType = TRPM_TRAP;
2836 switch (pExit->VpException.ExceptionType)
2837 {
2838 /*
2839 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
2840 * and need to turn them over to GIM.
2841 *
2842 * Note! We do not check fGIMTrapXcptUD here ASSUMING that GIM only wants
2843 * #UD for handling non-native hypercall instructions. (IEM will
2844 * decode both and let the GIM provider decide whether to accept it.)
2845 */
2846 case X86_XCPT_UD:
2847 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUd);
2848 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
2849 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
2850 {
2851 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
2852 pExit->VpException.InstructionBytes,
2853 pExit->VpException.InstructionByteCount);
2854 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
2855 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
2856 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
2857 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionUdHandled);
2858 return rcStrict;
2859 }
2860
2861 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
2862 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2863 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
2864 break;
2865
2866 /*
2867 * Filter debug exceptions.
2868 */
2869 case X86_XCPT_DB:
2870 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionDb);
2871 Log4(("XcptExit/%u: %04x:%08RX64/%s: #DB - TODO\n",
2872 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext) ));
2873 break;
2874
2875 case X86_XCPT_BP:
2876 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitExceptionBp);
2877 Log4(("XcptExit/%u: %04x:%08RX64/%s: #BP - TODO - %u\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2878 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.InstructionLength));
2879 enmEvtType = TRPM_SOFTWARE_INT; /* We're at the INT3 instruction, not after it. */
2880 break;
2881
2882 /* This shouldn't happen. */
2883 default:
2884 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
2885 }
2886
2887 /*
2888 * Inject it.
2889 */
2890 rcStrict = IEMInjectTrap(pVCpu, pExit->VpException.ExceptionType, enmEvtType, pExit->VpException.ErrorCode,
2891 pExit->VpException.ExceptionParameter /*??*/, pExit->VpContext.InstructionLength);
2892 Log4(("XcptExit/%u: %04x:%08RX64/%s: %#u -> injected -> %Rrc\n",
2893 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
2894 nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType, VBOXSTRICTRC_VAL(rcStrict) ));
2895
2896 RT_NOREF_PV(pVM);
2897 return rcStrict;
2898}
2899#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
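/*
 * A note on the #BP handling above (our reading of the code, not a statement
 * from the Hyper-V documentation): the intercept reports RIP pointing at the
 * INT3 instruction itself, not past it.  Injecting the event as
 * TRPM_SOFTWARE_INT together with the instruction length lets IEM apply the
 * software-interrupt RIP semantics instead of treating it as a trap that has
 * already completed.
 */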
2900
2901
2902#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2903/**
2904 * Deals with unrecoverable exception (triple fault).
2905 *
2906 * We've seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
2907 * here too, so we'll leave it to IEM to decide.
2908 *
2909 * @returns Strict VBox status code.
2910 * @param pVCpu The cross context per CPU structure.
2911 * @param pMsgHdr The message header.
2912 * @param pCtx The register context.
2913 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2914 * @sa nemR3WinHandleExitUnrecoverableException
2915 */
2916NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu,
2917 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr,
2918 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2919{
2920 AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength));
2921
2922# if 0
2923 /*
2924 * Just copy the state we've got and handle it in the loop for now.
2925 */
2926 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
2927 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
2928 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
2929 return VINF_EM_TRIPLE_FAULT;
2930# else
2931 /*
2932 * Let IEM decide whether this is really it.
2933 */
2934 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
2935 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2936 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
2937 if (rcStrict == VINF_SUCCESS)
2938 {
2939 rcStrict = IEMExecOne(pVCpu);
2940 if (rcStrict == VINF_SUCCESS)
2941 {
2942 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2943 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
2944 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
2945 return VINF_SUCCESS;
2946 }
2947 if (rcStrict == VINF_EM_TRIPLE_FAULT)
2948 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2949 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
2950 else
2951 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2952 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2953 }
2954 else
2955 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2956 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2957 return rcStrict;
2958# endif
2959}
2960#elif defined(IN_RING3)
2961/**
2962 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
2963 *
2964 * @returns Strict VBox status code.
2965 * @param pVM The cross context VM structure.
2966 * @param pVCpu The cross context per CPU structure.
2967 * @param pExit The VM exit information to handle.
2968 * @param pCtx The register context.
2969 * @sa nemHCWinHandleMessageUnrecoverableException
2970 */
2971NEM_TMPL_STATIC VBOXSTRICTRC
2972nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2973{
2974 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2975
2976# if 0
2977 /*
2978 * Just copy the state we've got and handle it in the loop for now.
2979 */
2980 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2981 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2982 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2983 RT_NOREF_PV(pVM);
2984 return VINF_EM_TRIPLE_FAULT;
2985# else
2986 /*
2987 * Let IEM decide whether this is really it.
2988 */
2989 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2990 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2991 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
2992 if (rcStrict == VINF_SUCCESS)
2993 {
2994 rcStrict = IEMExecOne(pVCpu);
2995 if (rcStrict == VINF_SUCCESS)
2996 {
2997 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2998 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2999 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
3000 return VINF_SUCCESS;
3001 }
3002 if (rcStrict == VINF_EM_TRIPLE_FAULT)
3003 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3004 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags ));
3005 else
3006 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3007 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3008 }
3009 else
3010 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
3011 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3012 RT_NOREF_PV(pVM);
3013 return rcStrict;
3014# endif
3015
3016}
3017#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3018
3019
3020#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3021/**
3022 * Handles messages (VM exits).
3023 *
3024 * @returns Strict VBox status code.
3025 * @param pVM The cross context VM structure.
3026 * @param pVCpu The cross context per CPU structure.
3027 * @param pMappingHeader The message slot mapping.
3028 * @param pCtx The register context.
3029 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3030 * @sa nemR3WinHandleExit
3031 */
3032NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3033 PCPUMCTX pCtx, PGVMCPU pGVCpu)
3034{
3035 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
3036 {
3037 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
3038 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
3039 switch (pMsg->Header.MessageType)
3040 {
3041 case HvMessageTypeUnmappedGpa:
3042 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3043 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3044 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3045
3046 case HvMessageTypeGpaIntercept:
3047 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
3048 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
3049 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
3050
3051 case HvMessageTypeX64IoPortIntercept:
3052 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
3053 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3054 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx, pGVCpu);
3055
3056 case HvMessageTypeX64Halt:
3057 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3058 Log4(("HaltExit\n"));
3059 return VINF_EM_HALT;
3060
3061 case HvMessageTypeX64InterruptWindow:
3062 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
3063 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3064 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pCtx, pGVCpu);
3065
3066 case HvMessageTypeX64CpuidIntercept:
3067 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
3068 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3069 return nemHCWinHandleMessageCpuId(pVCpu, &pMsg->X64CpuIdIntercept, pCtx);
3070
3071 case HvMessageTypeX64MsrIntercept:
3072 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
3073 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3074 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pCtx, pGVCpu);
3075
3076 case HvMessageTypeX64ExceptionIntercept:
3077 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
3078 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3079 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pCtx, pGVCpu);
3080
3081 case HvMessageTypeUnrecoverableException:
3082 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
3083 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3084 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu);
3085
3086 case HvMessageTypeInvalidVpRegisterValue:
3087 case HvMessageTypeUnsupportedFeature:
3088 case HvMessageTypeTlbPageSizeMismatch:
3089 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3090 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
3091 VERR_NEM_IPE_3);
3092
3093 case HvMessageTypeX64ApicEoi:
3094 case HvMessageTypeX64LegacyFpError:
3095 case HvMessageTypeX64RegisterIntercept:
3096 case HvMessageTypeApicEoi:
3097 case HvMessageTypeFerrAsserted:
3098 case HvMessageTypeEventLogBufferComplete:
3099 case HvMessageTimerExpired:
3100 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3101 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3102 VERR_NEM_IPE_3);
3103
3104 default:
3105 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
3106 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3107 VERR_NEM_IPE_3);
3108 }
3109 }
3110 else
3111 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3112 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3113 VERR_NEM_IPE_4);
3114}
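
/*
 * Message slot layout assumed by nemHCWinHandleMessage above, as implied by
 * the (pMappingHeader + 1) arithmetic (sketch only):
 *
 *     +------------------------------+ <- pVCpu->nem.s.pvMsgSlotMapping
 *     | VID_MESSAGE_MAPPING_HEADER   |    enmVidMsgType, cbMessage, ...
 *     +------------------------------+ <- pMappingHeader + 1
 *     | HV_MESSAGE                   |    Header.MessageType + payload union
 *     +------------------------------+
 */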
3115#elif defined(IN_RING3)
3116/**
3117 * Handles VM exits.
3118 *
3119 * @returns Strict VBox status code.
3120 * @param pVM The cross context VM structure.
3121 * @param pVCpu The cross context per CPU structure.
3122 * @param pExit The VM exit information to handle.
3123 * @param pCtx The register context.
3124 * @sa nemHCWinHandleMessage
3125 */
3126NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3127{
3128 switch (pExit->ExitReason)
3129 {
3130 case WHvRunVpExitReasonMemoryAccess:
3131 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3132 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit, pCtx);
3133
3134 case WHvRunVpExitReasonX64IoPortAccess:
3135 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3136 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit, pCtx);
3137
3138 case WHvRunVpExitReasonX64Halt:
3139 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3140 Log4(("HaltExit\n"));
3141 return VINF_EM_HALT;
3142
3143 case WHvRunVpExitReasonCanceled:
3144 return VINF_SUCCESS;
3145
3146 case WHvRunVpExitReasonX64InterruptWindow:
3147 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3148 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit, pCtx);
3149
3150 case WHvRunVpExitReasonX64Cpuid:
3151 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3152 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit, pCtx);
3153
3154 case WHvRunVpExitReasonX64MsrAccess:
3155 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3156 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit, pCtx);
3157
3158 case WHvRunVpExitReasonException:
3159 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3160 return nemR3WinHandleExitException(pVM, pVCpu, pExit, pCtx);
3161
3162 case WHvRunVpExitReasonUnrecoverableException:
3163 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3164 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit, pCtx);
3165
3166 case WHvRunVpExitReasonUnsupportedFeature:
3167 case WHvRunVpExitReasonInvalidVpRegisterValue:
3168 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3169 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3170 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3171
3172 /* Undesired exits: */
3173 case WHvRunVpExitReasonNone:
3174 default:
3175 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3176 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3177 }
3178}
3179#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3180
3181
3182#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3183/**
3184 * Worker for nemHCWinRunGC that stops the execution on the way out.
3185 *
3186 * The CPU was running the last time we checked, so there are no messages that
3187 * need to be marked as handled. The caller checks this.
3188 *
3189 * @returns rcStrict on success, error status on failure.
3190 * @param pVM The cross context VM structure.
3191 * @param pVCpu The cross context per CPU structure.
3192 * @param rcStrict The nemHCWinRunGC return status. This is a little
3193 * bit unnecessary, except in internal error cases,
3194 * since we won't need to stop the CPU if we took an
3195 * exit.
3196 * @param pMappingHeader The message slot mapping.
3197 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3198 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3199 */
3200NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3201 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3202 PGVM pGVM, PGVMCPU pGVCpu)
3203{
3204 /*
3205 * Try stopping the processor. If we're lucky we manage to do this before it
3206 * does another VM exit.
3207 */
3208# ifdef IN_RING0
3209 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3210 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3211 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3212 NULL, 0);
3213 if (NT_SUCCESS(rcNt))
3214 {
3215 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3216 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3217 return rcStrict;
3218 }
3219# else
3220 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3221 if (fRet)
3222 {
3223 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3224 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3225 return rcStrict;
3226 }
3227 RT_NOREF(pGVM, pGVCpu);
3228# endif
3229
3230 /*
3231 * Dang. The CPU stopped by itself and we've got a couple of messages to deal with.
3232 */
3233# ifdef IN_RING0
3234 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3235 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3236# else
3237 DWORD dwErr = RTNtLastErrorValue();
3238 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3239 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3240# endif
3241 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3242 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
3243
3244 /*
3245 * First message: Exit or similar, sometimes VidMessageStopRequestComplete.
3246 * Note! We can safely ASSUME that rcStrict isn't carrying any important information here.
3247 */
3248# ifdef IN_RING0
3249 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3250 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3251 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3252 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3253 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3254 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3255 NULL, 0);
3256 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3257 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3258# else
3259 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3260 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3261 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3262 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3263# endif
3264
3265 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3266 if (enmVidMsgType != VidMessageStopRequestComplete)
3267 {
3268 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu);
3269 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3270 rcStrict = rcStrict2;
3271
3272 /*
3273 * Mark it as handled and get the stop request completed message, then mark
3274 * that as handled too. The CPU is then back in the fully stopped state.
3275 */
3276# ifdef IN_RING0
3277 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3278 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE;
3279 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3280 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3281 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3282 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3283 NULL, 0);
3284 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3285 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3286# else
3287 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3288 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3289 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3290 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3291# endif
3292
3293 /* It should be a stop request completed message. */
3294 enmVidMsgType = pMappingHeader->enmVidMsgType;
3295 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3296 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3297 enmVidMsgType, pMappingHeader->cbMessage),
3298 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3299
3300 /*
3301 * Mark the VidMessageStopRequestComplete message as handled.
3302 */
3303# ifdef IN_RING0
3304 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3305 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE;
3306 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3307 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3308 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3309 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3310 NULL, 0);
3311 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3312 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3313# else
3314 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3315 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3316 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3317# endif
3318 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3319 }
3320 else
3321 {
3322 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPendingOdd);
3323 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc) - 1st VidMessageSlotHandleAndGetNext got VidMessageStopRequestComplete.\n",
3324 VBOXSTRICTRC_VAL(rcStrict) ));
3325 }
3326 return rcStrict;
3327}
3328#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
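
/*
 * Quick summary of the stop handshake implemented by nemHCWinStopCpu above
 * (a sketch of our reading of the code):
 *   1. Ask VID.SYS to stop the virtual processor; if that succeeds we're done.
 *   2. On ERROR_VID_STOP_PENDING, fetch the next message.  If it is a real
 *      exit, handle it, then mark it handled while fetching the next one.
 *   3. The final message must be VidMessageStopRequestComplete; marking it
 *      handled leaves the CPU in the fully stopped state.
 */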
3329
3330#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
3331
3332/**
3333 * Deals with pending interrupt-related force flags and may inject an interrupt.
3334 *
3335 * @returns VBox strict status code.
3336 * @param pVM The cross context VM structure.
3337 * @param pVCpu The cross context per CPU structure.
3338 * @param pGVCpu The global (ring-0) per CPU structure.
3339 * @param pCtx The register context.
3340 * @param pfInterruptWindows Where to return interrupt window flags.
3341 */
3342NEM_TMPL_STATIC VBOXSTRICTRC
3343nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows)
3344{
3345 Assert(!TRPMHasTrap(pVCpu));
3346 RT_NOREF_PV(pVM);
3347
3348 /*
3349 * First update APIC. We ASSUME this won't need TPR/CR8.
3350 */
3351 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3352 {
3353 APICUpdatePendingInterrupts(pVCpu);
3354 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3355 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3356 return VINF_SUCCESS;
3357 }
3358
3359 /*
3360 * We don't currently implement SMIs.
3361 */
3362 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3363
3364 /*
3365 * Check if we've got the minimum of state required for deciding whether we
3366 * can inject interrupts and NMIs. If we don't have it, get all we might require
3367 * for injection via IEM.
3368 */
3369 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3370 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3371 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3372 if (pCtx->fExtrn & fNeedExtrn)
3373 {
3374 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3375 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3376 if (rcStrict != VINF_SUCCESS)
3377 return rcStrict;
3378 }
3379 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3380 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
3381
3382 /*
3383 * NMI? Try deliver it first.
3384 */
3385 if (fPendingNmi)
3386 {
3387 if ( !fInhibitInterrupts
3388 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3389 {
3390 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3391 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "NMI");
3392 if (rcStrict == VINF_SUCCESS)
3393 {
3394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3395 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
3396 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3397 }
3398 return rcStrict;
3399 }
3400 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
3401 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
3402 }
3403
3404 /*
3405 * APIC or PIC interrupt?
3406 */
3407 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3408 {
3409 if ( !fInhibitInterrupts
3410 && pCtx->rflags.Bits.u1IF)
3411 {
3412 AssertCompile(NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT & CPUMCTX_EXTRN_APIC_TPR);
3413 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
3414 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM_XCPT, "IntFF");
3415 if (rcStrict == VINF_SUCCESS)
3416 {
3417 uint8_t bInterrupt;
3418 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
3419 if (RT_SUCCESS(rc))
3420 {
3421 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
3422 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3423 }
3424 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3425 {
3426 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
3427 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
3428 }
3429 else
3430 Log8(("PDMGetInterrupt failed -> %d\n", rc));
3431 }
3432 return rcStrict;
3433 }
3434 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
3435 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
3436 }
3437
3438 return VINF_SUCCESS;
3439}
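
/*
 * The flags returned via pfInterruptWindows above are consumed when the state
 * is exported to Hyper-V: NEM_WIN_INTW_F_NMI and NEM_WIN_INTW_F_REGULAR
 * request NMI/interrupt window exits, while the bits shifted in at
 * NEM_WIN_INTW_F_PRIO_SHIFT carry the priority of an interrupt that was
 * masked by the TPR.  (A sketch based on this function; the export side lives
 * elsewhere in this template.)
 */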
3440
3441
3442/**
3443 * Inner NEM run loop for Windows.
3444 *
3445 * @returns Strict VBox status code.
3446 * @param pVM The cross context VM structure.
3447 * @param pVCpu The cross context per CPU structure.
3448 * @param pGVM The ring-0 VM structure (NULL in ring-3).
3449 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
3450 */
3451NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
3452{
3453 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3454 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags));
3455# ifdef LOG_ENABLED
3456 if (LogIs3Enabled())
3457 nemHCWinLogState(pVM, pVCpu);
3458# endif
3459# ifdef IN_RING0
3460 Assert(pVCpu->idCpu == pGVCpu->idCpu);
3461# endif
3462
3463 /*
3464 * Try switch to NEM runloop state.
3465 */
3466 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
3467 { /* likely */ }
3468 else
3469 {
3470 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3471 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
3472 return VINF_SUCCESS;
3473 }
3474
3475 /*
3476 * The run loop.
3477 *
3478 * The current approach to state updating is to use the sledgehammer and
3479 * sync everything every time. This will be optimized later.
3480 */
3481# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3482 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
3483 uint32_t cMillies = 5000; /** @todo lower this later... */
3484# endif
3485 const bool fSingleStepping = DBGFIsStepping(pVCpu);
3486// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
3487// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
3488// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
3489 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3490 for (unsigned iLoop = 0;; iLoop++)
3491 {
3492# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3493 /*
3494 * Hack alert!
3495 */
3496 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
3497 if (cMappedPages >= 4000)
3498 {
3499 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
3500 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
3501 }
3502# endif
3503
3504 /*
3505 * Pending interrupts or such? Need to check and deal with this prior
3506 * to the state syncing.
3507 */
3508 pVCpu->nem.s.fDesiredInterruptWindows = 0;
3509 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
3510 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3511 {
3512# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3513 /* Make sure the CPU isn't executing. */
3514 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3515 {
3516 pVCpu->nem.s.fHandleAndGetFlags = 0;
3517 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3518 if (rcStrict == VINF_SUCCESS)
3519 { /* likely */ }
3520 else
3521 {
3522 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3523 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3524 break;
3525 }
3526 }
3527# endif
3528
3529 /* Try inject interrupt. */
3530 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, pCtx, &pVCpu->nem.s.fDesiredInterruptWindows);
3531 if (rcStrict == VINF_SUCCESS)
3532 { /* likely */ }
3533 else
3534 {
3535 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3536 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3537 break;
3538 }
3539 }
3540
3541 /*
3542 * Ensure that hyper-V has the whole state.
3543 * (We always update the interrupt window settings when active, as Hyper-V seems
3544 * to forget about them after an exit.)
3545 */
3546 if ( (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
3547 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
3548 || pVCpu->nem.s.fDesiredInterruptWindows
3549 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
3550 {
3551# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3552 Assert(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */);
3553# endif
3554# ifdef IN_RING0
3555 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx);
3556# else
3557 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx);
3558 RT_NOREF(pGVM, pGVCpu);
3559# endif
3560 AssertRCReturn(rc2, rc2);
3561 }
3562
3563 /*
3564 * Run a bit.
3565 */
3566 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
3567 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3568 {
3569# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3570 if (pVCpu->nem.s.fHandleAndGetFlags)
3571 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
3572 else
3573 {
3574# ifdef IN_RING0
3575 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3576 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
3577 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3578 NULL, 0);
3579 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
3580 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
3581 VERR_NEM_IPE_5);
3582# else
3583 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
3584 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
3585 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
3586 VERR_NEM_IPE_5);
3587# endif
3588 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3589 }
3590# endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
3591
3592 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
3593 {
3594# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3595# ifdef IN_RING0
3596 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3597 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
3598 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3599 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3600 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3601 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3602 NULL, 0);
3603 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3604 if (rcNt == STATUS_SUCCESS)
3605# else
3606 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3607 pVCpu->nem.s.fHandleAndGetFlags, cMillies);
3608 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3609 if (fRet)
3610# endif
3611# else
3612 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
3613 RT_ZERO(ExitReason);
3614 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
3615 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3616 if (SUCCEEDED(hrc))
3617# endif
3618 {
3619 /*
3620 * Deal with the message.
3621 */
3622# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3623 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu);
3624 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
3625# else
3626 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason, pCtx);
3627# endif
3628 if (rcStrict == VINF_SUCCESS)
3629 { /* hopefully likely */ }
3630 else
3631 {
3632 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3633 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3634 break;
3635 }
3636 }
3637 else
3638 {
3639# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3640
3641 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
3642 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
3643 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
3644# ifndef IN_RING0
3645 DWORD rcNt = GetLastError();
3646# endif
3647 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
3648 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
3649 || rcNt == STATUS_ALERTED /* just in case */
3650 || rcNt == STATUS_USER_APC /* ditto */
3651 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
3652 pVCpu->idCpu, rcNt, rcNt),
3653 VERR_NEM_IPE_0);
3654 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3655 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
3656# else
3657 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
3658 pVCpu->idCpu, hrc, GetLastError()),
3659 VERR_NEM_IPE_0);
3660
3661# endif
3662 }
3663
3664 /*
3665 * If no relevant FFs are pending, loop.
3666 */
3667 if ( !VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3668 && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3669 continue;
3670
3671 /** @todo Try handle pending flags, not just return to EM loops. Take care
3672 * not to set important RCs here unless we've handled a message. */
3673 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
3674 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
3675 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
3676 }
3677 else
3678 {
3679 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
3680 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
3681 }
3682 }
3683 else
3684 {
3685 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
3686 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
3687 }
3688 break;
3689 } /* the run loop */
3690
3691
3692 /*
3693 * If the CPU is running, make sure to stop it before we try sync back the
3694 * state and return to EM. We don't sync back the whole state if we can help it.
3695 */
3696# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3697 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3698 {
3699 pVCpu->nem.s.fHandleAndGetFlags = 0;
3700 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3701 }
3702# endif
3703
3704 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
3705 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3706
3707 if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
3708 {
3709 /* Try anticipate what we might need. */
3710 uint64_t fImport = IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
3711 if ( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
3712 || RT_FAILURE(rcStrict))
3713 fImport = CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
3714# ifdef IN_RING0 /* Ring-3 I/O port access optimizations: */
3715 else if ( rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
3716 || rcStrict == VINF_EM_PENDING_R3_IOPORT_WRITE)
3717 fImport = CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
3718 else if (rcStrict == VINF_EM_PENDING_R3_IOPORT_READ)
3719 fImport = CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT;
3720# endif
3721 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_INTERRUPT_APIC
3722 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3723 fImport |= IEM_CPUMCTX_EXTRN_XCPT_MASK;
3724
3725 if (pCtx->fExtrn & fImport)
3726 {
3727# ifdef IN_RING0
3728 int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
3729 if (RT_SUCCESS(rc2))
3730 pCtx->fExtrn &= ~fImport;
3731 else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
3732 {
3733 pCtx->fExtrn &= ~fImport;
3734 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
3735 rcStrict = -rc2;
3736 else
3737 {
3738 pVCpu->nem.s.rcPending = -rc2;
3739 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
3740 }
3741 }
3742# else
3743 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, fImport | CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT);
3744 if (RT_SUCCESS(rc2))
3745 pCtx->fExtrn &= ~fImport;
3746# endif
3747 else if (RT_SUCCESS(rcStrict))
3748 rcStrict = rc2;
3749 if (!(pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT))))
3750 pCtx->fExtrn = 0;
3751 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturn);
3752 }
3753 else
3754 {
3755 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
3756 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
3757 }
3758 }
3759 else
3760 {
3761 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatImportOnReturnSkipped);
3762 pCtx->fExtrn = 0;
3763 }
3764
3765 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
3766 pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3767 return rcStrict;
3768}
3769
3770#endif /* defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) */
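/*
 * Note on the import-on-return logic at the end of nemHCWinRunGC above:
 * instead of always pulling the full CPU state back from Hyper-V, it guesses
 * a minimal fImport mask from the exit status (e.g. just RIP, CS, RFLAGS and
 * interrupt inhibition for pending ring-3 I/O port writes) and only widens it
 * to CPUMCTX_EXTRN_ALL for EM state changes or failures.  This is the
 * "don't sync back the whole state if we can help it" optimization mentioned
 * in the comment above it.
 */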
3771
3772/**
3773 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
3774 */
3775NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
3776 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
3777{
3778 /* We'll just unmap the memory. */
3779 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
3780 {
3781#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3782 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
3783 AssertRC(rc);
3784 if (RT_SUCCESS(rc))
3785#else
3786 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
3787 if (SUCCEEDED(hrc))
3788#endif
3789 {
3790 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3791 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
3792 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
3793 }
3794 else
3795 {
3796#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3797 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
3798 return rc;
3799#else
3800 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3801 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3802 return VERR_NEM_IPE_2;
3803#endif
3804 }
3805 }
3806 RT_NOREF(pVCpu, pvUser);
3807 return VINF_SUCCESS;
3808}
3809
3810
3811/**
3812 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
3813 *
3814 * @returns The PGMPhysNemQueryPageInfo result.
3815 * @param pVM The cross context VM structure.
3816 * @param pVCpu The cross context virtual CPU structure.
3817 * @param GCPhys The page to unmap.
3818 */
3819NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
3820{
3821 PGMPHYSNEMPAGEINFO Info;
3822 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
3823 nemHCWinUnsetForA20CheckerCallback, NULL);
3824}
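
/*
 * A20 emulation note: with the A20 gate disabled, addresses in the second
 * megabyte wrap around to the first, so the same guest page can be reached at
 * two physical addresses.  Only one alias is ever mapped into Hyper-V; this
 * checker unmaps the page so the next access is intercepted again.  A minimal
 * sketch of the masking we assume the callers apply (illustrative only, the
 * real logic lives in the callers):
 */
#if 0
    RTGCPHYS GCPhysDst = !pVM->nem.s.fA20Enabled && NEM_WIN_IS_RELEVANT_TO_A20(GCPhysSrc)
                       ? GCPhysSrc & ~(RTGCPHYS)RT_BIT_64(20) /* wrap like the A20 line forces */
                       : GCPhysSrc;
#endif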
3825
3826
3827void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3828{
3829 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3830 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3831}
3832
3833
3834void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3835 int fRestoreAsRAM, bool fRestoreAsRAM2)
3836{
3837 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
3838 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
3839 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
3840}
3841
3842
3843void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3844 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3845{
3846 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3847 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3848 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3849}
3850
3851
3852/**
3853 * Worker that maps pages into Hyper-V.
3854 *
3855 * This is used by the PGM physical page notifications as well as the memory
3856 * access VMEXIT handlers.
3857 *
3858 * @returns VBox status code.
3859 * @param pVM The cross context VM structure.
3860 * @param pVCpu The cross context virtual CPU structure of the
3861 * calling EMT.
3862 * @param GCPhysSrc The source page address.
3863 * @param GCPhysDst The hyper-V destination page. This may differ from
3864 * GCPhysSrc when A20 is disabled.
3865 * @param fPageProt NEM_PAGE_PROT_XXX.
3866 * @param pu2State Our page state (input/output).
3867 * @param fBackingChanged Set if the page backing is being changed.
3868 * @thread EMT(pVCpu)
3869 */
3870NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
3871 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
3872{
3873#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3874 /*
3875 * When using the hypercalls instead of the ring-3 APIs, we don't need to
3876 * unmap memory before modifying it. We still want to track the state though,
3877 * since unmapping will fail when called on an unmapped page and we don't want to redo
3878 * upgrades/downgrades.
3879 */
3880 uint8_t const u2OldState = *pu2State;
3881 int rc;
3882 if (fPageProt == NEM_PAGE_PROT_NONE)
3883 {
3884 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3885 {
3886 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
3887 if (RT_SUCCESS(rc))
3888 {
3889 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3890 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3891 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3892 }
3893 else
3894 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3895 }
3896 else
3897 rc = VINF_SUCCESS;
3898 }
3899 else if (fPageProt & NEM_PAGE_PROT_WRITE)
3900 {
3901 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
3902 {
3903 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3904 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
3905 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3906 if (RT_SUCCESS(rc))
3907 {
3908 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3909 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
3910 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
3911 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3912 NOREF(cMappedPages);
3913 }
3914 else
3915 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3916 }
3917 else
3918 rc = VINF_SUCCESS;
3919 }
3920 else
3921 {
3922 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
3923 {
3924 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3925 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3926 if (RT_SUCCESS(rc))
3927 {
3928 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3929 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
3930 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
3931 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3932 NOREF(cMappedPages);
3933 }
3934 else
3935 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3936 }
3937 else
3938 rc = VINF_SUCCESS;
3939 }
3940
3941 return rc;
3942
3943#else
3944 /*
3945 * Looks like we need to unmap a page before we can change the backing
3946 * or even modify the protection. This is going to be *REALLY* efficient.
3947 * PGM lends us two bits to keep track of the state here.
3948 */
3949 uint8_t const u2OldState = *pu2State;
3950 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
3951 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
3952 if ( fBackingChanged
3953 || u2NewState != u2OldState)
3954 {
3955 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3956 {
3957# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3958 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
3959 AssertRC(rc);
3960 if (RT_SUCCESS(rc))
3961 {
3962 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
                return rc;
            }
# else
            HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
                uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
                {
                    Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
                          GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
                    return VINF_SUCCESS;
                }
            }
            else
            {
                LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                        GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
                return VERR_NEM_INIT_FAILED;
            }
# endif
        }
    }

    /*
     * Writeable mapping?
     */
    if (fPageProt & NEM_PAGE_PROT_WRITE)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
                                          | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    if (fPageProt & NEM_PAGE_PROT_READ)
    {
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
                                          HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
        {
            *pu2State = NEM_WIN_PAGE_STATE_READABLE;
            uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
            Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                  GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
            return VINF_SUCCESS;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
        return rc;
# else
        const void *pvPage;
        int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
        if (RT_SUCCESS(rc))
        {
            HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
                                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
            if (SUCCEEDED(hrc))
            {
                *pu2State = NEM_WIN_PAGE_STATE_READABLE;
                uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
                Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
                      GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
                return VINF_SUCCESS;
            }
            LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
                    GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
            return VERR_NEM_INIT_FAILED;
        }
        LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
        return rc;
# endif
    }

    /* We already unmapped it above. */
    *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
    return VINF_SUCCESS;
#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
}


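/**
 * Unmaps a page from Hyper-V, no questions asked, and marks its state
 * as unmapped.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhysDst   The guest physical address of the page to unmap.
 * @param   pu2State    Pointer to the NEM page state for the page.
 */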
NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
{
    if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
    {
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
    return rc;
#else
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
    {
        uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
        *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
        Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
        return VINF_SUCCESS;
    }
    LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
            GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_NEM_IPE_6;
#endif
}


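/**
 * Called when a physical page has been allocated, i.e. given (new) backing.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address of the backing page (ignored).
 * @param   fPageProt   The page protection (NEM_PAGE_PROT_XXX).
 * @param   enmType     The page type (ignored).
 * @param   pu2State    Pointer to the NEM page state for the page.
 */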
int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                       PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

    int rc;
#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
            rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else
        rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
#endif
    return rc;
}


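/**
 * Called when the protection of a physical page changes while the backing
 * stays the same.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhys      The host physical address of the backing page (ignored).
 * @param   fPageProt   The new page protection (NEM_PAGE_PROT_XXX).
 * @param   enmType     The page type (ignored).
 * @param   pu2State    Pointer to the NEM page state for the page.
 */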
void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
                                          PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhys, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}


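/**
 * Called when the backing of a physical page changes, i.e. when HCPhysPrev is
 * replaced by HCPhysNew.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The guest physical address of the page.
 * @param   HCPhysPrev  The previous host physical backing page (ignored).
 * @param   HCPhysNew   The new host physical backing page (ignored).
 * @param   fPageProt   The page protection (NEM_PAGE_PROT_XXX).
 * @param   enmType     The page type (ignored).
 * @param   pu2State    Pointer to the NEM page state for the page.
 */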
void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
                                      uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
{
    Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
          GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
    RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);

#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    else
    {
        /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
        nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
        if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
            nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
    }
#else
    RT_NOREF_PV(fPageProt);
    if (   pVM->nem.s.fA20Enabled
        || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
        nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
    /* else: ignore since we've got the alias page at this address. */
#endif
}
