VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/NEMAllNativeTemplate-win.cpp.h@ 72448

Last change on this file since 72448 was 72446, checked in by vboxsync, 7 years ago

NEM/win: Working on intercepting VMMCALL and VMCALL. bugref:9044

/* $Id: NEMAllNativeTemplate-win.cpp.h 72446 2018-06-05 08:53:01Z vboxsync $ */
/** @file
 * NEM - Native execution manager, Windows code template ring-0/3.
 */

/*
 * Copyright (C) 2018 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Copy back a segment from Hyper-V. */
#define NEM_WIN_COPY_BACK_SEG(a_Dst, a_Src) \
            do { \
                (a_Dst).u64Base  = (a_Src).Base; \
                (a_Dst).u32Limit = (a_Src).Limit; \
                (a_Dst).ValidSel = (a_Dst).Sel = (a_Src).Selector; \
                (a_Dst).Attr.u   = (a_Src).Attributes; \
                (a_Dst).fFlags   = CPUMSELREG_FLAGS_VALID; \
            } while (0)
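
/* Illustrative usage sketch (added for clarity; not part of the original source):
 * copying the CS segment back from a value fetched via
 * WHvGetVirtualProcessorRegisters(), as the GET_SEG() macro further down does.
 * @code
 *      WHV_REGISTER_VALUE Value;
 *      // ... Value filled in for WHvX64RegisterCs ...
 *      NEM_WIN_COPY_BACK_SEG(pCtx->cs, Value.Segment);
 *      // pCtx->cs now carries base/limit/selector/attributes and is flagged valid.
 * @endcode */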


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** NEM_WIN_PAGE_STATE_XXX names. */
NEM_TMPL_STATIC const char * const g_apszPageStates[4] = { "not-set", "unmapped", "readable", "writable" };

/** HV_INTERCEPT_ACCESS_TYPE names. */
static const char * const g_apszHvInterceptAccessTypes[4] = { "read", "write", "exec", "!undefined!" };


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
                                           uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged);


#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES

/**
 * Wrapper around VMMR0_DO_NEM_MAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhysSrc   The source page.  Does not need to be page aligned.
 * @param   GCPhysDst   The destination page.  Same as @a GCPhysSrc except for
 *                      when A20 is disabled.
 * @param   fFlags      HV_MAP_GPA_XXX.
 */
DECLINLINE(int) nemHCWinHypercallMapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst, uint32_t fFlags)
{
#ifdef IN_RING0
    /** @todo optimize further, caller generally has the physical address. */
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinMapPages(pGVM, pVM, &pGVM->aCpus[pVCpu->idCpu],
                            GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
                            1, fFlags);
#else
    pVCpu->nem.s.Hypercall.MapPages.GCPhysSrc = GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.GCPhysDst = GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.MapPages.cPages    = 1;
    pVCpu->nem.s.Hypercall.MapPages.fFlags    = fFlags;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_MAP_PAGES, 0, NULL);
#endif
}


/**
 * Wrapper around VMMR0_DO_NEM_UNMAP_PAGES for a single page.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the caller.
 * @param   GCPhys      The page to unmap.  Does not need to be page aligned.
 */
DECLINLINE(int) nemHCWinHypercallUnmapPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
# ifdef IN_RING0
    PGVM pGVM = GVMMR0FastGetGVMByVM(pVM);
    AssertReturn(pGVM, VERR_INVALID_VM_HANDLE);
    return nemR0WinUnmapPages(pGVM, &pGVM->aCpus[pVCpu->idCpu], GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, 1);
# else
    pVCpu->nem.s.Hypercall.UnmapPages.GCPhys = GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK;
    pVCpu->nem.s.Hypercall.UnmapPages.cPages = 1;
    return VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_UNMAP_PAGES, 0, NULL);
# endif
}
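
/* Usage sketch (assumption, mirroring the calls made from the page checker
 * callbacks later in this file): switching a guest page to writable by
 * unmapping it and remapping it with new HV_MAP_GPA_XXX flags.
 * @code
 *      int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
 *      if (RT_SUCCESS(rc))
 *          rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhys, GCPhys,
 *                                        HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
 *                                        | HV_MAP_GPA_EXECUTABLE);
 * @endcode */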

#endif /* NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
#ifndef IN_RING0

NEM_TMPL_STATIC int nemHCWinCopyStateToHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    NOREF(pCtx);
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_EXPORT_STATE, 0, NULL);
    AssertLogRelRCReturn(rc, rc);
    return rc;

# else
    /*
     * The following is very similar to what nemR0WinExportState() does.
     */
    WHV_REGISTER_NAME  aenmNames[128];
    WHV_REGISTER_VALUE aValues[128];

    uint64_t const fWhat = ~pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
    if (   !fWhat
        && pVCpu->nem.s.fCurrentInterruptWindows == pVCpu->nem.s.fDesiredInterruptWindows)
        return VINF_SUCCESS;
    uintptr_t iReg = 0;

# define ADD_REG64(a_enmName, a_uValue) do { \
            aenmNames[iReg]             = (a_enmName); \
            aValues[iReg].Reg128.High64 = 0; \
            aValues[iReg].Reg64         = (a_uValue); \
            iReg++; \
        } while (0)
# define ADD_REG128(a_enmName, a_uValueLo, a_uValueHi) do { \
            aenmNames[iReg]             = (a_enmName); \
            aValues[iReg].Reg128.Low64  = (a_uValueLo); \
            aValues[iReg].Reg128.High64 = (a_uValueHi); \
            iReg++; \
        } while (0)
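
/* What one ADD_REG64 invocation amounts to, spelled out (illustrative
 * expansion only): the macros build the parallel name/value arrays that
 * WHvSetVirtualProcessorRegisters() consumes at the end of this function.
 * @code
 *      // ADD_REG64(WHvX64RegisterRax, pCtx->rax) expands to roughly:
 *      aenmNames[iReg]             = WHvX64RegisterRax;
 *      aValues[iReg].Reg128.High64 = 0;            // clear the unused upper half
 *      aValues[iReg].Reg64         = pCtx->rax;
 *      iReg++;
 * @endcode */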

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            ADD_REG64(WHvX64RegisterRax, pCtx->rax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            ADD_REG64(WHvX64RegisterRcx, pCtx->rcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            ADD_REG64(WHvX64RegisterRdx, pCtx->rdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            ADD_REG64(WHvX64RegisterRbx, pCtx->rbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            ADD_REG64(WHvX64RegisterRsp, pCtx->rsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            ADD_REG64(WHvX64RegisterRbp, pCtx->rbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            ADD_REG64(WHvX64RegisterRsi, pCtx->rsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            ADD_REG64(WHvX64RegisterRdi, pCtx->rdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            ADD_REG64(WHvX64RegisterR8, pCtx->r8);
            ADD_REG64(WHvX64RegisterR9, pCtx->r9);
            ADD_REG64(WHvX64RegisterR10, pCtx->r10);
            ADD_REG64(WHvX64RegisterR11, pCtx->r11);
            ADD_REG64(WHvX64RegisterR12, pCtx->r12);
            ADD_REG64(WHvX64RegisterR13, pCtx->r13);
            ADD_REG64(WHvX64RegisterR14, pCtx->r14);
            ADD_REG64(WHvX64RegisterR15, pCtx->r15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        ADD_REG64(WHvX64RegisterRip, pCtx->rip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        ADD_REG64(WHvX64RegisterRflags, pCtx->rflags.u);

    /* Segments */
# define ADD_SEG(a_enmName, a_SReg) \
        do { \
            aenmNames[iReg]                  = a_enmName; \
            aValues[iReg].Segment.Base       = (a_SReg).u64Base; \
            aValues[iReg].Segment.Limit      = (a_SReg).u32Limit; \
            aValues[iReg].Segment.Selector   = (a_SReg).Sel; \
            aValues[iReg].Segment.Attributes = (a_SReg).Attr.u; \
            iReg++; \
        } while (0)
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            ADD_SEG(WHvX64RegisterEs, pCtx->es);
        if (fWhat & CPUMCTX_EXTRN_CS)
            ADD_SEG(WHvX64RegisterCs, pCtx->cs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            ADD_SEG(WHvX64RegisterSs, pCtx->ss);
        if (fWhat & CPUMCTX_EXTRN_DS)
            ADD_SEG(WHvX64RegisterDs, pCtx->ds);
        if (fWhat & CPUMCTX_EXTRN_FS)
            ADD_SEG(WHvX64RegisterFs, pCtx->fs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            ADD_SEG(WHvX64RegisterGs, pCtx->gs);
    }

    /* Descriptor tables & task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            ADD_SEG(WHvX64RegisterLdtr, pCtx->ldtr);
        if (fWhat & CPUMCTX_EXTRN_TR)
            ADD_SEG(WHvX64RegisterTr, pCtx->tr);
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            aenmNames[iReg] = WHvX64RegisterIdtr;
            aValues[iReg].Table.Limit = pCtx->idtr.cbIdt;
            aValues[iReg].Table.Base  = pCtx->idtr.pIdt;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            aenmNames[iReg] = WHvX64RegisterGdtr;
            aValues[iReg].Table.Limit = pCtx->gdtr.cbGdt;
            aValues[iReg].Table.Base  = pCtx->gdtr.pGdt;
            iReg++;
        }
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            ADD_REG64(WHvX64RegisterCr0, pCtx->cr0);
        if (fWhat & CPUMCTX_EXTRN_CR2)
            ADD_REG64(WHvX64RegisterCr2, pCtx->cr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
            ADD_REG64(WHvX64RegisterCr3, pCtx->cr3);
        if (fWhat & CPUMCTX_EXTRN_CR4)
            ADD_REG64(WHvX64RegisterCr4, pCtx->cr4);
    }

    /** @todo CR8/TPR */
    ADD_REG64(WHvX64RegisterCr8, CPUMGetGuestCR8(pVCpu));

    /* Debug registers. */
/** @todo fixme.  Figure out what the Hyper-V version of KVM_SET_GUEST_DEBUG would be. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        ADD_REG64(WHvX64RegisterDr0, pCtx->dr[0]); // CPUMGetHyperDR0(pVCpu));
        ADD_REG64(WHvX64RegisterDr1, pCtx->dr[1]); // CPUMGetHyperDR1(pVCpu));
        ADD_REG64(WHvX64RegisterDr2, pCtx->dr[2]); // CPUMGetHyperDR2(pVCpu));
        ADD_REG64(WHvX64RegisterDr3, pCtx->dr[3]); // CPUMGetHyperDR3(pVCpu));
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        ADD_REG64(WHvX64RegisterDr6, pCtx->dr[6]); // CPUMGetHyperDR6(pVCpu));
    if (fWhat & CPUMCTX_EXTRN_DR7)
        ADD_REG64(WHvX64RegisterDr7, pCtx->dr[7]); // CPUMGetHyperDR7(pVCpu));

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        ADD_REG128(WHvX64RegisterFpMmx0, pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx1, pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx2, pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx3, pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx4, pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx5, pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx6, pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1]);
        ADD_REG128(WHvX64RegisterFpMmx7, pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1]);

        aenmNames[iReg] = WHvX64RegisterFpControlStatus;
        aValues[iReg].FpControlStatus.FpControl = pCtx->pXStateR3->x87.FCW;
        aValues[iReg].FpControlStatus.FpStatus  = pCtx->pXStateR3->x87.FSW;
        aValues[iReg].FpControlStatus.FpTag     = pCtx->pXStateR3->x87.FTW;
        aValues[iReg].FpControlStatus.Reserved  = pCtx->pXStateR3->x87.FTW >> 8;
        aValues[iReg].FpControlStatus.LastFpOp  = pCtx->pXStateR3->x87.FOP;
        aValues[iReg].FpControlStatus.LastFpRip = (pCtx->pXStateR3->x87.FPUIP)
                                                | ((uint64_t)pCtx->pXStateR3->x87.CS << 32)
                                                | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd1 << 48);
        iReg++;

        aenmNames[iReg] = WHvX64RegisterXmmControlStatus;
        aValues[iReg].XmmControlStatus.LastFpRdp            = (pCtx->pXStateR3->x87.FPUDP)
                                                            | ((uint64_t)pCtx->pXStateR3->x87.DS << 32)
                                                            | ((uint64_t)pCtx->pXStateR3->x87.Rsrvd2 << 48);
        aValues[iReg].XmmControlStatus.XmmStatusControl     = pCtx->pXStateR3->x87.MXCSR;
        aValues[iReg].XmmControlStatus.XmmStatusControlMask = pCtx->pXStateR3->x87.MXCSR_MASK; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }
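
/* Note on the LastFpRip/LastFpRdp packing above (descriptive comment, added
 * for clarity): Hyper-V folds the legacy 32-bit FPU instruction/data pointer,
 * the 16-bit selector and the reserved word into a single 64-bit field:
 * @code
 *      // bits  0..31: FPUIP / FPUDP
 *      // bits 32..47: CS / DS selector
 *      // bits 48..63: Rsrvd1 / Rsrvd2
 * @endcode
 * The matching unpacking shifts appear in nemHCWinCopyStateFromHyperV() below. */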

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        ADD_REG128(WHvX64RegisterXmm0,  pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm1,  pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm2,  pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm3,  pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm4,  pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm5,  pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm6,  pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm7,  pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm8,  pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm9,  pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm10, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm11, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm12, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm13, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm14, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi);
        ADD_REG128(WHvX64RegisterXmm15, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        ADD_REG64(WHvX64RegisterEfer, pCtx->msrEFER);
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        ADD_REG64(WHvX64RegisterKernelGsBase, pCtx->msrKERNELGSBASE);
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        ADD_REG64(WHvX64RegisterSysenterCs, pCtx->SysEnter.cs);
        ADD_REG64(WHvX64RegisterSysenterEip, pCtx->SysEnter.eip);
        ADD_REG64(WHvX64RegisterSysenterEsp, pCtx->SysEnter.esp);
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        ADD_REG64(WHvX64RegisterStar, pCtx->msrSTAR);
        ADD_REG64(WHvX64RegisterLstar, pCtx->msrLSTAR);
        ADD_REG64(WHvX64RegisterCstar, pCtx->msrCSTAR);
        ADD_REG64(WHvX64RegisterSfmask, pCtx->msrSFMASK);
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        ADD_REG64(WHvX64RegisterApicBase, APICGetBaseMsrNoCheck(pVCpu));
        ADD_REG64(WHvX64RegisterPat, pCtx->msrPAT);
#if 0 /** @todo check if WHvX64RegisterMsrMtrrCap works here... */
        ADD_REG64(WHvX64RegisterMsrMtrrCap, CPUMGetGuestIa32MtrrCap(pVCpu));
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        ADD_REG64(WHvX64RegisterMsrMtrrDefType, pCtxMsrs->msr.MtrrDefType);
        ADD_REG64(WHvX64RegisterMsrMtrrFix64k00000, pCtxMsrs->msr.MtrrFix64K_00000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16k80000, pCtxMsrs->msr.MtrrFix16K_80000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix16kA0000, pCtxMsrs->msr.MtrrFix16K_A0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC0000, pCtxMsrs->msr.MtrrFix4K_C0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kC8000, pCtxMsrs->msr.MtrrFix4K_C8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD0000, pCtxMsrs->msr.MtrrFix4K_D0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kD8000, pCtxMsrs->msr.MtrrFix4K_D8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE0000, pCtxMsrs->msr.MtrrFix4K_E0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kE8000, pCtxMsrs->msr.MtrrFix4K_E8000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF0000, pCtxMsrs->msr.MtrrFix4K_F0000);
        ADD_REG64(WHvX64RegisterMsrMtrrFix4kF8000, pCtxMsrs->msr.MtrrFix4K_F8000);
        ADD_REG64(WHvX64RegisterTscAux, pCtxMsrs->msr.TscAux);
#if 0 /** @todo these registers aren't available? Might explain something... */
        const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
        {
            ADD_REG64(HvX64RegisterIa32MiscEnable, pCtxMsrs->msr.MiscEnable);
            ADD_REG64(HvX64RegisterIa32FeatureControl, CPUMGetGuestIa32FeatureControl(pVCpu));
        }
#endif
    }

    /* event injection (clear it). */
    if (fWhat & CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)
        ADD_REG64(WHvRegisterPendingInterruption, 0);

    /* Interruptibility state.  This can get a little complicated since we get
       half of the state via HV_X64_VP_EXECUTION_STATE. */
    if (   (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        ==          (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI) )
    {
        ADD_REG64(WHvRegisterInterruptState, 0);
        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
            && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
            aValues[iReg - 1].InterruptState.InterruptShadow = 1;
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            aValues[iReg - 1].InterruptState.NmiMasked = 1;
    }
    else if (fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT)
    {
        if (   pVCpu->nem.s.fLastInterruptShadow
            || (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip))
        {
            ADD_REG64(WHvRegisterInterruptState, 0);
            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
                && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)
                aValues[iReg - 1].InterruptState.InterruptShadow = 1;
            /** @todo Retrieve NMI state, currently assuming it's zero. (yes this may happen on I/O) */
            //if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
            //    aValues[iReg - 1].InterruptState.NmiMasked = 1;
        }
    }
    else
        Assert(!(fWhat & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI));

    /* Interrupt windows.  Always set if active as Hyper-V seems to be forgetful. */
    uint8_t const fDesiredIntWin = pVCpu->nem.s.fDesiredInterruptWindows;
    if (   fDesiredIntWin
        || pVCpu->nem.s.fCurrentInterruptWindows != fDesiredIntWin)
    {
        pVCpu->nem.s.fCurrentInterruptWindows = pVCpu->nem.s.fDesiredInterruptWindows;
        ADD_REG64(WHvX64RegisterDeliverabilityNotifications, fDesiredIntWin);
        Assert(aValues[iReg - 1].DeliverabilityNotifications.NmiNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_NMI));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptNotification == RT_BOOL(fDesiredIntWin & NEM_WIN_INTW_F_REGULAR));
        Assert(aValues[iReg - 1].DeliverabilityNotifications.InterruptPriority == (fDesiredIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT);
    }
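
/* Sketch of the deliverability-notification encoding (assumption, inferred
 * from the NEM_WIN_INTW_F_XXX names asserted above; the actual bit values
 * live in the NEM headers, not in this file):
 * @code
 *      uint8_t fIntWin = 0;
 *      fIntWin |= NEM_WIN_INTW_F_REGULAR;   // exit when an interrupt window opens
 *      fIntWin |= NEM_WIN_INTW_F_NMI;       // exit when an NMI can be delivered
 *      // The priority travels in (fIntWin & NEM_WIN_INTW_F_PRIO_MASK) >> NEM_WIN_INTW_F_PRIO_SHIFT.
 * @endcode */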

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /*
     * Set the registers.
     */
    Assert(iReg < RT_ELEMENTS(aValues));
    Assert(iReg < RT_ELEMENTS(aenmNames));
# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvSetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues));
# endif
    HRESULT hrc = WHvSetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, iReg, aValues);
    if (SUCCEEDED(hrc))
    {
        pCtx->fExtrn |= CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK | CPUMCTX_EXTRN_KEEPER_NEM;
        return VINF_SUCCESS;
    }
    AssertLogRelMsgFailed(("WHvSetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, iReg,
                           hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
    return VERR_INTERNAL_ERROR;

# undef ADD_REG64
# undef ADD_REG128
# undef ADD_SEG

# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}


NEM_TMPL_STATIC int nemHCWinCopyStateFromHyperV(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
{
# ifdef NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS
    /* See NEMR0ImportState */
    NOREF(pCtx);
    int rc = VMMR3CallR0Emt(pVM, pVCpu, VMMR0_DO_NEM_IMPORT_STATE, fWhat, NULL);
    if (RT_SUCCESS(rc))
        return rc;
    if (rc == VERR_NEM_FLUSH_TLB)
        return PGMFlushTLB(pVCpu, pCtx->cr3, true /*fGlobal*/);
    if (rc == VERR_NEM_CHANGE_PGM_MODE)
        return PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
    AssertLogRelRCReturn(rc, rc);
    return rc;

# else
    WHV_REGISTER_NAME aenmNames[128];

    fWhat &= pCtx->fExtrn;
    uintptr_t iReg = 0;

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            aenmNames[iReg++] = WHvX64RegisterRax;
        if (fWhat & CPUMCTX_EXTRN_RCX)
            aenmNames[iReg++] = WHvX64RegisterRcx;
        if (fWhat & CPUMCTX_EXTRN_RDX)
            aenmNames[iReg++] = WHvX64RegisterRdx;
        if (fWhat & CPUMCTX_EXTRN_RBX)
            aenmNames[iReg++] = WHvX64RegisterRbx;
        if (fWhat & CPUMCTX_EXTRN_RSP)
            aenmNames[iReg++] = WHvX64RegisterRsp;
        if (fWhat & CPUMCTX_EXTRN_RBP)
            aenmNames[iReg++] = WHvX64RegisterRbp;
        if (fWhat & CPUMCTX_EXTRN_RSI)
            aenmNames[iReg++] = WHvX64RegisterRsi;
        if (fWhat & CPUMCTX_EXTRN_RDI)
            aenmNames[iReg++] = WHvX64RegisterRdi;
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            aenmNames[iReg++] = WHvX64RegisterR8;
            aenmNames[iReg++] = WHvX64RegisterR9;
            aenmNames[iReg++] = WHvX64RegisterR10;
            aenmNames[iReg++] = WHvX64RegisterR11;
            aenmNames[iReg++] = WHvX64RegisterR12;
            aenmNames[iReg++] = WHvX64RegisterR13;
            aenmNames[iReg++] = WHvX64RegisterR14;
            aenmNames[iReg++] = WHvX64RegisterR15;
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        aenmNames[iReg++] = WHvX64RegisterRip;
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        aenmNames[iReg++] = WHvX64RegisterRflags;

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            aenmNames[iReg++] = WHvX64RegisterEs;
        if (fWhat & CPUMCTX_EXTRN_CS)
            aenmNames[iReg++] = WHvX64RegisterCs;
        if (fWhat & CPUMCTX_EXTRN_SS)
            aenmNames[iReg++] = WHvX64RegisterSs;
        if (fWhat & CPUMCTX_EXTRN_DS)
            aenmNames[iReg++] = WHvX64RegisterDs;
        if (fWhat & CPUMCTX_EXTRN_FS)
            aenmNames[iReg++] = WHvX64RegisterFs;
        if (fWhat & CPUMCTX_EXTRN_GS)
            aenmNames[iReg++] = WHvX64RegisterGs;
    }

    /* Descriptor tables. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            aenmNames[iReg++] = WHvX64RegisterLdtr;
        if (fWhat & CPUMCTX_EXTRN_TR)
            aenmNames[iReg++] = WHvX64RegisterTr;
        if (fWhat & CPUMCTX_EXTRN_IDTR)
            aenmNames[iReg++] = WHvX64RegisterIdtr;
        if (fWhat & CPUMCTX_EXTRN_GDTR)
            aenmNames[iReg++] = WHvX64RegisterGdtr;
    }

    /* Control registers. */
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
            aenmNames[iReg++] = WHvX64RegisterCr0;
        if (fWhat & CPUMCTX_EXTRN_CR2)
            aenmNames[iReg++] = WHvX64RegisterCr2;
        if (fWhat & CPUMCTX_EXTRN_CR3)
            aenmNames[iReg++] = WHvX64RegisterCr3;
        if (fWhat & CPUMCTX_EXTRN_CR4)
            aenmNames[iReg++] = WHvX64RegisterCr4;
    }
    aenmNames[iReg++] = WHvX64RegisterCr8; /// @todo CR8/TPR

    /* Debug registers. */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        aenmNames[iReg++] = WHvX64RegisterDr0;
        aenmNames[iReg++] = WHvX64RegisterDr1;
        aenmNames[iReg++] = WHvX64RegisterDr2;
        aenmNames[iReg++] = WHvX64RegisterDr3;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
        aenmNames[iReg++] = WHvX64RegisterDr6;
    if (fWhat & CPUMCTX_EXTRN_DR7)
        aenmNames[iReg++] = WHvX64RegisterDr7;

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        aenmNames[iReg++] = WHvX64RegisterFpMmx0;
        aenmNames[iReg++] = WHvX64RegisterFpMmx1;
        aenmNames[iReg++] = WHvX64RegisterFpMmx2;
        aenmNames[iReg++] = WHvX64RegisterFpMmx3;
        aenmNames[iReg++] = WHvX64RegisterFpMmx4;
        aenmNames[iReg++] = WHvX64RegisterFpMmx5;
        aenmNames[iReg++] = WHvX64RegisterFpMmx6;
        aenmNames[iReg++] = WHvX64RegisterFpMmx7;
        aenmNames[iReg++] = WHvX64RegisterFpControlStatus;
    }
    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
        aenmNames[iReg++] = WHvX64RegisterXmmControlStatus;

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        aenmNames[iReg++] = WHvX64RegisterXmm0;
        aenmNames[iReg++] = WHvX64RegisterXmm1;
        aenmNames[iReg++] = WHvX64RegisterXmm2;
        aenmNames[iReg++] = WHvX64RegisterXmm3;
        aenmNames[iReg++] = WHvX64RegisterXmm4;
        aenmNames[iReg++] = WHvX64RegisterXmm5;
        aenmNames[iReg++] = WHvX64RegisterXmm6;
        aenmNames[iReg++] = WHvX64RegisterXmm7;
        aenmNames[iReg++] = WHvX64RegisterXmm8;
        aenmNames[iReg++] = WHvX64RegisterXmm9;
        aenmNames[iReg++] = WHvX64RegisterXmm10;
        aenmNames[iReg++] = WHvX64RegisterXmm11;
        aenmNames[iReg++] = WHvX64RegisterXmm12;
        aenmNames[iReg++] = WHvX64RegisterXmm13;
        aenmNames[iReg++] = WHvX64RegisterXmm14;
        aenmNames[iReg++] = WHvX64RegisterXmm15;
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
        aenmNames[iReg++] = WHvX64RegisterEfer;
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        aenmNames[iReg++] = WHvX64RegisterKernelGsBase;
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterSysenterCs;
        aenmNames[iReg++] = WHvX64RegisterSysenterEip;
        aenmNames[iReg++] = WHvX64RegisterSysenterEsp;
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterStar;
        aenmNames[iReg++] = WHvX64RegisterLstar;
        aenmNames[iReg++] = WHvX64RegisterCstar;
        aenmNames[iReg++] = WHvX64RegisterSfmask;
    }

//#ifdef LOG_ENABLED
//    const CPUMCPUVENDOR enmCpuVendor = CPUMGetHostCpuVendor(pGVM->pVM);
//#endif
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        aenmNames[iReg++] = WHvX64RegisterApicBase; /// @todo APIC BASE
        aenmNames[iReg++] = WHvX64RegisterPat;
#if 0 /*def LOG_ENABLED*/ /** @todo Check if WHvX64RegisterMsrMtrrCap works... */
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrCap;
#endif
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrDefType;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix64k00000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16k80000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix16kA0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kC8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kD8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kE8000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF0000;
        aenmNames[iReg++] = WHvX64RegisterMsrMtrrFix4kF8000;
        aenmNames[iReg++] = WHvX64RegisterTscAux;
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
//#ifdef LOG_ENABLED
//        if (enmCpuVendor != CPUMCPUVENDOR_AMD)
//            aenmNames[iReg++] = HvX64RegisterIa32FeatureControl;
//#endif
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        aenmNames[iReg++] = WHvRegisterInterruptState;
        aenmNames[iReg++] = WHvX64RegisterRip;
    }

    /* event injection */
    aenmNames[iReg++] = WHvRegisterPendingInterruption;
    aenmNames[iReg++] = WHvRegisterPendingEvent0;
    aenmNames[iReg++] = WHvRegisterPendingEvent1;

    size_t const cRegs = iReg;
    Assert(cRegs < RT_ELEMENTS(aenmNames));

    /*
     * Get the registers.
     */
    WHV_REGISTER_VALUE aValues[128];
    RT_ZERO(aValues);
    Assert(RT_ELEMENTS(aValues) >= cRegs);
    Assert(RT_ELEMENTS(aenmNames) >= cRegs);
# ifdef NEM_WIN_INTERCEPT_NT_IO_CTLS
    Log12(("Calling WHvGetVirtualProcessorRegisters(%p, %u, %p, %u, %p)\n",
           pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, cRegs, aValues));
# endif
    HRESULT hrc = WHvGetVirtualProcessorRegisters(pVM->nem.s.hPartition, pVCpu->idCpu, aenmNames, (uint32_t)cRegs, aValues);
    AssertLogRelMsgReturn(SUCCEEDED(hrc),
                          ("WHvGetVirtualProcessorRegisters(%p, %u,,%u,) -> %Rhrc (Last=%#x/%u)\n",
                           pVM->nem.s.hPartition, pVCpu->idCpu, cRegs, hrc, RTNtLastStatusValue(), RTNtLastErrorValue())
                          , VERR_NEM_GET_REGISTERS_FAILED);

    iReg = 0;
# define GET_REG64(a_DstVar, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
# define GET_REG64_LOG7(a_DstVar, a_enmName, a_szLogName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            if ((a_DstVar) != aValues[iReg].Reg64) \
                Log7(("NEM/%u: " a_szLogName " changed %RX64 -> %RX64\n", pVCpu->idCpu, (a_DstVar), aValues[iReg].Reg64)); \
            (a_DstVar) = aValues[iReg].Reg64; \
            iReg++; \
        } while (0)
# define GET_REG128(a_DstVarLo, a_DstVarHi, a_enmName) do { \
            Assert(aenmNames[iReg] == a_enmName); \
            (a_DstVarLo) = aValues[iReg].Reg128.Low64; \
            (a_DstVarHi) = aValues[iReg].Reg128.High64; \
            iReg++; \
        } while (0)
# define GET_SEG(a_SReg, a_enmName) do { \
            Assert(aenmNames[iReg] == (a_enmName)); \
            NEM_WIN_COPY_BACK_SEG(a_SReg, aValues[iReg].Segment); \
            iReg++; \
        } while (0)

    /* GPRs */
    if (fWhat & CPUMCTX_EXTRN_GPRS_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_RAX)
            GET_REG64(pCtx->rax, WHvX64RegisterRax);
        if (fWhat & CPUMCTX_EXTRN_RCX)
            GET_REG64(pCtx->rcx, WHvX64RegisterRcx);
        if (fWhat & CPUMCTX_EXTRN_RDX)
            GET_REG64(pCtx->rdx, WHvX64RegisterRdx);
        if (fWhat & CPUMCTX_EXTRN_RBX)
            GET_REG64(pCtx->rbx, WHvX64RegisterRbx);
        if (fWhat & CPUMCTX_EXTRN_RSP)
            GET_REG64(pCtx->rsp, WHvX64RegisterRsp);
        if (fWhat & CPUMCTX_EXTRN_RBP)
            GET_REG64(pCtx->rbp, WHvX64RegisterRbp);
        if (fWhat & CPUMCTX_EXTRN_RSI)
            GET_REG64(pCtx->rsi, WHvX64RegisterRsi);
        if (fWhat & CPUMCTX_EXTRN_RDI)
            GET_REG64(pCtx->rdi, WHvX64RegisterRdi);
        if (fWhat & CPUMCTX_EXTRN_R8_R15)
        {
            GET_REG64(pCtx->r8, WHvX64RegisterR8);
            GET_REG64(pCtx->r9, WHvX64RegisterR9);
            GET_REG64(pCtx->r10, WHvX64RegisterR10);
            GET_REG64(pCtx->r11, WHvX64RegisterR11);
            GET_REG64(pCtx->r12, WHvX64RegisterR12);
            GET_REG64(pCtx->r13, WHvX64RegisterR13);
            GET_REG64(pCtx->r14, WHvX64RegisterR14);
            GET_REG64(pCtx->r15, WHvX64RegisterR15);
        }
    }

    /* RIP & Flags */
    if (fWhat & CPUMCTX_EXTRN_RIP)
        GET_REG64(pCtx->rip, WHvX64RegisterRip);
    if (fWhat & CPUMCTX_EXTRN_RFLAGS)
        GET_REG64(pCtx->rflags.u, WHvX64RegisterRflags);

    /* Segments */
    if (fWhat & CPUMCTX_EXTRN_SREG_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_ES)
            GET_SEG(pCtx->es, WHvX64RegisterEs);
        if (fWhat & CPUMCTX_EXTRN_CS)
            GET_SEG(pCtx->cs, WHvX64RegisterCs);
        if (fWhat & CPUMCTX_EXTRN_SS)
            GET_SEG(pCtx->ss, WHvX64RegisterSs);
        if (fWhat & CPUMCTX_EXTRN_DS)
            GET_SEG(pCtx->ds, WHvX64RegisterDs);
        if (fWhat & CPUMCTX_EXTRN_FS)
            GET_SEG(pCtx->fs, WHvX64RegisterFs);
        if (fWhat & CPUMCTX_EXTRN_GS)
            GET_SEG(pCtx->gs, WHvX64RegisterGs);
    }

    /* Descriptor tables and the task segment. */
    if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_LDTR)
            GET_SEG(pCtx->ldtr, WHvX64RegisterLdtr);

        if (fWhat & CPUMCTX_EXTRN_TR)
        {
            /* AMD-V likes loading TR with the AVAIL state, whereas Intel insists on BUSY.  So,
               to avoid triggering sanity assertions around the code, always fix this up. */
            GET_SEG(pCtx->tr, WHvX64RegisterTr);
            switch (pCtx->tr.Attr.n.u4Type)
            {
                case X86_SEL_TYPE_SYS_386_TSS_BUSY:
                case X86_SEL_TYPE_SYS_286_TSS_BUSY:
                    break;
                case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
                    pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
                    break;
                case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
                    pCtx->tr.Attr.n.u4Type = X86_SEL_TYPE_SYS_286_TSS_BUSY;
                    break;
            }
        }
        if (fWhat & CPUMCTX_EXTRN_IDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterIdtr);
            pCtx->idtr.cbIdt = aValues[iReg].Table.Limit;
            pCtx->idtr.pIdt  = aValues[iReg].Table.Base;
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_GDTR)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterGdtr);
            pCtx->gdtr.cbGdt = aValues[iReg].Table.Limit;
            pCtx->gdtr.pGdt  = aValues[iReg].Table.Base;
            iReg++;
        }
    }

    /* Control registers. */
    bool fMaybeChangedMode = false;
    bool fFlushTlb         = false;
    bool fFlushGlobalTlb   = false;
    if (fWhat & CPUMCTX_EXTRN_CR_MASK)
    {
        if (fWhat & CPUMCTX_EXTRN_CR0)
        {
            Assert(aenmNames[iReg] == WHvX64RegisterCr0);
            if (pCtx->cr0 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR0(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR2)
            GET_REG64(pCtx->cr2, WHvX64RegisterCr2);
        if (fWhat & CPUMCTX_EXTRN_CR3)
        {
            if (pCtx->cr3 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR3(pVCpu, aValues[iReg].Reg64);
                fFlushTlb = true;
            }
            iReg++;
        }
        if (fWhat & CPUMCTX_EXTRN_CR4)
        {
            if (pCtx->cr4 != aValues[iReg].Reg64)
            {
                CPUMSetGuestCR4(pVCpu, aValues[iReg].Reg64);
                fMaybeChangedMode = true;
                fFlushTlb = fFlushGlobalTlb = true; /// @todo fix this
            }
            iReg++;
        }
    }

    /// @todo CR8/TPR
    Assert(aenmNames[iReg] == WHvX64RegisterCr8);
    APICSetTpr(pVCpu, (uint8_t)aValues[iReg].Reg64 << 4);
    iReg++;
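
    /* Why the shift above (descriptive comment, added for clarity): CR8 holds
       only the upper four bits of the 8-bit APIC TPR, so the two encodings
       differ by a factor of 16.  E.g. CR8 = 0x3 corresponds to TPR = 0x30,
       which is exactly what the (uint8_t)Reg64 << 4 reconstructs. */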

    /* Debug registers. */
    /** @todo fixme */
    if (fWhat & CPUMCTX_EXTRN_DR0_DR3)
    {
        Assert(aenmNames[iReg]     == WHvX64RegisterDr0);
        Assert(aenmNames[iReg + 3] == WHvX64RegisterDr3);
        if (pCtx->dr[0] != aValues[iReg].Reg64)
            CPUMSetGuestDR0(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[1] != aValues[iReg].Reg64)
            CPUMSetGuestDR1(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[2] != aValues[iReg].Reg64)
            CPUMSetGuestDR2(pVCpu, aValues[iReg].Reg64);
        iReg++;
        if (pCtx->dr[3] != aValues[iReg].Reg64)
            CPUMSetGuestDR3(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR6)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr6);
        if (pCtx->dr[6] != aValues[iReg].Reg64)
            CPUMSetGuestDR6(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_DR7)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterDr7);
        if (pCtx->dr[7] != aValues[iReg].Reg64)
            CPUMSetGuestDR7(pVCpu, aValues[iReg].Reg64);
        iReg++;
    }

    /* Floating point state. */
    if (fWhat & CPUMCTX_EXTRN_X87)
    {
        GET_REG128(pCtx->pXStateR3->x87.aRegs[0].au64[0], pCtx->pXStateR3->x87.aRegs[0].au64[1], WHvX64RegisterFpMmx0);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[1].au64[0], pCtx->pXStateR3->x87.aRegs[1].au64[1], WHvX64RegisterFpMmx1);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[2].au64[0], pCtx->pXStateR3->x87.aRegs[2].au64[1], WHvX64RegisterFpMmx2);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[3].au64[0], pCtx->pXStateR3->x87.aRegs[3].au64[1], WHvX64RegisterFpMmx3);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[4].au64[0], pCtx->pXStateR3->x87.aRegs[4].au64[1], WHvX64RegisterFpMmx4);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[5].au64[0], pCtx->pXStateR3->x87.aRegs[5].au64[1], WHvX64RegisterFpMmx5);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[6].au64[0], pCtx->pXStateR3->x87.aRegs[6].au64[1], WHvX64RegisterFpMmx6);
        GET_REG128(pCtx->pXStateR3->x87.aRegs[7].au64[0], pCtx->pXStateR3->x87.aRegs[7].au64[1], WHvX64RegisterFpMmx7);

        Assert(aenmNames[iReg] == WHvX64RegisterFpControlStatus);
        pCtx->pXStateR3->x87.FCW    = aValues[iReg].FpControlStatus.FpControl;
        pCtx->pXStateR3->x87.FSW    = aValues[iReg].FpControlStatus.FpStatus;
        pCtx->pXStateR3->x87.FTW    = aValues[iReg].FpControlStatus.FpTag
                                      /*| (aValues[iReg].FpControlStatus.Reserved << 8)*/;
        pCtx->pXStateR3->x87.FOP    = aValues[iReg].FpControlStatus.LastFpOp;
        pCtx->pXStateR3->x87.FPUIP  = (uint32_t)aValues[iReg].FpControlStatus.LastFpRip;
        pCtx->pXStateR3->x87.CS     = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 32);
        pCtx->pXStateR3->x87.Rsrvd1 = (uint16_t)(aValues[iReg].FpControlStatus.LastFpRip >> 48);
        iReg++;
    }

    if (fWhat & (CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX))
    {
        Assert(aenmNames[iReg] == WHvX64RegisterXmmControlStatus);
        if (fWhat & CPUMCTX_EXTRN_X87)
        {
            pCtx->pXStateR3->x87.FPUDP  = (uint32_t)aValues[iReg].XmmControlStatus.LastFpRdp;
            pCtx->pXStateR3->x87.DS     = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 32);
            pCtx->pXStateR3->x87.Rsrvd2 = (uint16_t)(aValues[iReg].XmmControlStatus.LastFpRdp >> 48);
        }
        pCtx->pXStateR3->x87.MXCSR      = aValues[iReg].XmmControlStatus.XmmStatusControl;
        pCtx->pXStateR3->x87.MXCSR_MASK = aValues[iReg].XmmControlStatus.XmmStatusControlMask; /** @todo ??? (Isn't this an output field?) */
        iReg++;
    }

    /* Vector state. */
    if (fWhat & CPUMCTX_EXTRN_SSE_AVX)
    {
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 0].uXmm.s.Hi, WHvX64RegisterXmm0);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 1].uXmm.s.Hi, WHvX64RegisterXmm1);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 2].uXmm.s.Hi, WHvX64RegisterXmm2);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 3].uXmm.s.Hi, WHvX64RegisterXmm3);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 4].uXmm.s.Hi, WHvX64RegisterXmm4);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 5].uXmm.s.Hi, WHvX64RegisterXmm5);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 6].uXmm.s.Hi, WHvX64RegisterXmm6);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 7].uXmm.s.Hi, WHvX64RegisterXmm7);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 8].uXmm.s.Hi, WHvX64RegisterXmm8);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[ 9].uXmm.s.Hi, WHvX64RegisterXmm9);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[10].uXmm.s.Hi, WHvX64RegisterXmm10);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[11].uXmm.s.Hi, WHvX64RegisterXmm11);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[12].uXmm.s.Hi, WHvX64RegisterXmm12);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[13].uXmm.s.Hi, WHvX64RegisterXmm13);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[14].uXmm.s.Hi, WHvX64RegisterXmm14);
        GET_REG128(pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Lo, pCtx->pXStateR3->x87.aXMM[15].uXmm.s.Hi, WHvX64RegisterXmm15);
    }

    /* MSRs */
    // WHvX64RegisterTsc - don't touch
    if (fWhat & CPUMCTX_EXTRN_EFER)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterEfer);
        if (aValues[iReg].Reg64 != pCtx->msrEFER)
        {
            Log7(("NEM/%u: MSR EFER changed %RX64 -> %RX64\n", pVCpu->idCpu, pCtx->msrEFER, aValues[iReg].Reg64));
            if ((aValues[iReg].Reg64 ^ pCtx->msrEFER) & MSR_K6_EFER_NXE)
                PGMNotifyNxeChanged(pVCpu, RT_BOOL(aValues[iReg].Reg64 & MSR_K6_EFER_NXE));
            pCtx->msrEFER = aValues[iReg].Reg64;
            fMaybeChangedMode = true;
        }
        iReg++;
    }
    if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)
        GET_REG64_LOG7(pCtx->msrKERNELGSBASE, WHvX64RegisterKernelGsBase, "MSR KERNEL_GS_BASE");
    if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)
    {
        GET_REG64_LOG7(pCtx->SysEnter.cs,  WHvX64RegisterSysenterCs,  "MSR SYSENTER.CS");
        GET_REG64_LOG7(pCtx->SysEnter.eip, WHvX64RegisterSysenterEip, "MSR SYSENTER.EIP");
        GET_REG64_LOG7(pCtx->SysEnter.esp, WHvX64RegisterSysenterEsp, "MSR SYSENTER.ESP");
    }
    if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)
    {
        GET_REG64_LOG7(pCtx->msrSTAR,   WHvX64RegisterStar,   "MSR STAR");
        GET_REG64_LOG7(pCtx->msrLSTAR,  WHvX64RegisterLstar,  "MSR LSTAR");
        GET_REG64_LOG7(pCtx->msrCSTAR,  WHvX64RegisterCstar,  "MSR CSTAR");
        GET_REG64_LOG7(pCtx->msrSFMASK, WHvX64RegisterSfmask, "MSR SFMASK");
    }
    if (fWhat & CPUMCTX_EXTRN_OTHER_MSRS)
    {
        Assert(aenmNames[iReg] == WHvX64RegisterApicBase);
        const uint64_t uOldBase = APICGetBaseMsrNoCheck(pVCpu);
        if (aValues[iReg].Reg64 != uOldBase)
        {
            Log7(("NEM/%u: MSR APICBase changed %RX64 -> %RX64 (%RX64)\n",
                  pVCpu->idCpu, uOldBase, aValues[iReg].Reg64, aValues[iReg].Reg64 ^ uOldBase));
            VBOXSTRICTRC rc2 = APICSetBaseMsr(pVCpu, aValues[iReg].Reg64);
            AssertLogRelMsg(rc2 == VINF_SUCCESS, ("%Rrc %RX64\n", VBOXSTRICTRC_VAL(rc2), aValues[iReg].Reg64));
        }
        iReg++;

        GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterPat, "MSR PAT");
#if 0 /*def LOG_ENABLED*/ /** @todo something's wrong with HvX64RegisterMtrrCap? (AMD) */
        GET_REG64_LOG7(pCtx->msrPAT, WHvX64RegisterMsrMtrrCap, "MSR MTRR_CAP"); /** @todo the destination looks like a copy-paste leftover from the PAT line above; the call was also missing this log-name argument. */
#endif
        PCPUMCTXMSRS pCtxMsrs = CPUMQueryGuestCtxMsrsPtr(pVCpu);
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrDefType,      WHvX64RegisterMsrMtrrDefType,     "MSR MTRR_DEF_TYPE");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix64K_00000, WHvX64RegisterMsrMtrrFix64k00000, "MSR MTRR_FIX_64K_00000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_80000, WHvX64RegisterMsrMtrrFix16k80000, "MSR MTRR_FIX_16K_80000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix16K_A0000, WHvX64RegisterMsrMtrrFix16kA0000, "MSR MTRR_FIX_16K_A0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C0000,  WHvX64RegisterMsrMtrrFix4kC0000,  "MSR MTRR_FIX_4K_C0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_C8000,  WHvX64RegisterMsrMtrrFix4kC8000,  "MSR MTRR_FIX_4K_C8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D0000,  WHvX64RegisterMsrMtrrFix4kD0000,  "MSR MTRR_FIX_4K_D0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_D8000,  WHvX64RegisterMsrMtrrFix4kD8000,  "MSR MTRR_FIX_4K_D8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E0000,  WHvX64RegisterMsrMtrrFix4kE0000,  "MSR MTRR_FIX_4K_E0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_E8000,  WHvX64RegisterMsrMtrrFix4kE8000,  "MSR MTRR_FIX_4K_E8000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F0000,  WHvX64RegisterMsrMtrrFix4kF0000,  "MSR MTRR_FIX_4K_F0000");
        GET_REG64_LOG7(pCtxMsrs->msr.MtrrFix4K_F8000,  WHvX64RegisterMsrMtrrFix4kF8000,  "MSR MTRR_FIX_4K_F8000");
        GET_REG64_LOG7(pCtxMsrs->msr.TscAux,           WHvX64RegisterTscAux,             "MSR TSC_AUX");
        /** @todo look for HvX64RegisterIa32MiscEnable and HvX64RegisterIa32FeatureControl? */
    }

    /* Interruptibility. */
    if (fWhat & (CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
    {
        Assert(aenmNames[iReg] == WHvRegisterInterruptState);
        Assert(aenmNames[iReg + 1] == WHvX64RegisterRip);

        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
        {
            pVCpu->nem.s.fLastInterruptShadow = aValues[iReg].InterruptState.InterruptShadow;
            if (aValues[iReg].InterruptState.InterruptShadow)
                EMSetInhibitInterruptsPC(pVCpu, aValues[iReg + 1].Reg64);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
        }

        if (!(pCtx->fExtrn & CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI))
        {
            if (aValues[iReg].InterruptState.NmiMasked)
                VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
            else
                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
        }

        fWhat |= CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI;
        iReg += 2;
    }

    /* Event injection. */
    /// @todo WHvRegisterPendingInterruption
    Assert(aenmNames[iReg] == WHvRegisterPendingInterruption);
    if (aValues[iReg].PendingInterruption.InterruptionPending)
    {
        Log7(("PendingInterruption: type=%u vector=%#x errcd=%RTbool/%#x instr-len=%u nested=%u\n",
              aValues[iReg].PendingInterruption.InterruptionType, aValues[iReg].PendingInterruption.InterruptionVector,
              aValues[iReg].PendingInterruption.DeliverErrorCode, aValues[iReg].PendingInterruption.ErrorCode,
              aValues[iReg].PendingInterruption.InstructionLength, aValues[iReg].PendingInterruption.NestedEvent));
        AssertMsg((aValues[iReg].PendingInterruption.AsUINT64 & UINT64_C(0xfc00)) == 0,
                  ("%#RX64\n", aValues[iReg].PendingInterruption.AsUINT64));
    }

    /// @todo WHvRegisterPendingEvent0
    /// @todo WHvRegisterPendingEvent1

    /* Almost done, just update extrn flags and maybe change PGM mode. */
    pCtx->fExtrn &= ~fWhat;

    /* Typical. */
    if (!fMaybeChangedMode && !fFlushTlb)
        return VINF_SUCCESS;

    /*
     * Slow.
     */
    if (fMaybeChangedMode)
    {
        int rc = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
    }

    if (fFlushTlb)
    {
        int rc = PGMFlushTLB(pVCpu, pCtx->cr3, fFlushGlobalTlb);
        AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc));
    }

    return VINF_SUCCESS;
# endif /* !NEM_WIN_USE_HYPERCALLS_FOR_REGISTERS */
}

#endif /* !IN_RING0 */


#ifdef LOG_ENABLED
/**
 * Get the virtual processor running status.
 */
DECLINLINE(VID_PROCESSOR_STATUS) nemHCWinCpuGetRunningStatus(PVMCPU pVCpu)
{
# ifdef IN_RING0
    NOREF(pVCpu);
    return VidProcessorStatusUndefined;
# else
    RTERRVARS Saved;
    RTErrVarsSave(&Saved);

    /*
     * This API is disabled in release builds, it seems.  On build 17101 it requires
     * the following patch to be enabled (windbg): eb vid+12180 0f 84 98 00 00 00
     */
    VID_PROCESSOR_STATUS enmCpuStatus = VidProcessorStatusUndefined;
    NTSTATUS rcNt = g_pfnVidGetVirtualProcessorRunningStatus(pVCpu->pVMR3->nem.s.hPartitionDevice, pVCpu->idCpu, &enmCpuStatus);
    AssertRC(rcNt);

    RTErrVarsRestore(&Saved);
    return enmCpuStatus;
# endif
}
#endif


#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
# ifdef IN_RING3 /* hopefully not needed in ring-0, as we'd need KTHREADs and KeAlertThread. */
/**
 * Our own WHvCancelRunVirtualProcessor that can later be moved to ring-0.
 *
 * This is an experiment only.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the
 *                      calling EMT.
 */
NEM_TMPL_STATIC int nemHCWinCancelRunVirtualProcessor(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Work the state.
     *
     * From the looks of things, we should let the EMT call VidStopVirtualProcessor.
     * So, we just need to modify the state and kick the EMT if it's waiting on
     * messages.  For the latter we use QueueUserAPC / KeAlertThread.
     */
    for (;;)
    {
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu);
        switch (enmState)
        {
            case VMCPUSTATE_STARTED_EXEC_NEM:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM))
                {
                    Log8(("nemHCWinCancelRunVirtualProcessor: Switched %u to canceled state\n", pVCpu->idCpu));
                    STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelChangedState);
                    return VINF_SUCCESS;
                }
                break;

            case VMCPUSTATE_STARTED_EXEC_NEM_WAIT:
                if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED, VMCPUSTATE_STARTED_EXEC_NEM_WAIT))
                {
# ifdef IN_RING0
                    NTSTATUS rcNt = KeAlertThread(??);
# else
                    NTSTATUS rcNt = NtAlertThread(pVCpu->nem.s.hNativeThreadHandle);
# endif
                    Log8(("nemHCWinCancelRunVirtualProcessor: Alerted %u: %#x\n", pVCpu->idCpu, rcNt));
                    Assert(rcNt == STATUS_SUCCESS);
                    if (NT_SUCCESS(rcNt))
                    {
                        STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatCancelAlertedThread);
                        return VINF_SUCCESS;
                    }
                    AssertLogRelMsgFailedReturn(("NtAlertThread failed: %#x\n", rcNt), RTErrConvertFromNtStatus(rcNt));
                }
                break;

            default:
                return VINF_SUCCESS;
        }

        ASMNopPause();
        RT_NOREF(pVM);
    }
}
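
/* Usage sketch (assumption, not from the original source): this is meant to
 * be called from a different thread than the EMT that is currently executing
 * or waiting on VID messages, e.g. to get the CPU back for the VMM:
 * @code
 *      nemHCWinCancelRunVirtualProcessor(pVM, pVCpu);  // kick the EMT
 *      // The EMT either observes the *_CANCELED state itself or is woken
 *      // from its message wait by the NtAlertThread() call above.
 * @endcode */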
# endif /* IN_RING3 */
#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */


#ifdef LOG_ENABLED
/**
 * Logs the current CPU state.
 */
NEM_TMPL_STATIC void nemHCWinLogState(PVM pVM, PVMCPU pVCpu)
{
    if (LogIs3Enabled())
    {
# ifdef IN_RING3
        char szRegs[4096];
        DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
                        "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
                        "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
                        "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
                        "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
                        "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
                        "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
                        "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
                        "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
                        "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
                        "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
                        "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
                        "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
                        "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
                        "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
                        "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
                        "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
                        " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
                        " efer=%016VR{efer}\n"
                        " pat=%016VR{pat}\n"
                        " sf_mask=%016VR{sf_mask}\n"
                        "krnl_gs_base=%016VR{krnl_gs_base}\n"
                        " lstar=%016VR{lstar}\n"
                        " star=%016VR{star} cstar=%016VR{cstar}\n"
                        "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
                        );

        char szInstr[256];
        DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
                           DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
                           szInstr, sizeof(szInstr), NULL);
        Log3(("%s%s\n", szRegs, szInstr));
# else
        /** @todo stat logging in ring-0 */
        RT_NOREF(pVM, pVCpu);
# endif
    }
}
#endif /* LOG_ENABLED */


#ifdef LOG_ENABLED
/** Macro used by nemHCWinExecStateToLogStr and nemR3WinExecStateToLogStr. */
# define SWITCH_IT(a_szPrefix) \
        do \
            switch (u)\
            { \
                case 0x00: return a_szPrefix ""; \
                case 0x01: return a_szPrefix ",Pnd"; \
                case 0x02: return a_szPrefix ",Dbg"; \
                case 0x03: return a_szPrefix ",Pnd,Dbg"; \
                case 0x04: return a_szPrefix ",Shw"; \
                case 0x05: return a_szPrefix ",Pnd,Shw"; \
                case 0x06: return a_szPrefix ",Shw,Dbg"; \
                case 0x07: return a_szPrefix ",Pnd,Shw,Dbg"; \
                default: AssertFailedReturn("WTF?"); \
            } \
        while (0)
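
/* Decoding example for SWITCH_IT (added for clarity): the three low bits of u
 * are InterruptionPending (bit 0), DebugActive (bit 1) and InterruptShadow
 * (bit 2), combined with the mode prefix picked by the callers below:
 * @code
 *      // u = 0x05 = pending + shadow:
 *      //   EferLma set        -> "LM,Pnd,Shw"
 *      //   Cr0Pe set (no LMA) -> "PM,Pnd,Shw"
 *      //   otherwise          -> "RM,Pnd,Shw"
 * @endcode */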

# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
/**
 * Translates the execution state bitfield into a short log string, VID version.
 *
 * @returns Read-only log string.
 * @param   pMsgHdr     The header which state to summarize.
 */
static const char *nemHCWinExecStateToLogStr(HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
{
    unsigned u = (unsigned)pMsgHdr->ExecutionState.InterruptionPending
               | ((unsigned)pMsgHdr->ExecutionState.DebugActive << 1)
               | ((unsigned)pMsgHdr->ExecutionState.InterruptShadow << 2);
    if (pMsgHdr->ExecutionState.EferLma)
        SWITCH_IT("LM");
    else if (pMsgHdr->ExecutionState.Cr0Pe)
        SWITCH_IT("PM");
    else
        SWITCH_IT("RM");
}
# elif defined(IN_RING3)
/**
 * Translates the execution state bitfield into a short log string, WinHv version.
 *
 * @returns Read-only log string.
 * @param   pExitCtx    The exit context which state to summarize.
 */
static const char *nemR3WinExecStateToLogStr(WHV_VP_EXIT_CONTEXT const *pExitCtx)
{
    unsigned u = (unsigned)pExitCtx->ExecutionState.InterruptionPending
               | ((unsigned)pExitCtx->ExecutionState.DebugActive << 1)
               | ((unsigned)pExitCtx->ExecutionState.InterruptShadow << 2);
    if (pExitCtx->ExecutionState.EferLma)
        SWITCH_IT("LM");
    else if (pExitCtx->ExecutionState.Cr0Pe)
        SWITCH_IT("PM");
    else
        SWITCH_IT("RM");
}
# endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
# undef SWITCH_IT
#endif /* LOG_ENABLED */


#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
/**
 * Advances the guest RIP and clears EFLAGS.RF, VID version.
 *
 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The CPU context to update.
 * @param   pMsgHdr     The intercept message header.
 */
DECLINLINE(void) nemHCWinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr)
{
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));

    /* Advance the RIP. */
    Assert(pMsgHdr->InstructionLength > 0 && pMsgHdr->InstructionLength < 16);
    pCtx->rip += pMsgHdr->InstructionLength;
    pCtx->rflags.Bits.u1RF = 0;

    /* Update interrupt inhibition. */
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    { /* likely */ }
    else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}
#elif defined(IN_RING3)
/**
 * Advances the guest RIP and clears EFLAGS.RF, WinHv version.
 *
 * This may clear VMCPU_FF_INHIBIT_INTERRUPTS.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        The CPU context to update.
 * @param   pExitCtx    The exit context.
 */
DECLINLINE(void) nemR3WinAdvanceGuestRipAndClearRF(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
{
    Assert(!(pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)));

    /* Advance the RIP. */
    Assert(pExitCtx->InstructionLength > 0 && pExitCtx->InstructionLength < 16);
    pCtx->rip += pExitCtx->InstructionLength;
    pCtx->rflags.Bits.u1RF = 0;

    /* Update interrupt inhibition. */
    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
    { /* likely */ }
    else if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
}
#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */



NEM_TMPL_STATIC DECLCALLBACK(int)
nemHCWinUnmapOnePageCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t *pu2NemState, void *pvUser)
{
    RT_NOREF_PV(pvUser);
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
    int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
#else
    RT_NOREF_PV(pVCpu);
    HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
    if (SUCCEEDED(hrc))
#endif
    {
        Log5(("NEM GPA unmap all: %RGp (cMappedPages=%u)\n", GCPhys, pVM->nem.s.cMappedPages - 1));
        *pu2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
    }
    else
    {
#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
        LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
#else
        LogRel(("nemR3WinUnmapOnePageCallback: GCPhys=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
                GCPhys, g_apszPageStates[*pu2NemState], hrc, hrc, RTNtLastStatusValue(),
                RTNtLastErrorValue(), pVM->nem.s.cMappedPages));
#endif
        *pu2NemState = NEM_WIN_PAGE_STATE_NOT_SET;
    }
    if (pVM->nem.s.cMappedPages > 0)
        ASMAtomicDecU32(&pVM->nem.s.cMappedPages);
    return VINF_SUCCESS;
}


/**
 * State to pass between nemHCWinHandleMemoryAccess / nemR3WinWHvHandleMemoryAccess
 * and nemHCWinHandleMemoryAccessPageCheckerCallback.
 */
typedef struct NEMHCWINHMACPCCSTATE
{
    /** Input: Write access. */
    bool    fWriteAccess;
    /** Output: Set if we did something. */
    bool    fDidSomething;
    /** Output: Set if we should resume. */
    bool    fCanResume;
} NEMHCWINHMACPCCSTATE;
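
/* Usage sketch (assumption, modelled on the memory-access exit handling this
 * file implements): the state struct travels as the opaque user argument of
 * PGMPhysNemPageInfoChecker() into the callback below.
 * @code
 *      NEMHCWINHMACPCCSTATE State = { fWriteAccess, false, false };
 *      PGMPHYSNEMPAGEINFO   Info;
 *      int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, State.fWriteAccess, &Info,
 *                                         nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
 *      // On success, State.fCanResume says whether guest execution can simply resume.
 * @endcode */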
1374
1375/**
1376 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE,
1377 * Worker for nemR3WinHandleMemoryAccess; pvUser points to a
1378 * NEMHCWINHMACPCCSTATE structure. }
1379 */
1380NEM_TMPL_STATIC DECLCALLBACK(int)
1381nemHCWinHandleMemoryAccessPageCheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
1382{
1383 NEMHCWINHMACPCCSTATE *pState = (NEMHCWINHMACPCCSTATE *)pvUser;
1384 pState->fDidSomething = false;
1385 pState->fCanResume = false;
1386
1387 /* If A20 is disabled, we may need to make another query on the masked
1388 page to get the correct protection information. */
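    /* (Illustrative: with A20 off, an access at 0x00100000 is serviced by the
       page at 0x00000000, since address bit 20 is masked off below.) */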
1389 uint8_t u2State = pInfo->u2NemState;
1390 RTGCPHYS GCPhysSrc;
1391 if ( pVM->nem.s.fA20Enabled
1392 || !NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
1393 GCPhysSrc = GCPhys;
1394 else
1395 {
1396 GCPhysSrc = GCPhys & ~(RTGCPHYS)RT_BIT_32(20);
1397 PGMPHYSNEMPAGEINFO Info2;
1398 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhysSrc, pState->fWriteAccess, &Info2, NULL, NULL);
1399 AssertRCReturn(rc, rc);
1400
1401 *pInfo = Info2;
1402 pInfo->u2NemState = u2State;
1403 }
1404
1405 /*
1406 * Consolidate current page state with actual page protection and access type.
1407 * We don't really consider downgrades here, as they shouldn't happen.
1408 */
1409#ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1410 /** @todo Someone at Microsoft please explain:
1411 * I'm not sure WTF was going on, but I ended up in a loop if I remapped a
1412 * readonly page as writable (unmap, then map again). Specifically, this was an
1413 * issue with the big VRAM mapping at 0xe0000000 when booting DSL 4.4.1. So, in
1414 * the hope of working around that we no longer pre-map anything, just unmap stuff
1415 * and do it lazily here. And here we will first unmap, restart, and then remap
1416 * with new protection or backing.
1417 */
1418#endif
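    /* Rough (illustrative) summary of the transitions handled below:
       unmapped/not-set -> map with the requested protection;
       readable         -> upgrade to writable (hypercall case) or unmap + restart;
       writable         -> unmap + restart, since downgrades are not expected. */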
1419 int rc;
1420 switch (u2State)
1421 {
1422 case NEM_WIN_PAGE_STATE_UNMAPPED:
1423 case NEM_WIN_PAGE_STATE_NOT_SET:
1424 if (pInfo->fNemProt == NEM_PAGE_PROT_NONE)
1425 {
1426 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1\n", GCPhys));
1427 return VINF_SUCCESS;
1428 }
1429
1430 /* Don't bother remapping it if it's a write request to a non-writable page. */
1431 if ( pState->fWriteAccess
1432 && !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE))
1433 {
1434 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #1w\n", GCPhys));
1435 return VINF_SUCCESS;
1436 }
1437
1438 /* Map the page. */
1439 rc = nemHCNativeSetPhysPage(pVM,
1440 pVCpu,
1441 GCPhysSrc & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1442 GCPhys & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK,
1443 pInfo->fNemProt,
1444 &u2State,
1445 true /*fBackingChanged*/);
1446 pInfo->u2NemState = u2State;
1447 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - synced => %s + %Rrc\n",
1448 GCPhys, g_apszPageStates[u2State], rc));
1449 pState->fDidSomething = true;
1450 pState->fCanResume = true;
1451 return rc;
1452
1453 case NEM_WIN_PAGE_STATE_READABLE:
1454 if ( !(pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1455 && (pInfo->fNemProt & (NEM_PAGE_PROT_READ | NEM_PAGE_PROT_EXECUTE)))
1456 {
1457 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #2\n", GCPhys));
1458 return VINF_SUCCESS;
1459 }
1460
1461#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1462 /* Upgrade page to writable. */
1463/** @todo test this. */
1464 if ( (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1465 && pState->fWriteAccess)
1466 {
1467 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhys,
1468 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
1469 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
1470 AssertRC(rc);
1471 if (RT_SUCCESS(rc))
1472 {
1473 pInfo->u2NemState = NEM_WIN_PAGE_STATE_WRITABLE;
1474 pState->fDidSomething = true;
1475 pState->fCanResume = true;
1476 Log5(("NEM GPA write-upgrade/exit: %RGp (was %s, cMappedPages=%u)\n",
1477 GCPhys, g_apszPageStates[u2State], pVM->nem.s.cMappedPages));
1478 }
1479 }
1480 else
1481 {
1482 /* Need to emulate the access. */
1483 AssertBreak(pInfo->fNemProt != NEM_PAGE_PROT_NONE); /* There should be no downgrades. */
1484 rc = VINF_SUCCESS;
1485 }
1486 return rc;
1487#else
1488 break;
1489#endif
1490
1491 case NEM_WIN_PAGE_STATE_WRITABLE:
1492 if (pInfo->fNemProt & NEM_PAGE_PROT_WRITE)
1493 {
1494 Log4(("nemHCWinHandleMemoryAccessPageCheckerCallback: %RGp - #3\n", GCPhys));
1495 return VINF_SUCCESS;
1496 }
1497#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1498 AssertFailed(); /* There should be no downgrades. */
1499#endif
1500 break;
1501
1502 default:
1503 AssertLogRelMsgFailedReturn(("u2State=%#x\n", u2State), VERR_NEM_IPE_4);
1504 }
1505
1506 /*
1507 * Unmap and restart the instruction.
1508 * If this fails, which it does every so often, just unmap everything for now.
1509 */
1510#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1511 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
1512 AssertRC(rc);
1513 if (RT_SUCCESS(rc))
1514#else
1515 /** @todo figure out whether we mess up the state or if it's WHv. */
1516 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
1517 if (SUCCEEDED(hrc))
1518#endif
1519 {
1520 pState->fDidSomething = true;
1521 pState->fCanResume = true;
1522 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1523 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
1524 Log5(("NEM GPA unmapped/exit: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[u2State], cMappedPages));
1525 return VINF_SUCCESS;
1526 }
1527#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
1528 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhys, rc));
1529 return rc;
1530#else
1531 LogRel(("nemHCWinHandleMemoryAccessPageCheckerCallback/unmap: GCPhysDst=%RGp %s hrc=%Rhrc (%#x) Last=%#x/%u (cMappedPages=%u)\n",
1532 GCPhys, g_apszPageStates[u2State], hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue(),
1533 pVM->nem.s.cMappedPages));
1534
1535 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemHCWinUnmapOnePageCallback, NULL);
1536 Log(("nemHCWinHandleMemoryAccessPageCheckerCallback: Unmapped all (cMappedPages=%u)\n", pVM->nem.s.cMappedPages));
1537
1538 pState->fDidSomething = true;
1539 pState->fCanResume = true;
1540 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
1541 return VINF_SUCCESS;
1542#endif
1543}
1544
1545
1546
1547#if defined(IN_RING0) && defined(NEM_WIN_USE_OUR_OWN_RUN_API)
1548/**
1549 * Wrapper around nemR0WinImportState that converts VERR_NEM_CHANGE_PGM_MODE, VERR_NEM_FLUSH_TLB and
1550 * VERR_NEM_UPDATE_APIC_BASE into informational status codes, and logs + asserts on other statuses.
1551 *
1552 * @returns VBox strict status code.
1553 * @param pGVM The global (ring-0) VM structure.
1554 * @param pGVCpu The global (ring-0) per CPU structure.
1555 * @param pCtx The CPU context to import into.
1556 * @param fWhat What to import.
1557 * @param pszCaller Who is doing the importing.
1558 */
1559DECLINLINE(VBOXSTRICTRC) nemR0WinImportStateStrict(PGVM pGVM, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint64_t fWhat, const char *pszCaller)
1560{
1561 int rc = nemR0WinImportState(pGVM, pGVCpu, pCtx, fWhat);
1562 if (RT_SUCCESS(rc))
1563 {
1564 Assert(rc == VINF_SUCCESS);
1565 return VINF_SUCCESS;
1566 }
1567
1568 if (rc == VERR_NEM_CHANGE_PGM_MODE || rc == VERR_NEM_FLUSH_TLB || rc == VERR_NEM_UPDATE_APIC_BASE)
1569 {
1570 Log4(("%s/%u: nemR0WinImportState -> %Rrc\n", pszCaller, pGVCpu->idCpu, -rc));
1571 return -rc;
1572 }
1573 RT_NOREF(pszCaller);
1574 AssertMsgFailedReturn(("%s/%u: nemR0WinImportState failed: %Rrc\n", pszCaller, pGVCpu->idCpu, rc), rc);
1575}
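
/** @note The '-rc' negation above relies on the VBox convention that error
 * statuses are negative while informational ones are positive, so negating
 * e.g. VERR_NEM_FLUSH_TLB yields a positive, informational-style value the
 * run loop can act upon without tripping error checks (illustrative reading;
 * see the status code definitions for the authoritative story). */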
1576#endif /* IN_RING0 && NEM_WIN_USE_OUR_OWN_RUN_API */
1577
1578#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
1579/**
1580 * Wrapper around nemR0WinImportStateStrict and nemHCWinCopyStateFromHyperV.
1581 *
1582 * Unlike the wrapped APIs, this checks whether it's necessary.
1583 *
1584 * @returns VBox strict status code.
1585 * @param pVCpu The cross context per CPU structure.
1586 * @param pGVCpu The global (ring-0) per CPU structure (NULL in ring-3).
1587 * @param pCtx The CPU context to import into.
1588 * @param fWhat What to import.
1589 * @param pszCaller Who is doing the importing.
1590 */
1591DECLINLINE(VBOXSTRICTRC) nemHCWinImportStateIfNeededStrict(PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx,
1592 uint64_t fWhat, const char *pszCaller)
1593{
1594 if (pCtx->fExtrn & fWhat)
1595 {
1596#ifdef IN_RING0
1597 RT_NOREF(pVCpu);
1598 return nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, fWhat, pszCaller);
1599#else
1600 RT_NOREF(pGVCpu, pszCaller);
1601 int rc = nemHCWinCopyStateFromHyperV(pVCpu->pVMR3, pVCpu, pCtx, fWhat);
1602 AssertRCReturn(rc, rc);
1603#endif
1604 }
1605 return VINF_SUCCESS;
1606}
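
/** @note Illustrative use: request only what may still be external and let the
 * fExtrn check above skip the import when the state is already around, e.g.:
 * @code
 *     rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
 *                                                  CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
 *                                                  | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4, "MSRs");
 * @endcode
 * as the MSR intercept handlers below do. */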
1607#endif /* NEM_WIN_USE_OUR_OWN_RUN_API || IN_RING3 */
1608
1609#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1610/**
1611 * Copies register state from the X64 intercept message header.
1612 *
1613 * ASSUMES no state copied yet.
1614 *
1615 * @param pVCpu The cross context per CPU structure.
1616 * @param pCtx The register context.
1617 * @param pHdr The X64 intercept message header.
1618 * @sa nemR3WinCopyStateFromX64Header
1619 */
1620DECLINLINE(void) nemHCWinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, HV_X64_INTERCEPT_MESSAGE_HEADER const *pHdr)
1621{
1622 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1623 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1624 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pHdr->CsSegment);
1625 pCtx->rip = pHdr->Rip;
1626 pCtx->rflags.u = pHdr->Rflags;
1627
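    /* Note! The interrupt shadow is the one-instruction window following STI,
             MOV SS or POP SS where interrupt delivery is suppressed; Hyper-V
             reports it via ExecutionState.InterruptShadow and we mirror it
             into VMCPU_FF_INHIBIT_INTERRUPTS below. */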
1628 pVCpu->nem.s.fLastInterruptShadow = pHdr->ExecutionState.InterruptShadow;
1629 if (!pHdr->ExecutionState.InterruptShadow)
1630 {
1631 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1632 { /* likely */ }
1633 else
1634 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1635 }
1636 else
1637 EMSetInhibitInterruptsPC(pVCpu, pHdr->Rip);
1638
1639 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1640}
1641#elif defined(IN_RING3)
1642/**
1643 * Copies register state from the (common) exit context.
1644 *
1645 * ASSUMES no state copied yet.
1646 *
1647 * @param pVCpu The cross context per CPU structure.
1648 * @param pCtx The register context.
1649 * @param pExitCtx The common exit context.
1650 * @sa nemHCWinCopyStateFromX64Header
1651 */
1652DECLINLINE(void) nemR3WinCopyStateFromX64Header(PVMCPU pVCpu, PCPUMCTX pCtx, WHV_VP_EXIT_CONTEXT const *pExitCtx)
1653{
1654 Assert( (pCtx->fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT))
1655 == (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT));
1656 NEM_WIN_COPY_BACK_SEG(pCtx->cs, pExitCtx->Cs);
1657 pCtx->rip = pExitCtx->Rip;
1658 pCtx->rflags.u = pExitCtx->Rflags;
1659
1660 pVCpu->nem.s.fLastInterruptShadow = pExitCtx->ExecutionState.InterruptShadow;
1661 if (!pExitCtx->ExecutionState.InterruptShadow)
1662 {
1663 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1664 { /* likely */ }
1665 else
1666 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1667 }
1668 else
1669 EMSetInhibitInterruptsPC(pVCpu, pExitCtx->Rip);
1670
1671 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT);
1672}
1673#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1674
1675
1676#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1677/**
1678 * Deals with memory intercept message.
1679 *
1680 * @returns Strict VBox status code.
1681 * @param pVM The cross context VM structure.
1682 * @param pVCpu The cross context per CPU structure.
1683 * @param pMsg The message.
1684 * @param pCtx The register context.
1685 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1686 * @sa nemR3WinHandleExitMemory
1687 */
1688NEM_TMPL_STATIC VBOXSTRICTRC
1689nemHCWinHandleMessageMemory(PVM pVM, PVMCPU pVCpu, HV_X64_MEMORY_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1690{
1691 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1692 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1693 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
1694 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1695
1696 /*
1697 * Whatever we do, we must clear pending event injection upon resume.
1698 */
1699 if (pMsg->Header.ExecutionState.InterruptionPending)
1700 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1701
1702#if 0 /* Experiment: 20K -> 34K exit/s. */
1703 if ( pMsg->Header.ExecutionState.EferLma
1704 && pMsg->Header.CsSegment.Long
1705 && pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1706 {
1707 if ( pMsg->Header.Rip - (uint64_t)0xf65a < (uint64_t)(0xf662 - 0xf65a)
1708 && pMsg->InstructionBytes[0] == 0x89
1709 && pMsg->InstructionBytes[1] == 0x03)
1710 {
1711 pCtx->rip = pMsg->Header.Rip + 2;
1712 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP;
1713 AssertMsg(pMsg->Header.InstructionLength == 2, ("%#x\n", pMsg->Header.InstructionLength));
1714 //Log(("%RX64 msg:\n%.80Rhxd\n", pCtx->rip, pMsg));
1715 return VINF_SUCCESS;
1716 }
1717 }
1718#endif
1719
1720 /*
1721 * Ask PGM for information about the given GCPhys. We need to check if we're
1722 * out of sync first.
1723 */
1724 NEMHCWINHMACPCCSTATE State = { pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE, false, false };
1725 PGMPHYSNEMPAGEINFO Info;
1726 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pMsg->GuestPhysicalAddress, State.fWriteAccess, &Info,
1727 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1728 if (RT_SUCCESS(rc))
1729 {
1730 if (Info.fNemProt & ( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
1731 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1732 {
1733 if (State.fCanResume)
1734 {
1735 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1736 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1737 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1738 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1739 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1740 return VINF_SUCCESS;
1741 }
1742 }
1743 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1744 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1745 pMsg->GuestPhysicalAddress, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1746 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1747 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1748 }
1749 else
1750 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1751 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1752 pMsg->GuestPhysicalAddress, rc, State.fDidSomething ? " modified-backing" : "",
1753 g_apszHvInterceptAccessTypes[pMsg->Header.InterceptAccessType]));
1754
1755 /*
1756 * Emulate the memory access, either access handler or special memory.
1757 */
1758 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1759 VBOXSTRICTRC rcStrict;
1760# ifdef IN_RING0
1761 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MemExit");
1762 if (rcStrict != VINF_SUCCESS)
1763 return rcStrict;
1764# else
1765 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
1766 AssertRCReturn(rc, rc);
1767 NOREF(pGVCpu);
1768# endif
1769
1770 if (pMsg->Reserved1)
1771 Log(("MemExit/Reserved1=%#x\n", pMsg->Reserved1));
1772 if (pMsg->Header.ExecutionState.Reserved0 || pMsg->Header.ExecutionState.Reserved1)
1773 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pMsg->Header.ExecutionState.Reserved0, pMsg->Header.ExecutionState.Reserved1));
1774 //if (pMsg->InstructionByteCount > 0)
1775 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pMsg->InstructionByteCount, pMsg->InstructionBytes));
1776
1777 if (pMsg->InstructionByteCount > 0)
1778 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip,
1779 pMsg->InstructionBytes, pMsg->InstructionByteCount);
1780 else
1781 rcStrict = IEMExecOne(pVCpu);
1782 /** @todo do we need to do anything wrt debugging here? */
1783 return rcStrict;
1784}
1785#elif defined(IN_RING3)
1786/**
1787 * Deals with memory access exits (WHvRunVpExitReasonMemoryAccess).
1788 *
1789 * @returns Strict VBox status code.
1790 * @param pVM The cross context VM structure.
1791 * @param pVCpu The cross context per CPU structure.
1792 * @param pExit The VM exit information to handle.
1793 * @param pCtx The register context.
1794 * @sa nemHCWinHandleMessageMemory
1795 */
1796NEM_TMPL_STATIC VBOXSTRICTRC
1797nemR3WinHandleExitMemory(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
1798{
1799 Assert(pExit->MemoryAccess.AccessInfo.AccessType != 3);
1800 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
1801
1802 /*
1803 * Whatever we do, we must clear pending event injection upon resume.
1804 */
1805 if (pExit->VpContext.ExecutionState.InterruptionPending)
1806 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1807
1808 /*
1809 * Ask PGM for information about the given GCPhys. We need to check if we're
1810 * out of sync first.
1811 */
1812 NEMHCWINHMACPCCSTATE State = { pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite, false, false };
1813 PGMPHYSNEMPAGEINFO Info;
1814 int rc = PGMPhysNemPageInfoChecker(pVM, pVCpu, pExit->MemoryAccess.Gpa, State.fWriteAccess, &Info,
1815 nemHCWinHandleMemoryAccessPageCheckerCallback, &State);
1816 if (RT_SUCCESS(rc))
1817 {
1818 if (Info.fNemProt & ( pExit->MemoryAccess.AccessInfo.AccessType == WHvMemoryAccessWrite
1819 ? NEM_PAGE_PROT_WRITE : NEM_PAGE_PROT_READ))
1820 {
1821 if (State.fCanResume)
1822 {
1823 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; restarting (%s)\n",
1824 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1825 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1826 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1827 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1828 return VINF_SUCCESS;
1829 }
1830 }
1831 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp (=>%RHp) %s fProt=%u%s%s%s; emulating (%s)\n",
1832 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1833 pExit->MemoryAccess.Gpa, Info.HCPhys, g_apszPageStates[Info.u2NemState], Info.fNemProt,
1834 Info.fHasHandlers ? " handlers" : "", Info.fZeroPage ? " zero-pg" : "",
1835 State.fDidSomething ? "" : " no-change", g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1836 }
1837 else
1838 Log4(("MemExit/%u: %04x:%08RX64/%s: %RGp rc=%Rrc%s; emulating (%s)\n",
1839 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
1840 pExit->MemoryAccess.Gpa, rc, State.fDidSomething ? " modified-backing" : "",
1841 g_apszHvInterceptAccessTypes[pExit->MemoryAccess.AccessInfo.AccessType]));
1842
1843 /*
1844 * Emulate the memory access, either access handler or special memory.
1845 */
1846 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
1847 rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
1848 AssertRCReturn(rc, rc);
1849
1850 if (pExit->VpContext.ExecutionState.Reserved0 || pExit->VpContext.ExecutionState.Reserved1)
1851 Log(("MemExit/Hdr/State: Reserved0=%#x Reserved1=%#x\n", pExit->VpContext.ExecutionState.Reserved0, pExit->VpContext.ExecutionState.Reserved1));
1852 //if (pExit->MemoryAccess.InstructionByteCount > 0)
1853 // Log4(("InstructionByteCount=%#x %.16Rhxs\n", pExit->MemoryAccess.InstructionByteCount, pExit->MemoryAccess.InstructionBytes));
1854
1855 VBOXSTRICTRC rcStrict;
1856 if (pExit->MemoryAccess.InstructionByteCount > 0)
1857 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
1858 pExit->MemoryAccess.InstructionBytes, pExit->MemoryAccess.InstructionByteCount);
1859 else
1860 rcStrict = IEMExecOne(pVCpu);
1861 /** @todo do we need to do anything wrt debugging here? */
1862 return rcStrict;
1863}
1864#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
1865
1866
1867#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
1868/**
1869 * Deals with I/O port intercept message.
1870 *
1871 * @returns Strict VBox status code.
1872 * @param pVM The cross context VM structure.
1873 * @param pVCpu The cross context per CPU structure.
1874 * @param pMsg The message.
1875 * @param pCtx The register context.
1876 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
1877 */
1878NEM_TMPL_STATIC VBOXSTRICTRC
1879nemHCWinHandleMessageIoPort(PVM pVM, PVMCPU pVCpu, HV_X64_IO_PORT_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
1880{
1881 Assert( pMsg->AccessInfo.AccessSize == 1
1882 || pMsg->AccessInfo.AccessSize == 2
1883 || pMsg->AccessInfo.AccessSize == 4);
1884 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
1885 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
1886 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
1887
1888 /*
1889 * Whatever we do, we must clear pending event injection upon resume.
1890 */
1891 if (pMsg->Header.ExecutionState.InterruptionPending)
1892 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
1893
1894 VBOXSTRICTRC rcStrict;
1895 if (!pMsg->AccessInfo.StringOp)
1896 {
1897 /*
1898 * Simple port I/O.
1899 */
1900 static uint32_t const s_fAndMask[8] =
1901 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
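        /* (Illustrative: an AccessSize of 1 merges only AL and 2 only AX into
           RAX, while a 4 byte access replaces RAX wholesale because writing
           EAX zero-extends; the assertion above rules out other indexes.) */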
1902 uint32_t const fAndMask = s_fAndMask[pMsg->AccessInfo.AccessSize];
1903 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
1904 {
1905 rcStrict = IOMIOPortWrite(pVM, pVCpu, pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize);
1906 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
1907 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1908 pMsg->PortNumber, (uint32_t)pMsg->Rax & fAndMask, pMsg->AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
1909 if (IOM_SUCCESS(rcStrict))
1910 {
1911 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1912 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
1913 }
1914 }
1915 else
1916 {
1917 uint32_t uValue = 0;
1918 rcStrict = IOMIOPortRead(pVM, pVCpu, pMsg->PortNumber, &uValue, pMsg->AccessInfo.AccessSize);
1919 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
1920 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1921 pMsg->PortNumber, pMsg->AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
1922 if (IOM_SUCCESS(rcStrict))
1923 {
1924 if (pMsg->AccessInfo.AccessSize != 4)
1925 pCtx->rax = (pMsg->Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
1926 else
1927 pCtx->rax = uValue;
1928 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
1929 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pMsg->Rax, pCtx->rax));
1930 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1931 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
1932 }
1933 }
1934 }
1935 else
1936 {
1937 /*
1938 * String port I/O.
1939 */
1940 /** @todo Someone at Microsoft please explain how we can get the address mode
1941 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
1942 * getting the default mode, it can always be overridden by a prefix. This
1943 * forces us to interpret the instruction from opcodes, which is suboptimal.
1944 * Both AMD-V and VT-x include the address size in the exit info, at least on
1945 * CPUs that are reasonably new.
1946 *
1947 * Of course, it's possible this is undocumented behavior and we just need to do some
1948 * experiments to figure out how it's communicated. Alternatively, we can scan
1949 * the opcode bytes for possible evil prefixes.
1950 */
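        /* Illustrative example of the problem: in 64-bit mode the bytes
               67 F3 6C   (addr-size override + REP INSB)
           use ECX/EDI rather than RCX/RDI, which the CS attributes alone
           cannot tell us; hence the full IEMExecOne() below. */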
1951 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
1952 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
1953 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
1954 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
1955 NEM_WIN_COPY_BACK_SEG(pCtx->es, pMsg->EsSegment);
1956 pCtx->rax = pMsg->Rax;
1957 pCtx->rcx = pMsg->Rcx;
1958 pCtx->rdi = pMsg->Rdi;
1959 pCtx->rsi = pMsg->Rsi;
1960# ifdef IN_RING0
1961 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
1962 if (rcStrict != VINF_SUCCESS)
1963 return rcStrict;
1964# else
1965 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
1966 AssertRCReturn(rc, rc);
1967 RT_NOREF(pGVCpu);
1968# endif
1969
1970 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
1971 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
1972 pMsg->AccessInfo.RepPrefix ? "REP " : "",
1973 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "OUTS" : "INS",
1974 pMsg->PortNumber, pMsg->AccessInfo.AccessSize ));
1975 rcStrict = IEMExecOne(pVCpu);
1976 }
1977 if (IOM_SUCCESS(rcStrict))
1978 {
1979 /*
1980 * Do debug checks.
1981 */
1982 if ( pMsg->Header.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
1983 || (pMsg->Header.Rflags & X86_EFL_TF)
1984 || DBGFBpIsHwIoArmed(pVM) )
1985 {
1986 /** @todo Debugging. */
1987 }
1988 }
1989 return rcStrict;
1990}
1991#elif defined(IN_RING3)
1992/**
1993 * Deals with I/O port access exits (WHvRunVpExitReasonX64IoPortAccess).
1994 *
1995 * @returns Strict VBox status code.
1996 * @param pVM The cross context VM structure.
1997 * @param pVCpu The cross context per CPU structure.
1998 * @param pExit The VM exit information to handle.
1999 * @param pCtx The register context.
2000 * @sa nemHCWinHandleMessageIoPort
2001 */
2002NEM_TMPL_STATIC VBOXSTRICTRC
2003nemR3WinHandleExitIoPort(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2004{
2005 Assert( pExit->IoPortAccess.AccessInfo.AccessSize == 1
2006 || pExit->IoPortAccess.AccessInfo.AccessSize == 2
2007 || pExit->IoPortAccess.AccessInfo.AccessSize == 4);
2008 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2009
2010 /*
2011 * Whatever we do, we must clear pending event injection upon resume.
2012 */
2013 if (pExit->VpContext.ExecutionState.InterruptionPending)
2014 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2015
2016 VBOXSTRICTRC rcStrict;
2017 if (!pExit->IoPortAccess.AccessInfo.StringOp)
2018 {
2019 /*
2020 * Simple port I/O.
2021 */
2022 static uint32_t const s_fAndMask[8] =
2023 { UINT32_MAX, UINT32_C(0xff), UINT32_C(0xffff), UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX };
2024 uint32_t const fAndMask = s_fAndMask[pExit->IoPortAccess.AccessInfo.AccessSize];
2025 if (pExit->IoPortAccess.AccessInfo.IsWrite)
2026 {
2027 rcStrict = IOMIOPortWrite(pVM, pVCpu, pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2028 pExit->IoPortAccess.AccessInfo.AccessSize);
2029 Log4(("IOExit/%u: %04x:%08RX64/%s: OUT %#x, %#x LB %u rcStrict=%Rrc\n",
2030 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2031 pExit->IoPortAccess.PortNumber, (uint32_t)pExit->IoPortAccess.Rax & fAndMask,
2032 pExit->IoPortAccess.AccessInfo.AccessSize, VBOXSTRICTRC_VAL(rcStrict) ));
2033 if (IOM_SUCCESS(rcStrict))
2034 {
2035 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2036 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2037 }
2038 }
2039 else
2040 {
2041 uint32_t uValue = 0;
2042 rcStrict = IOMIOPortRead(pVM, pVCpu, pExit->IoPortAccess.PortNumber, &uValue, pExit->IoPortAccess.AccessInfo.AccessSize);
2043 Log4(("IOExit/%u: %04x:%08RX64/%s: IN %#x LB %u -> %#x, rcStrict=%Rrc\n",
2044 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2045 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2046 if (IOM_SUCCESS(rcStrict))
2047 {
2048 if (pExit->IoPortAccess.AccessInfo.AccessSize != 4)
2049 pCtx->rax = (pExit->IoPortAccess.Rax & ~(uint64_t)fAndMask) | (uValue & fAndMask);
2050 else
2051 pCtx->rax = uValue;
2052 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RAX;
2053 Log4(("IOExit/%u: RAX %#RX64 -> %#RX64\n", pVCpu->idCpu, pExit->IoPortAccess.Rax, pCtx->rax));
2054 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2055 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2056 }
2057 }
2058 }
2059 else
2060 {
2061 /*
2062 * String port I/O.
2063 */
2064 /** @todo Someone at Microsoft please explain how we can get the address mode
2065 * from the IoPortAccess.VpContext. CS.Attributes is only sufficient for
2066 * getting the default mode, it can always be overridden by a prefix. This
2067 * forces us to interpret the instruction from opcodes, which is suboptimal.
2068 * Both AMD-V and VT-x include the address size in the exit info, at least on
2069 * CPUs that are reasonably new.
2070 *
2071 * Of course, it's possible this is undocumented behavior and we just need to do some
2072 * experiments to figure out how it's communicated. Alternatively, we can scan
2073 * the opcode bytes for possible evil prefixes.
2074 */
2075 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2076 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDI | CPUMCTX_EXTRN_RSI
2077 | CPUMCTX_EXTRN_DS | CPUMCTX_EXTRN_ES);
2078 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pExit->IoPortAccess.Ds);
2079 NEM_WIN_COPY_BACK_SEG(pCtx->es, pExit->IoPortAccess.Es);
2080 pCtx->rax = pExit->IoPortAccess.Rax;
2081 pCtx->rcx = pExit->IoPortAccess.Rcx;
2082 pCtx->rdi = pExit->IoPortAccess.Rdi;
2083 pCtx->rsi = pExit->IoPortAccess.Rsi;
2084# ifdef IN_RING0
2085 rcStrict = nemR0WinImportStateStrict(pGVCpu->pGVM, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IOExit");
2086 if (rcStrict != VINF_SUCCESS)
2087 return rcStrict;
2088# else
2089 int rc = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM);
2090 AssertRCReturn(rc, rc);
2091# endif
2092
2093 Log4(("IOExit/%u: %04x:%08RX64/%s: %s%s %#x LB %u (emulating)\n",
2094 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2095 pExit->IoPortAccess.AccessInfo.RepPrefix ? "REP " : "",
2096 pExit->IoPortAccess.AccessInfo.IsWrite ? "OUTS" : "INS",
2097 pExit->IoPortAccess.PortNumber, pExit->IoPortAccess.AccessInfo.AccessSize ));
2098 rcStrict = IEMExecOne(pVCpu);
2099 }
2100 if (IOM_SUCCESS(rcStrict))
2101 {
2102 /*
2103 * Do debug checks.
2104 */
2105 if ( pExit->VpContext.ExecutionState.DebugActive /** @todo Microsoft: Does DebugActive only reflect DR7? */
2106 || (pExit->VpContext.Rflags & X86_EFL_TF)
2107 || DBGFBpIsHwIoArmed(pVM) )
2108 {
2109 /** @todo Debugging. */
2110 }
2111 }
2112 return rcStrict;
2113
2114}
2115#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2116
2117#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2118/**
2119 * Deals with interrupt window message.
2120 *
2121 * @returns Strict VBox status code.
2122 * @param pVM The cross context VM structure.
2123 * @param pVCpu The cross context per CPU structure.
2124 * @param pMsg The message.
2125 * @param pCtx The register context.
2126 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2127 * @sa nemR3WinHandleExitInterruptWindow
2128 */
2129NEM_TMPL_STATIC VBOXSTRICTRC
2130nemHCWinHandleMessageInterruptWindow(PVM pVM, PVMCPU pVCpu, HV_X64_INTERRUPT_WINDOW_MESSAGE const *pMsg,
2131 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2132{
2133 /*
2134 * Assert message sanity.
2135 */
2136 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE
2137 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ // READ & WRITE are probably not used here
2138 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2139 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2140 AssertMsg(pMsg->Type == HvX64PendingInterrupt || pMsg->Type == HvX64PendingNmi, ("%#x\n", pMsg->Type));
2141
2142 /*
2143 * Just copy the state we've got and handle it in the loop for now.
2144 */
2145 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2146 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2147 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2148 pMsg->Type, RT_BOOL(pMsg->Header.Rflags & X86_EFL_IF), pMsg->Header.ExecutionState.InterruptShadow));
2149
2150 /** @todo call nemHCWinHandleInterruptFF */
2151 RT_NOREF(pVM, pGVCpu);
2152 return VINF_SUCCESS;
2153}
2154#elif defined(IN_RING3)
2155/**
2156 * Deals with interrupt window exits (WHvRunVpExitReasonX64InterruptWindow).
2157 *
2158 * @returns Strict VBox status code.
2159 * @param pVM The cross context VM structure.
2160 * @param pVCpu The cross context per CPU structure.
2161 * @param pExit The VM exit information to handle.
2162 * @param pCtx The register context.
2163 * @sa nemHCWinHandleMessageInterruptWindow
2164 */
2165NEM_TMPL_STATIC VBOXSTRICTRC
2166nemR3WinHandleExitInterruptWindow(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2167{
2168 /*
2169 * Assert message sanity.
2170 */
2171 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2172 AssertMsg( pExit->InterruptWindow.DeliverableType == WHvX64PendingInterrupt
2173 || pExit->InterruptWindow.DeliverableType == WHvX64PendingNmi,
2174 ("%#x\n", pExit->InterruptWindow.DeliverableType));
2175
2176 /*
2177 * Just copy the state we've got and handle it in the loop for now.
2178 */
2179 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2180 Log4(("IntWinExit/%u: %04x:%08RX64/%s: %u IF=%d InterruptShadow=%d\n",
2181 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2182 pExit->InterruptWindow.DeliverableType, RT_BOOL(pExit->VpContext.Rflags & X86_EFL_IF),
2183 pExit->VpContext.ExecutionState.InterruptShadow));
2184
2185 /** @todo call nemHCWinHandleInterruptFF */
2186 RT_NOREF(pVM);
2187 return VINF_SUCCESS;
2188}
2189#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2190
2191#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2192/**
2193 * Deals with CPUID intercept message.
2194 *
2195 * @returns Strict VBox status code.
2196 * @param pVCpu The cross context per CPU structure.
2197 * @param pMsg The message.
2198 * @param pCtx The register context.
2199 */
2200NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageCpuId(PVMCPU pVCpu, HV_X64_CPUID_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx)
2201{
2202 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2203
2204 /*
2205 * Soak up state and execute the instruction.
2206 *
2207 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2208 * function and make everyone use it.
2209 */
2210 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2211 * only get weirder with nested VT-x and AMD-V support. */
2212 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2213
2214 /* Copy in the low register values (top is always cleared). */
2215 pCtx->rax = (uint32_t)pMsg->Rax;
2216 pCtx->rcx = (uint32_t)pMsg->Rcx;
2217 pCtx->rdx = (uint32_t)pMsg->Rdx;
2218 pCtx->rbx = (uint32_t)pMsg->Rbx;
2219 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2220
2221 /* Get the correct values. */
2222 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2223
2224 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2225 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2226 pMsg->Rax, pMsg->Rcx, pMsg->Rdx, pMsg->Rbx,
2227 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx,
2228 pMsg->DefaultResultRax, pMsg->DefaultResultRcx, pMsg->DefaultResultRdx, pMsg->DefaultResultRbx));
2229
2230 /* Move RIP and we're done. */
2231 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2232
2233 return VINF_SUCCESS;
2234}
2235#elif defined(IN_RING3)
2236/**
2237 * Deals with CPUID exits (WHvRunVpExitReasonX64Cpuid).
2238 *
2239 * @returns Strict VBox status code.
2240 * @param pVM The cross context VM structure.
2241 * @param pVCpu The cross context per CPU structure.
2242 * @param pExit The VM exit information to handle.
2243 * @param pCtx The register context.
2244 * @sa nemHCWinHandleMessageCpuId
2245 */
2246NEM_TMPL_STATIC VBOXSTRICTRC
2247nemR3WinHandleExitCpuId(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2248{
2249 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2250
2251 /*
2252 * Soak up state and execute the instruction.
2253 *
2254 * Note! If this grows slightly more complicated, combine into an IEMExecDecodedCpuId
2255 * function and make everyone use it.
2256 */
2257 /** @todo Combine implementations into IEMExecDecodedCpuId as this will
2258 * only get weirder with nested VT-x and AMD-V support. */
2259 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2260
2261 /* Copy in the low register values (top is always cleared). */
2262 pCtx->rax = (uint32_t)pExit->CpuidAccess.Rax;
2263 pCtx->rcx = (uint32_t)pExit->CpuidAccess.Rcx;
2264 pCtx->rdx = (uint32_t)pExit->CpuidAccess.Rdx;
2265 pCtx->rbx = (uint32_t)pExit->CpuidAccess.Rbx;
2266 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX);
2267
2268 /* Get the correct values. */
2269 CPUMGetGuestCpuId(pVCpu, pCtx->eax, pCtx->ecx, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
2270
2271 Log4(("CpuIdExit/%u: %04x:%08RX64/%s: rax=%08RX64 / rcx=%08RX64 / rdx=%08RX64 / rbx=%08RX64 -> %08RX32 / %08RX32 / %08RX32 / %08RX32 (hv: %08RX64 / %08RX64 / %08RX64 / %08RX64)\n",
2272 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2273 pExit->CpuidAccess.Rax, pExit->CpuidAccess.Rcx, pExit->CpuidAccess.Rdx, pExit->CpuidAccess.Rbx,
2274 pCtx->eax, pCtx->ecx, pCtx->edx, pCtx->ebx,
2275 pExit->CpuidAccess.DefaultResultRax, pExit->CpuidAccess.DefaultResultRcx, pExit->CpuidAccess.DefaultResultRdx, pExit->CpuidAccess.DefaultResultRbx));
2276
2277 /* Move RIP and we're done. */
2278 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2279
2280 RT_NOREF_PV(pVM);
2281 return VINF_SUCCESS;
2282}
2283#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2284
2285#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2286/**
2287 * Deals with MSR intercept message.
2288 *
2289 * @returns Strict VBox status code.
2290 * @param pVCpu The cross context per CPU structure.
2291 * @param pMsg The message.
2292 * @param pCtx The register context.
2293 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2294 * @sa nemR3WinHandleExitMsr
2295 */
2296NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageMsr(PVMCPU pVCpu, HV_X64_MSR_INTERCEPT_MESSAGE const *pMsg,
2297 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2298{
2299 /*
2300 * A wee bit of sanity first.
2301 */
2302 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2303 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2304 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE);
2305
2306 /*
2307 * Check CPL as that's common to both RDMSR and WRMSR.
2308 */
2309 VBOXSTRICTRC rcStrict;
2310 if (pMsg->Header.ExecutionState.Cpl == 0)
2311 {
2312 /*
2313 * Get all the MSR state. Since we're getting EFER, we also need to
2314 * get CR0, CR4 and CR3.
2315 */
2316 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2317 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2318 CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2319 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2320 "MSRs");
2321 if (rcStrict == VINF_SUCCESS)
2322 {
2323
2324 /*
2325 * Handle writes.
2326 */
2327 if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2328 {
2329 rcStrict = CPUMSetGuestMsr(pVCpu, pMsg->MsrNumber, RT_MAKE_U64((uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx));
2330 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n",
2331 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2332 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2333 if (rcStrict == VINF_SUCCESS)
2334 {
2335 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2336 return VINF_SUCCESS;
2337 }
2338# ifndef IN_RING3
2339 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2340 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2341 rcStrict = VINF_CPUM_R3_MSR_WRITE;
2342 return rcStrict;
2343# else
2344 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n",
2345 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2346 pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2347# endif
2348 }
2349 /*
2350 * Handle reads.
2351 */
2352 else
2353 {
2354 uint64_t uValue = 0;
2355 rcStrict = CPUMQueryGuestMsr(pVCpu, pMsg->MsrNumber, &uValue);
2356 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2357 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2358 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2359 if (rcStrict == VINF_SUCCESS)
2360 {
2361 pCtx->rax = (uint32_t)uValue;
2362 pCtx->rdx = uValue >> 32;
2363 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2364 nemHCWinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pMsg->Header);
2365 return VINF_SUCCESS;
2366 }
2367# ifndef IN_RING3
2368 /* move to ring-3 and handle the trap/whatever there, as we want to LogRel this. */
2369 if (rcStrict == VERR_CPUM_RAISE_GP_0)
2370 rcStrict = VINF_CPUM_R3_MSR_READ;
2371 return rcStrict;
2372# else
2373 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n",
2374 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2375 pMsg->MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2376# endif
2377 }
2378 }
2379 else
2380 {
2381 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2382 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2383 pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE ? "WR" : "RD",
2384 pMsg->MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2385 return rcStrict;
2386 }
2387 }
2388 else if (pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE)
2389 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n",
2390 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2391 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber, (uint32_t)pMsg->Rax, (uint32_t)pMsg->Rdx ));
2392 else
2393 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n",
2394 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header),
2395 pMsg->Header.ExecutionState.Cpl, pMsg->MsrNumber));
2396
2397 /*
2398 * If we get down here, we're supposed to #GP(0).
2399 */
2400 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MSR");
2401 if (rcStrict == VINF_SUCCESS)
2402 {
2403 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2404 if (rcStrict == VINF_IEM_RAISED_XCPT)
2405 rcStrict = VINF_SUCCESS;
2406 else if (rcStrict != VINF_SUCCESS)
2407 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2408 }
2409 return rcStrict;
2410}
2411#elif defined(IN_RING3)
2412/**
2413 * Deals with MSR access exits (WHvRunVpExitReasonX64MsrAccess).
2414 *
2415 * @returns Strict VBox status code.
2416 * @param pVM The cross context VM structure.
2417 * @param pVCpu The cross context per CPU structure.
2418 * @param pExit The VM exit information to handle.
2419 * @param pCtx The register context.
2420 * @sa nemHCWinHandleMessageMsr
2421 */
2422NEM_TMPL_STATIC VBOXSTRICTRC
2423nemR3WinHandleExitMsr(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2424{
2425 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2426
2427 /*
2428 * Check CPL as that's common to both RDMSR and WRMSR.
2429 */
2430 VBOXSTRICTRC rcStrict;
2431 if (pExit->VpContext.ExecutionState.Cpl == 0)
2432 {
2433 /*
2434 * Get all the MSR state. Since we're getting EFER, we also need to
2435 * get CR0, CR4 and CR3.
2436 */
2437 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2438 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2439 CPUMCTX_EXTRN_ALL_MSRS | CPUMCTX_EXTRN_CR0
2440 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4,
2441 "MSRs");
2442 if (rcStrict == VINF_SUCCESS)
2443 {
2444 /*
2445 * Handle writes.
2446 */
2447 if (pExit->MsrAccess.AccessInfo.IsWrite)
2448 {
2449 rcStrict = CPUMSetGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber,
2450 RT_MAKE_U64((uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx));
2451 Log4(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2452 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2453 (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx, VBOXSTRICTRC_VAL(rcStrict) ));
2454 if (rcStrict == VINF_SUCCESS)
2455 {
2456 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2457 return VINF_SUCCESS;
2458 }
2459 LogRel(("MsrExit/%u: %04x:%08RX64/%s: WRMSR %08x, %08x:%08x -> %Rrc!\n", pVCpu->idCpu,
2460 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2461 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx,
2462 VBOXSTRICTRC_VAL(rcStrict) ));
2463 }
2464 /*
2465 * Handle reads.
2466 */
2467 else
2468 {
2469 uint64_t uValue = 0;
2470 rcStrict = CPUMQueryGuestMsr(pVCpu, pExit->MsrAccess.MsrNumber, &uValue);
2471 Log4(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu,
2472 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2473 pExit->MsrAccess.MsrNumber, uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2474 if (rcStrict == VINF_SUCCESS)
2475 {
2476 pCtx->rax = (uint32_t)uValue;
2477 pCtx->rdx = uValue >> 32;
2478 pCtx->fExtrn &= ~(CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX);
2479 nemR3WinAdvanceGuestRipAndClearRF(pVCpu, pCtx, &pExit->VpContext);
2480 return VINF_SUCCESS;
2481 }
2482 LogRel(("MsrExit/%u: %04x:%08RX64/%s: RDMSR %08x -> %08RX64 / %Rrc\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2483 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->MsrAccess.MsrNumber,
2484 uValue, VBOXSTRICTRC_VAL(rcStrict) ));
2485 }
2486 }
2487 else
2488 {
2489 LogRel(("MsrExit/%u: %04x:%08RX64/%s: %sMSR %08x -> %Rrc - msr state import\n",
2490 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2491 pExit->MsrAccess.AccessInfo.IsWrite ? "WR" : "RD", pExit->MsrAccess.MsrNumber, VBOXSTRICTRC_VAL(rcStrict) ));
2492 return rcStrict;
2493 }
2494 }
2495 else if (pExit->MsrAccess.AccessInfo.IsWrite)
2496 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); WRMSR %08x, %08x:%08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2497 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2498 pExit->MsrAccess.MsrNumber, (uint32_t)pExit->MsrAccess.Rax, (uint32_t)pExit->MsrAccess.Rdx ));
2499 else
2500 Log4(("MsrExit/%u: %04x:%08RX64/%s: CPL %u -> #GP(0); RDMSR %08x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2501 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.ExecutionState.Cpl,
2502 pExit->MsrAccess.MsrNumber));
2503
2504 /*
2505 * If we get down here, we're supposed to #GP(0).
2506 */
2507 rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "MSR");
2508 if (rcStrict == VINF_SUCCESS)
2509 {
2510 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0, 0, 0);
2511 if (rcStrict == VINF_IEM_RAISED_XCPT)
2512 rcStrict = VINF_SUCCESS;
2513 else if (rcStrict != VINF_SUCCESS)
2514 Log4(("MsrExit/%u: Injecting #GP(0) failed: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict) ));
2515 }
2516
2517 RT_NOREF_PV(pVM);
2518 return rcStrict;
2519}
2520#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2521
2522
2523/**
2524 * Worker for nemHCWinHandleMessageException & nemR3WinHandleExitException that
2525 * checks if the given opcodes are of interest at all.
2526 *
2527 * @returns true if interesting, false if not.
2528 * @param cbOpcodes Number of opcode bytes available.
2529 * @param pbOpcodes The opcode bytes.
2530 * @param f64BitMode Whether we're in 64-bit mode.
2531 */
2532DECLINLINE(bool) nemHcWinIsInterestingUndefinedOpcode(uint8_t cbOpcodes, uint8_t const *pbOpcodes, bool f64BitMode)
2533{
2534 /*
2535 * Currently only interested in VMCALL and VMMCALL.
2536 */
2537 while (cbOpcodes >= 3)
2538 {
2539 switch (pbOpcodes[0])
2540 {
2541 case 0x0f:
2542 switch (pbOpcodes[1])
2543 {
2544 case 0x01:
2545 switch (pbOpcodes[2])
2546 {
2547 case 0xc1: /* 0f 01 c1 VMCALL */
2548 return true;
2549 case 0xd9: /* 0f 01 d9 VMMCALL */
2550 return true;
2551 default:
2552 break;
2553 }
2554 break;
2555 }
2556 break;
2557
2558 default:
2559 return false;
2560
2561 /* prefixes */
2562 case 0x40: case 0x41: case 0x42: case 0x43: case 0x44: case 0x45: case 0x46: case 0x47:
2563 case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4c: case 0x4d: case 0x4e: case 0x4f:
2564 if (!f64BitMode)
2565 return false;
2566 RT_FALL_THRU();
2567 case X86_OP_PRF_CS:
2568 case X86_OP_PRF_SS:
2569 case X86_OP_PRF_DS:
2570 case X86_OP_PRF_ES:
2571 case X86_OP_PRF_FS:
2572 case X86_OP_PRF_GS:
2573 case X86_OP_PRF_SIZE_OP:
2574 case X86_OP_PRF_SIZE_ADDR:
2575 case X86_OP_PRF_LOCK:
2576 case X86_OP_PRF_REPZ:
2577 case X86_OP_PRF_REPNZ:
2578 cbOpcodes--;
2579 pbOpcodes++;
2580 continue;
2581 }
2582 break;
2583 }
2584 return false;
2585}
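
/** @note Illustrative inputs: the byte sequences 0F 01 C1 (VMCALL) and
 * 0F 01 D9 (VMMCALL) return true, as does 48 0F 01 D9 when f64BitMode is set
 * since the REX prefix gets skipped, while e.g. CC (INT3) returns false. */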
2586
2587
2588#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2589/**
2590 * Copies state included in an exception intercept message.
2591 *
2592 * @param pVCpu The cross context per CPU structure.
2593 * @param pMsg The message.
2594 * @param pCtx The register context.
2595 * @param fClearXcpt Clear pending exception.
2596 */
2597DECLINLINE(void) nemHCWinCopyStateFromExceptionMessage(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg,
2598 PCPUMCTX pCtx, bool fClearXcpt)
2599{
2600 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, &pMsg->Header);
2601 pCtx->fExtrn &= ~( CPUMCTX_EXTRN_GPRS_MASK | CPUMCTX_EXTRN_SS | CPUMCTX_EXTRN_DS
2602 | (fClearXcpt ? CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT : 0) );
2603 pCtx->rax = pMsg->Rax;
2604 pCtx->rcx = pMsg->Rcx;
2605 pCtx->rdx = pMsg->Rdx;
2606 pCtx->rbx = pMsg->Rbx;
2607 pCtx->rsp = pMsg->Rsp;
2608 pCtx->rbp = pMsg->Rbp;
2609 pCtx->rsi = pMsg->Rsi;
2610 pCtx->rdi = pMsg->Rdi;
2611 pCtx->r8 = pMsg->R8;
2612 pCtx->r9 = pMsg->R9;
2613 pCtx->r10 = pMsg->R10;
2614 pCtx->r11 = pMsg->R11;
2615 pCtx->r12 = pMsg->R12;
2616 pCtx->r13 = pMsg->R13;
2617 pCtx->r14 = pMsg->R14;
2618 pCtx->r15 = pMsg->R15;
2619 NEM_WIN_COPY_BACK_SEG(pCtx->ds, pMsg->DsSegment);
2620 NEM_WIN_COPY_BACK_SEG(pCtx->ss, pMsg->SsSegment);
2621}
2622#elif defined(IN_RING3)
2623/**
2624 * Copies state included in an exception intercept exit.
2625 *
2626 * @param pVCpu The cross context per CPU structure.
2627 * @param pExit The VM exit information.
2628 * @param pCtx The register context.
2629 * @param fClearXcpt Clear pending exception.
2630 */
2631DECLINLINE(void) nemR3WinCopyStateFromExceptionMessage(PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit,
2632 PCPUMCTX pCtx, bool fClearXcpt)
2633{
2634 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2635 if (fClearXcpt)
2636 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT;
2637}
2638#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2639
2640
2641#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2642/**
2643 * Deals with exception intercept message (HvMessageTypeX64ExceptionIntercept).
2644 *
2645 * @returns Strict VBox status code.
2646 * @param pVCpu The cross context per CPU structure.
2647 * @param pMsg The message.
2648 * @param pCtx The register context.
2649 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2650 * @sa nemR3WinHandleExitException
2651 */
2652NEM_TMPL_STATIC VBOXSTRICTRC
2653nemHCWinHandleMessageException(PVMCPU pVCpu, HV_X64_EXCEPTION_INTERCEPT_MESSAGE const *pMsg, PCPUMCTX pCtx, PGVMCPU pGVCpu)
2654{
2655 /*
2656 * Assert sanity.
2657 */
2658 AssertMsg(pMsg->Header.InstructionLength < 0x10, ("%#x\n", pMsg->Header.InstructionLength));
2659 Assert( pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_READ
2660 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_WRITE
2661 || pMsg->Header.InterceptAccessType == HV_INTERCEPT_ACCESS_EXECUTE);
2662 Assert(pMsg->Header.ExecutionState.InterruptionPending);
2663
2664 /*
2665 * Handle the intercept.
2666 */
2667 switch (pMsg->ExceptionVector)
2668 {
2669 /*
2670 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
2671 * and need to turn them over to GIM.
2672 */
2673 case X86_XCPT_UD:
2674 if (nemHcWinIsInterestingUndefinedOpcode(pMsg->InstructionByteCount, pMsg->InstructionBytes,
2675 pMsg->Header.ExecutionState.EferLma && pMsg->Header.CsSegment.Long ))
2676 {
2677 nemHCWinCopyStateFromExceptionMessage(pVCpu, pMsg, pCtx, true /*fClearXcpt*/);
2678 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2679 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "#UD");
2680 if (rcStrict == VINF_SUCCESS)
2681 {
2682 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pMsg->Header.Rip, pMsg->InstructionBytes,
2683 pMsg->InstructionByteCount);
2684 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
2685 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
2686 }
2687 else
2688 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> state import (emulate) -> %Rrc\n",
2689 pVCpu->idCpu, pMsg->Header.CsSegment.Selector, pMsg->Header.Rip,
2690 nemHCWinExecStateToLogStr(&pMsg->Header), VBOXSTRICTRC_VAL(rcStrict) ));
2691 return rcStrict;
2692 }
2693 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu, pMsg->Header.CsSegment.Selector,
2694 pMsg->Header.Rip, nemHCWinExecStateToLogStr(&pMsg->Header), pMsg->InstructionByteCount, pMsg->InstructionBytes ));
2695 break;
2696
2697 /*
2698 * Filter debug exceptions.
2699 */
2700 case X86_XCPT_DB:
2701 break;
2702
2703 case X86_XCPT_BP:
2704 break;
2705
2706 /* This shouldn't happen. */
2707 default:
2708 AssertLogRelMsgFailedReturn(("ExceptionVector=%#x\n", pMsg->ExceptionVector), VERR_IEM_IPE_6);
2709 }
2710
2711 return VINF_SUCCESS;
2712}
2713#elif defined(IN_RING3)
2714/**
2715 * Deals with exception exits (WHvRunVpExitReasonException).
2716 *
2717 * @returns Strict VBox status code.
2718 * @param pVM The cross context VM structure.
2719 * @param pVCpu The cross context per CPU structure.
2720 * @param pExit The VM exit information to handle.
2721 * @param pCtx The register context.
2722 * @sa nemHCWinHandleMessageException
2723 */
2724NEM_TMPL_STATIC VBOXSTRICTRC
2725nemR3WinHandleExitException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2726{
2727 /*
2728 * Assert sanity.
2729 */
2730 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2731#if 0
2732 Log4(("XcptExit/%u: %04x:%08RX64/%s: %x\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpException.ExceptionType));
2733 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
2734 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "#UD");
2735 if (rcStrict != VINF_SUCCESS)
2736 return rcStrict;
2737#endif
2738 Assert(pExit->VpContext.ExecutionState.InterruptionPending);
2739
2740 /*
2741 * Handle the intercept.
2742 */
2743 switch (pExit->VpException.ExceptionType)
2744 {
2745 /*
2746 * We get undefined opcodes on VMMCALL(AMD) & VMCALL(Intel) instructions
2747 * and need to turn them over to GIM.
2748 */
2749 case X86_XCPT_UD:
2750 if (nemHcWinIsInterestingUndefinedOpcode(pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes,
2751 pExit->VpContext.ExecutionState.EferLma && pExit->VpContext.Cs.Long ))
2752 {
2753 nemR3WinCopyStateFromExceptionMessage(pVCpu, pExit, pCtx, true /*fClearXcpt*/);
2754 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2755 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "#UD");
2756 if (rcStrict == VINF_SUCCESS)
2757 {
2758 rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pExit->VpContext.Rip,
2759 pExit->VpException.InstructionBytes,
2760 pExit->VpException.InstructionByteCount);
2761 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> emulated -> %Rrc\n",
2762 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
2763 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
2764 }
2765 else
2766 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD -> state import (emulate) -> %Rrc\n",
2767 pVCpu->idCpu, pExit->VpContext.Cs.Selector, pExit->VpContext.Rip,
2768 nemR3WinExecStateToLogStr(&pExit->VpContext), VBOXSTRICTRC_VAL(rcStrict) ));
2769 return rcStrict;
2770 }
2771 Log4(("XcptExit/%u: %04x:%08RX64/%s: #UD [%.*Rhxs] -> re-injected\n", pVCpu->idCpu,
2772 pExit->VpContext.Cs.Selector, pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext),
2773 pExit->VpException.InstructionByteCount, pExit->VpException.InstructionBytes ));
2774 break;
2775
2776 /*
2777 * Filter debug exceptions.
2778 */
2779 case X86_XCPT_DB:
2780 break;
2781
2782 case X86_XCPT_BP:
2783 break;
2784
2785 /* This shouldn't happen. */
2786 default:
2787 AssertLogRelMsgFailedReturn(("ExceptionType=%#x\n", pExit->VpException.ExceptionType), VERR_IEM_IPE_6);
2788 }
2789
2790 RT_NOREF_PV(pVM);
2791 return VINF_SUCCESS;
2792}
2793#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2794
2795
2796#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2797/**
2798 * Deals with unrecoverable exception (triple fault).
2799 *
2800 * Seen WRMSR 0x201 (IA32_MTRR_PHYSMASK0) writes from grub / debian9 ending up
2801 * here too. So we'll leave it to IEM to decide.
2802 *
2803 * @returns Strict VBox status code.
2804 * @param pVCpu The cross context per CPU structure.
2805 * @param pMsgHdr The message header.
2806 * @param pCtx The register context.
2807 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2808 * @sa nemR3WinHandleExitUnrecoverableException
2809 */
2810NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessageUnrecoverableException(PVMCPU pVCpu,
2811 HV_X64_INTERCEPT_MESSAGE_HEADER const *pMsgHdr,
2812 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2813{
2814 AssertMsg(pMsgHdr->InstructionLength < 0x10, ("%#x\n", pMsgHdr->InstructionLength));
2815
2816# if 0
2817 /*
2818 * Just copy the state we've got and handle it in the loop for now.
2819 */
2820 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
2821 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n",
2822 pVCpu->idCpu, pMsgHdr->CsSegment.Selector, pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
2823 return VINF_EM_TRIPLE_FAULT;
2824# else
2825 /*
2826 * Let IEM decide whether this is really it.
2827 */
2828 nemHCWinCopyStateFromX64Header(pVCpu, pCtx, pMsgHdr);
2829 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx,
2830 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
2831 if (rcStrict == VINF_SUCCESS)
2832 {
2833 rcStrict = IEMExecOne(pVCpu);
2834 if (rcStrict == VINF_SUCCESS)
2835 {
2836 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2837 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags ));
2838 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
2839 return VINF_SUCCESS;
2840 }
2841 if (rcStrict == VINF_EM_TRIPLE_FAULT)
2842 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2843 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags));
2844 else
2845 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2846 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2847 }
2848 else
2849 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pMsgHdr->CsSegment.Selector,
2850 pMsgHdr->Rip, nemHCWinExecStateToLogStr(pMsgHdr), pMsgHdr->Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2851 return rcStrict;
2852# endif
2853}
2854#elif defined(IN_RING3)
2855/**
2856 * Deals with unrecoverable exception exits (WHvRunVpExitReasonUnrecoverableException).
2857 *
2858 * @returns Strict VBox status code.
2859 * @param pVM The cross context VM structure.
2860 * @param pVCpu The cross context per CPU structure.
2861 * @param pExit The VM exit information to handle.
2862 * @param pCtx The register context.
2863 * @sa nemHCWinHandleMessageUnrecoverableException
2864 */
2865NEM_TMPL_STATIC VBOXSTRICTRC
2866nemR3WinHandleExitUnrecoverableException(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
2867{
2868 AssertMsg(pExit->VpContext.InstructionLength < 0x10, ("%#x\n", pExit->VpContext.InstructionLength));
2869
2870# if 0
2871 /*
2872 * Just copy the state we've got and handle it in the loop for now.
2873 */
2874 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2875 Log(("TripleExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2876 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2877 RT_NOREF_PV(pVM);
2878 return VINF_EM_TRIPLE_FAULT;
2879# else
2880 /*
2881 * Let IEM decide whether this is really it.
2882 */
2883 nemR3WinCopyStateFromX64Header(pVCpu, pCtx, &pExit->VpContext);
2884 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, NULL, pCtx,
2885 NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM | CPUMCTX_EXTRN_ALL, "TripleExit");
2886 if (rcStrict == VINF_SUCCESS)
2887 {
2888 rcStrict = IEMExecOne(pVCpu);
2889 if (rcStrict == VINF_SUCCESS)
2890 {
2891 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_SUCCESS\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2892 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2893 pCtx->fExtrn &= ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT; /* Make sure to reset pending #DB(0). */
2894 return VINF_SUCCESS;
2895 }
2896 if (rcStrict == VINF_EM_TRIPLE_FAULT)
2897 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> VINF_EM_TRIPLE_FAULT!\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2898 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags));
2899 else
2900 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (IEMExecOne)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2901 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2902 }
2903 else
2904 Log(("UnrecovExit/%u: %04x:%08RX64/%s: RFL=%#RX64 -> %Rrc (state import)\n", pVCpu->idCpu, pExit->VpContext.Cs.Selector,
2905 pExit->VpContext.Rip, nemR3WinExecStateToLogStr(&pExit->VpContext), pExit->VpContext.Rflags, VBOXSTRICTRC_VAL(rcStrict) ));
2906 RT_NOREF_PV(pVM);
2907 return rcStrict;
2908# endif
2909
2910}
2911#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
2912
2913#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
2914/**
2915 * Handles messages (VM exits).
2916 *
2917 * @returns Strict VBox status code.
2918 * @param pVM The cross context VM structure.
2919 * @param pVCpu The cross context per CPU structure.
2920 * @param pMappingHeader The message slot mapping.
2921 * @param pCtx The register context.
2922 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
2923 * @sa nemR3WinHandleExit
2924 */
2925NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinHandleMessage(PVM pVM, PVMCPU pVCpu, VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
2926 PCPUMCTX pCtx, PGVMCPU pGVCpu)
2927{
2928 if (pMappingHeader->enmVidMsgType == VidMessageHypervisorMessage)
2929 {
2930 AssertMsg(pMappingHeader->cbMessage == HV_MESSAGE_SIZE, ("%#x\n", pMappingHeader->cbMessage));
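        /* The HV_MESSAGE payload is placed directly after the mapping header in
           the message slot, hence the pointer arithmetic here. */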
2931 HV_MESSAGE const *pMsg = (HV_MESSAGE const *)(pMappingHeader + 1);
2932 switch (pMsg->Header.MessageType)
2933 {
2934 case HvMessageTypeUnmappedGpa:
2935 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
2936 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
2937 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
2938
2939 case HvMessageTypeGpaIntercept:
2940 Assert(pMsg->Header.PayloadSize == RT_UOFFSETOF(HV_X64_MEMORY_INTERCEPT_MESSAGE, DsSegment));
2941 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemIntercept);
2942 return nemHCWinHandleMessageMemory(pVM, pVCpu, &pMsg->X64MemoryIntercept, pCtx, pGVCpu);
2943
2944 case HvMessageTypeX64IoPortIntercept:
2945 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64IoPortIntercept));
2946 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
2947 return nemHCWinHandleMessageIoPort(pVM, pVCpu, &pMsg->X64IoPortIntercept, pCtx, pGVCpu);
2948
2949 case HvMessageTypeX64Halt:
2950 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
2951 Log4(("HaltExit\n"));
2952 return VINF_EM_HALT;
2953
2954 case HvMessageTypeX64InterruptWindow:
2955 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterruptWindow));
2956 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
2957 return nemHCWinHandleMessageInterruptWindow(pVM, pVCpu, &pMsg->X64InterruptWindow, pCtx, pGVCpu);
2958
2959 case HvMessageTypeX64CpuidIntercept:
2960 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64CpuIdIntercept));
2961 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
2962 return nemHCWinHandleMessageCpuId(pVCpu, &pMsg->X64CpuIdIntercept, pCtx);
2963
2964 case HvMessageTypeX64MsrIntercept:
2965 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64MsrIntercept));
2966 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
2967 return nemHCWinHandleMessageMsr(pVCpu, &pMsg->X64MsrIntercept, pCtx, pGVCpu);
2968
2969 case HvMessageTypeX64ExceptionIntercept:
2970 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64ExceptionIntercept));
2971 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
2972 return nemHCWinHandleMessageException(pVCpu, &pMsg->X64ExceptionIntercept, pCtx, pGVCpu);
2973
2974 case HvMessageTypeUnrecoverableException:
2975 Assert(pMsg->Header.PayloadSize == sizeof(pMsg->X64InterceptHeader));
2976 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
2977 return nemHCWinHandleMessageUnrecoverableException(pVCpu, &pMsg->X64InterceptHeader, pCtx, pGVCpu);
2978
2979 case HvMessageTypeInvalidVpRegisterValue:
2980 case HvMessageTypeUnsupportedFeature:
2981 case HvMessageTypeTlbPageSizeMismatch:
2982 LogRel(("Unimplemented msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
2983 AssertLogRelMsgFailedReturn(("Message type %#x not implemented!\n%.32Rhxd\n", pMsg->Header.MessageType, pMsg),
2984 VERR_NEM_IPE_3);
2985
2986 case HvMessageTypeX64ApicEoi:
2987 case HvMessageTypeX64LegacyFpError:
2988 case HvMessageTypeX64RegisterIntercept:
2989 case HvMessageTypeApicEoi:
2990 case HvMessageTypeFerrAsserted:
2991 case HvMessageTypeEventLogBufferComplete:
2992 case HvMessageTimerExpired:
2993 LogRel(("Unexpected msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
2994 AssertLogRelMsgFailedReturn(("Unexpected message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
2995 VERR_NEM_IPE_3);
2996
2997 default:
2998 LogRel(("Unknown msg:\n%.*Rhxd\n", (int)sizeof(*pMsg), pMsg));
2999 AssertLogRelMsgFailedReturn(("Unknown message on CPU #%u: %#x\n", pVCpu->idCpu, pMsg->Header.MessageType),
3000 VERR_NEM_IPE_3);
3001 }
3002 }
3003 else
3004 AssertLogRelMsgFailedReturn(("Unexpected VID message type on CPU #%u: %#x LB %u\n",
3005 pVCpu->idCpu, pMappingHeader->enmVidMsgType, pMappingHeader->cbMessage),
3006 VERR_NEM_IPE_4);
3007}
3008#elif defined(IN_RING3)
3009/**
3010 * Handles VM exits.
3011 *
3012 * @returns Strict VBox status code.
3013 * @param pVM The cross context VM structure.
3014 * @param pVCpu The cross context per CPU structure.
3015 * @param pExit The VM exit information to handle.
3016 * @param pCtx The register context.
3017 * @sa nemHCWinHandleMessage
3018 */
3019NEM_TMPL_STATIC VBOXSTRICTRC nemR3WinHandleExit(PVM pVM, PVMCPU pVCpu, WHV_RUN_VP_EXIT_CONTEXT const *pExit, PCPUMCTX pCtx)
3020{
3021 switch (pExit->ExitReason)
3022 {
3023 case WHvRunVpExitReasonMemoryAccess:
3024 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMemUnmapped);
3025 return nemR3WinHandleExitMemory(pVM, pVCpu, pExit, pCtx);
3026
3027 case WHvRunVpExitReasonX64IoPortAccess:
3028 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitPortIo);
3029 return nemR3WinHandleExitIoPort(pVM, pVCpu, pExit, pCtx);
3030
3031 case WHvRunVpExitReasonX64Halt:
3032 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitHalt);
3033 Log4(("HaltExit\n"));
3034 return VINF_EM_HALT;
3035
3036 case WHvRunVpExitReasonCanceled:
3037 return VINF_SUCCESS;
3038
3039 case WHvRunVpExitReasonX64InterruptWindow:
3040 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitInterruptWindow);
3041 return nemR3WinHandleExitInterruptWindow(pVM, pVCpu, pExit, pCtx);
3042
3043 case WHvRunVpExitReasonX64Cpuid:
3044 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitCpuId);
3045 return nemR3WinHandleExitCpuId(pVM, pVCpu, pExit, pCtx);
3046
3047 case WHvRunVpExitReasonX64MsrAccess:
3048 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitMsr);
3049 return nemR3WinHandleExitMsr(pVM, pVCpu, pExit, pCtx);
3050
3051 case WHvRunVpExitReasonException:
3052 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitException);
3053 return nemR3WinHandleExitException(pVM, pVCpu, pExit, pCtx);
3054
3055 case WHvRunVpExitReasonUnrecoverableException:
3056 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatExitUnrecoverable);
3057 return nemR3WinHandleExitUnrecoverableException(pVM, pVCpu, pExit, pCtx);
3058
3059 case WHvRunVpExitReasonUnsupportedFeature:
3060 case WHvRunVpExitReasonInvalidVpRegisterValue:
3061 LogRel(("Unimplemented exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3062 AssertLogRelMsgFailedReturn(("Unexpected exit on CPU #%u: %#x\n%.32Rhxd\n",
3063 pVCpu->idCpu, pExit->ExitReason, pExit), VERR_NEM_IPE_3);
3064
3065 /* Undesired exits: */
3066 case WHvRunVpExitReasonNone:
3067 default:
3068 LogRel(("Unknown exit:\n%.*Rhxd\n", (int)sizeof(*pExit), pExit));
3069 AssertLogRelMsgFailedReturn(("Unknown exit on CPU #%u: %#x!\n", pVCpu->idCpu, pExit->ExitReason), VERR_NEM_IPE_3);
3070 }
3071}
3072#endif /* IN_RING3 && !NEM_WIN_USE_OUR_OWN_RUN_API */
3073
3074#ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3075/**
3076 * Worker for nemHCWinRunGC that stops the execution on the way out.
3077 *
3078 * The CPU was running the last time we checked, so there are no messages that
3079 * need to be marked handled/whatever. Caller checks this.
3080 *
3081 * @returns rcStrict on success, error status on failure.
3082 * @param pVM The cross context VM structure.
3083 * @param pVCpu The cross context per CPU structure.
3084 * @param rcStrict The nemHCWinRunGC return status. This is a little
3085 * bit unnecessary, except in internal error cases,
3086 * since we won't need to stop the CPU if we took an
3087 * exit.
3088 * @param pMappingHeader The message slot mapping.
3089 * @param pGVM The global (ring-0) VM structure (NULL in r3).
3090 * @param pGVCpu The global (ring-0) per CPU structure (NULL in r3).
3091 */
3092NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinStopCpu(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict,
3093 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader,
3094 PGVM pGVM, PGVMCPU pGVCpu)
3095{
3096 /*
3097 * Try stopping the processor. If we're lucky we manage to do this before it
3098 * does another VM exit.
3099 */
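    /* Ring-0 issues the stop ioctl to VID.SYS directly; ring-3 goes through the
       VidStopVirtualProcessor API. */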
3100# ifdef IN_RING0
3101 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3102 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStopVirtualProcessor.uFunction,
3103 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3104 NULL, 0);
3105 if (NT_SUCCESS(rcNt))
3106 {
3107 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3108 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3109 return rcStrict;
3110 }
3111# else
3112 BOOL fRet = VidStopVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu);
3113 if (fRet)
3114 {
3115 Log8(("nemHCWinStopCpu: Stopping CPU succeeded (cpu status %u)\n", nemHCWinCpuGetRunningStatus(pVCpu) ));
3116 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuSuccess);
3117 return rcStrict;
3118 }
3119 RT_NOREF(pGVM, pGVCpu);
3120# endif
3121
3122 /*
3123 * Dang. The CPU stopped by itself and we got a couple of messages to deal with.
3124 */
3125# ifdef IN_RING0
3126 AssertLogRelMsgReturn(rcNt == ERROR_VID_STOP_PENDING, ("rcNt=%#x\n", rcNt),
3127 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3128# else
3129 DWORD dwErr = RTNtLastErrorValue();
3130 AssertLogRelMsgReturn(dwErr == ERROR_VID_STOP_PENDING, ("dwErr=%#u (%#x)\n", dwErr, dwErr),
3131 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3132# endif
3133 Log8(("nemHCWinStopCpu: Stopping CPU #%u pending...\n", pVCpu->idCpu));
3134 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatStopCpuPending);
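    /*
     * The stop raced an exit: we must (1) fetch and handle the pending exit
     * message, (2) acknowledge it and fetch the stop-request-completed
     * message, and (3) acknowledge that one too.
     */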
3135
3136 /*
3137 * First message: Exit or similar.
3138 * Note! We can safely ASSUME that rcStrict isn't carrying any important information at this point.
3139 */
3140# ifdef IN_RING0
3141 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3142 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3143 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3144 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3145 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3146 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3147 NULL, 0);
3148 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3149 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3150# else
3151 BOOL fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3152 VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3153 AssertLogRelMsgReturn(fWait, ("1st VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3154 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3155# endif
3156
3157 /* It should be a hypervisor message and definitely not a stop request completed message. */
3158 VID_MESSAGE_TYPE enmVidMsgType = pMappingHeader->enmVidMsgType;
3159 AssertLogRelMsgReturn(enmVidMsgType != VidMessageStopRequestComplete,
3160 ("Unexpected 1st message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3161 enmVidMsgType, pMappingHeader->cbMessage),
3162 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3163
3164 VBOXSTRICTRC rcStrict2 = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, CPUMQueryGuestCtxPtr(pVCpu), pGVCpu);
3165 if (rcStrict2 != VINF_SUCCESS && RT_SUCCESS(rcStrict))
3166 rcStrict = rcStrict2;
3167
3168 /*
3169 * Mark it as handled and get the stop request completed message, then mark
3170 * that as handled too. The CPU is back in the fully stopped state then.
3171 */
3172# ifdef IN_RING0
3173 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3174 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE;
3175 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3176 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3177 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3178 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3179 NULL, 0);
3180 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3181 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3182# else
3183 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3184 VID_MSHAGN_F_HANDLE_MESSAGE | VID_MSHAGN_F_GET_NEXT_MESSAGE, 30000 /*ms*/);
3185 AssertLogRelMsgReturn(fWait, ("2nd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3186 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3187# endif
3188
3189 /* It should be a stop request completed message. */
3190 enmVidMsgType = pMappingHeader->enmVidMsgType;
3191 AssertLogRelMsgReturn(enmVidMsgType == VidMessageStopRequestComplete,
3192 ("Unexpected 2nd message following ERROR_VID_STOP_PENDING: %#x LB %#x\n",
3193 enmVidMsgType, pMappingHeader->cbMessage),
3194 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3195
3196 /* Mark this as handled. */
3197# ifdef IN_RING0
3198 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3199 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = VID_MSHAGN_F_HANDLE_MESSAGE;
3200 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = 30000; /*ms*/
3201 rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3202 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3203 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3204 NULL, 0);
3205 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %#x\n", rcNt),
3206 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3207# else
3208 fWait = g_pfnVidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu, VID_MSHAGN_F_HANDLE_MESSAGE, 30000 /*ms*/);
3209 AssertLogRelMsgReturn(fWait, ("3rd VidMessageSlotHandleAndGetNext after ERROR_VID_STOP_PENDING failed: %u\n", RTNtLastErrorValue()),
3210 RT_SUCCESS(rcStrict) ? VERR_NEM_IPE_5 : rcStrict);
3211# endif
3212 Log8(("nemHCWinStopCpu: Stopped the CPU (rcStrict=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict) ));
3213 return rcStrict;
3214}
3215#endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
3216
3217#if defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3)
3218
3219/**
3220 * Deals with pending interrupt related force flags, may inject interrupt.
3221 *
3222 * @returns VBox strict status code.
3223 * @param pVM The cross context VM structure.
3224 * @param pVCpu The cross context per CPU structure.
3225 * @param pGVCpu The global (ring-0) per CPU structure.
3226 * @param pCtx The register context.
3227 * @param pfInterruptWindows Where to return interrupt window flags.
3228 */
3229NEM_TMPL_STATIC VBOXSTRICTRC
3230nemHCWinHandleInterruptFF(PVM pVM, PVMCPU pVCpu, PGVMCPU pGVCpu, PCPUMCTX pCtx, uint8_t *pfInterruptWindows)
3231{
3232 Assert(!TRPMHasTrap(pVCpu));
3233 RT_NOREF_PV(pVM);
3234
3235 /*
3236 * First update APIC.
3237 */
3238 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3239 {
3240 APICUpdatePendingInterrupts(pVCpu);
3241 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3242 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3243 return VINF_SUCCESS;
3244 }
3245
3246 /*
3247 * We don't currently implement SMIs.
3248 */
3249 AssertReturn(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_SMI), VERR_NEM_IPE_0);
3250
3251 /*
3252 * Check if we've got the minimum of state required for deciding whether we
3253 * can inject interrupts and NMIs. If we don't have it, get all we might require
3254 * for injection via IEM.
3255 */
3256 bool const fPendingNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3257 uint64_t fNeedExtrn = CPUMCTX_EXTRN_NEM_WIN_INHIBIT_INT | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS
3258 | (fPendingNmi ? CPUMCTX_EXTRN_NEM_WIN_INHIBIT_NMI : 0);
3259 if (pCtx->fExtrn & fNeedExtrn)
3260 {
3261 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "IntFF");
3262 if (rcStrict != VINF_SUCCESS)
3263 return rcStrict;
3264 }
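    /* An interrupt shadow (STI, MOV SS) only blocks delivery if RIP hasn't
       moved since the inhibiting instruction was executed. */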
3265 bool const fInhibitInterrupts = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
3266 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip;
3267
3268 /*
3269 * NMI? Try deliver it first.
3270 */
3271 if (fPendingNmi)
3272 {
3273 if ( !fInhibitInterrupts
3274 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))
3275 {
3276 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "NMI");
3277 if (rcStrict == VINF_SUCCESS)
3278 {
3279 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
3280 rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_NMI, TRPM_HARDWARE_INT, 0, 0, 0);
3281 Log8(("Injected NMI on %u (%d)\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3282 }
3283 return rcStrict;
3284 }
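        /* Cannot deliver the NMI right now; ask Hyper-V to exit when the NMI window opens. */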
3285 *pfInterruptWindows |= NEM_WIN_INTW_F_NMI;
3286 Log8(("NMI window pending on %u\n", pVCpu->idCpu));
3287 }
3288
3289 /*
3290 * APIC or PIC interrupt?
3291 */
3292 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
3293 {
3294 if ( !fInhibitInterrupts
3295 && pCtx->rflags.Bits.u1IF)
3296 {
3297 VBOXSTRICTRC rcStrict = nemHCWinImportStateIfNeededStrict(pVCpu, pGVCpu, pCtx, NEM_WIN_CPUMCTX_EXTRN_MASK_FOR_IEM, "Int");
3298 if (rcStrict == VINF_SUCCESS)
3299 {
3300 uint8_t bInterrupt;
3301 int rc = PDMGetInterrupt(pVCpu, &bInterrupt);
3302 if (RT_SUCCESS(rc))
3303 {
3304 rcStrict = IEMInjectTrap(pVCpu, bInterrupt, TRPM_HARDWARE_INT, 0, 0, 0);
3305 Log8(("Injected interrupt %#x on %u (%d)\n", bInterrupt, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3306 }
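                    /* Masked by the TPR: request an exit once the priority drops far enough to deliver it. */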
3307 else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
3308 {
3309 *pfInterruptWindows |= (bInterrupt >> 4 /*??*/) << NEM_WIN_INTW_F_PRIO_SHIFT;
3310 Log8(("VERR_APIC_INTR_MASKED_BY_TPR: *pfInterruptWindows=%#x\n", *pfInterruptWindows));
3311 }
3312 else
3313 Log8(("PDMGetInterrupt failed -> %d\n", rc));
3314 }
3315 return rcStrict;
3316 }
3317 *pfInterruptWindows |= NEM_WIN_INTW_F_REGULAR;
3318 Log8(("Interrupt window pending on %u\n", pVCpu->idCpu));
3319 }
3320
3321 return VINF_SUCCESS;
3322}
3323
3324
3325/**
3326 * Inner NEM runloop for windows.
3327 *
3328 * @returns Strict VBox status code.
3329 * @param pVM The cross context VM structure.
3330 * @param pVCpu The cross context per CPU structure.
3331 * @param pGVM The ring-0 VM structure (NULL in ring-3).
3332 * @param pGVCpu The ring-0 per CPU structure (NULL in ring-3).
3333 */
3334NEM_TMPL_STATIC VBOXSTRICTRC nemHCWinRunGC(PVM pVM, PVMCPU pVCpu, PGVM pGVM, PGVMCPU pGVCpu)
3335{
3336 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
3337 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags));
3338# ifdef LOG_ENABLED
3339 if (LogIs3Enabled())
3340 nemHCWinLogState(pVM, pVCpu);
3341# endif
3342# ifdef IN_RING0
3343 Assert(pVCpu->idCpu == pGVCpu->idCpu);
3344# endif
3345
3346 /*
3347 * Try switch to NEM runloop state.
3348 */
3349 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED))
3350 { /* likely */ }
3351 else
3352 {
3353 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3354 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu));
3355 return VINF_SUCCESS;
3356 }
3357
3358 /*
3359 * The run loop.
3360 *
3361 * The current approach to state updating is to use the sledgehammer and sync
3362 * everything every time. This will be optimized later.
3363 */
3364# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3365 VID_MESSAGE_MAPPING_HEADER volatile *pMappingHeader = (VID_MESSAGE_MAPPING_HEADER volatile *)pVCpu->nem.s.pvMsgSlotMapping;
3366 uint32_t cMillies = 5000; /** @todo lower this later... */
3367# endif
3368 const bool fSingleStepping = DBGFIsStepping(pVCpu);
3369// const uint32_t fCheckVmFFs = !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK
3370// : VM_FF_HP_R0_PRE_HM_STEP_MASK;
3371// const uint32_t fCheckCpuFFs = !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK;
3372 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
3373 for (unsigned iLoop = 0;; iLoop++)
3374 {
3375# ifndef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3376 /*
3377 * Hack alert! Unmap everything again once we've accumulated 4000 or more pages.
3378 */
3379 uint32_t const cMappedPages = pVM->nem.s.cMappedPages;
3380 if (cMappedPages >= 4000)
3381 {
3382 PGMPhysNemEnumPagesByState(pVM, pVCpu, NEM_WIN_PAGE_STATE_READABLE, nemR3WinWHvUnmapOnePageCallback, NULL);
3383 Log(("nemHCWinRunGC: Unmapped all; cMappedPages=%u -> %u\n", cMappedPages, pVM->nem.s.cMappedPages));
3384 }
3385# endif
3386
3387 /*
3388 * Pending interrupts or such? Need to check and deal with this prior
3389 * to the state syncing.
3390 */
3391 pVCpu->nem.s.fDesiredInterruptWindows = 0;
3392 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC
3393 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
3394 {
3395# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3396 /* Make sure the CPU isn't executing, since we cannot inject interrupts nor sync state while it runs. */
3397 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3398 {
3399 pVCpu->nem.s.fHandleAndGetFlags = 0;
3400 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3401 if (rcStrict == VINF_SUCCESS)
3402 { /* likely */ }
3403 else
3404 {
3405 LogFlow(("NEM/%u: breaking: nemHCWinStopCpu -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3406 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3407 break;
3408 }
3409 }
3410# endif
3411
3412 /* Try inject interrupt. */
3413 rcStrict = nemHCWinHandleInterruptFF(pVM, pVCpu, pGVCpu, pCtx, &pVCpu->nem.s.fDesiredInterruptWindows);
3414 if (rcStrict == VINF_SUCCESS)
3415 { /* likely */ }
3416 else
3417 {
3418 LogFlow(("NEM/%u: breaking: nemHCWinHandleInterruptFF -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3419 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3420 break;
3421 }
3422 }
3423
3424 /*
3425 * Ensure that hyper-V has the whole state.
3426 * (We always update the interrupt windows settings when active as hyper-V seems
3427 * to forget about it after an exit.)
3428 */
3429 if ( (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK))
3430 != (CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK)
3431 || pVCpu->nem.s.fDesiredInterruptWindows
3432 || pVCpu->nem.s.fCurrentInterruptWindows != pVCpu->nem.s.fDesiredInterruptWindows)
3433 {
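            /* Push the dirty parts of CPUMCTX (and the interrupt window settings)
               back to Hyper-V before resuming execution. */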
3434# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3435 Assert(pVCpu->nem.s.fHandleAndGetFlags != VID_MSHAGN_F_GET_NEXT_MESSAGE /* not running */);
3436# endif
3437# ifdef IN_RING0
3438 int rc2 = nemR0WinExportState(pGVM, pGVCpu, pCtx);
3439# else
3440 int rc2 = nemHCWinCopyStateToHyperV(pVM, pVCpu, pCtx);
3441 RT_NOREF(pGVM, pGVCpu);
3442# endif
3443 AssertRCReturn(rc2, rc2);
3444 }
3445
3446 /*
3447 * Run a bit.
3448 */
3449 if ( !VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
3450 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
3451 {
3452# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3453 if (pVCpu->nem.s.fHandleAndGetFlags)
3454 { /* Very likely that the CPU does NOT need starting (pending msg, running). */ }
3455 else
3456 {
3457# ifdef IN_RING0
3458 pVCpu->nem.s.uIoCtlBuf.idCpu = pGVCpu->idCpu;
3459 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlStartVirtualProcessor.uFunction,
3460 &pVCpu->nem.s.uIoCtlBuf.idCpu, sizeof(pVCpu->nem.s.uIoCtlBuf.idCpu),
3461 NULL, 0);
3462 LogFlow(("NEM/%u: IoCtlStartVirtualProcessor -> %#x\n", pVCpu->idCpu, rcNt));
3463 AssertLogRelMsgReturn(NT_SUCCESS(rcNt), ("VidStartVirtualProcessor failed for CPU #%u: %#x\n", pGVCpu->idCpu, rcNt),
3464 VERR_NEM_IPE_5);
3465# else
3466 AssertLogRelMsgReturn(g_pfnVidStartVirtualProcessor(pVM->nem.s.hPartitionDevice, pVCpu->idCpu),
3467 ("VidStartVirtualProcessor failed for CPU #%u: %u (%#x, rcNt=%#x)\n",
3468 pVCpu->idCpu, RTNtLastErrorValue(), RTNtLastErrorValue(), RTNtLastStatusValue()),
3469 VERR_NEM_IPE_5);
3470# endif
3471 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3472 }
3473# endif /* NEM_WIN_USE_OUR_OWN_RUN_API */
3474
3475 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM_WAIT, VMCPUSTATE_STARTED_EXEC_NEM))
3476 {
3477# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3478# ifdef IN_RING0
3479 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.iCpu = pGVCpu->idCpu;
3480 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.fFlags = pVCpu->nem.s.fHandleAndGetFlags;
3481 pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext.cMillies = cMillies;
3482 NTSTATUS rcNt = nemR0NtPerformIoControl(pGVM, pGVM->nem.s.IoCtlMessageSlotHandleAndGetNext.uFunction,
3483 &pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext,
3484 sizeof(pVCpu->nem.s.uIoCtlBuf.MsgSlotHandleAndGetNext),
3485 NULL, 0);
3486 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3487 if (rcNt == STATUS_SUCCESS)
3488# else
3489 BOOL fRet = VidMessageSlotHandleAndGetNext(pVM->nem.s.hPartitionDevice, pVCpu->idCpu,
3490 pVCpu->nem.s.fHandleAndGetFlags, cMillies);
3491 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3492 if (fRet)
3493# endif
3494# else
3495 WHV_RUN_VP_EXIT_CONTEXT ExitReason;
3496 RT_ZERO(ExitReason);
3497 HRESULT hrc = WHvRunVirtualProcessor(pVM->nem.s.hPartition, pVCpu->idCpu, &ExitReason, sizeof(ExitReason));
3498 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_WAIT);
3499 if (SUCCEEDED(hrc))
3500# endif
3501 {
3502 /*
3503 * Deal with the message.
3504 */
3505# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3506 rcStrict = nemHCWinHandleMessage(pVM, pVCpu, pMappingHeader, pCtx, pGVCpu);
3507 pVCpu->nem.s.fHandleAndGetFlags |= VID_MSHAGN_F_HANDLE_MESSAGE;
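                        /* The message must be acknowledged on the next
                           VidMessageSlotHandleAndGetNext call, hence the flag. */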
3508# else
3509 rcStrict = nemR3WinHandleExit(pVM, pVCpu, &ExitReason, pCtx);
3510# endif
3511 if (rcStrict == VINF_SUCCESS)
3512 { /* hopefully likely */ }
3513 else
3514 {
3515 LogFlow(("NEM/%u: breaking: nemHCWinHandleMessage -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) ));
3516 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus);
3517 break;
3518 }
3519 }
3520 else
3521 {
3522# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3523
3524 /* VID.SYS merges STATUS_ALERTED and STATUS_USER_APC into STATUS_TIMEOUT,
3525 so after NtAlertThread we end up here with a STATUS_TIMEOUT. And yeah,
3526 the error code conversion is into WAIT_XXX, i.e. NT status codes. */
3527# ifndef IN_RING0
3528 DWORD rcNt = GetLastError();
3529# endif
3530 LogFlow(("NEM/%u: VidMessageSlotHandleAndGetNext -> %#x\n", pVCpu->idCpu, rcNt));
3531 AssertLogRelMsgReturn( rcNt == STATUS_TIMEOUT
3532 || rcNt == STATUS_ALERTED /* just in case */
3533 || rcNt == STATUS_USER_APC /* ditto */
3534 , ("VidMessageSlotHandleAndGetNext failed for CPU #%u: %#x (%u)\n",
3535 pVCpu->idCpu, rcNt, rcNt),
3536 VERR_NEM_IPE_0);
3537 pVCpu->nem.s.fHandleAndGetFlags = VID_MSHAGN_F_GET_NEXT_MESSAGE;
3538 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatGetMsgTimeout);
3539# else
3540 AssertLogRelMsgFailedReturn(("WHvRunVirtualProcessor failed for CPU #%u: %#x (%u)\n",
3541 pVCpu->idCpu, hrc, GetLastError()),
3542 VERR_NEM_IPE_0);
3543
3544# endif
3545 }
3546
3547 /*
3548 * If no relevant FFs are pending, loop.
3549 */
3550 if ( !VM_FF_IS_PENDING( pVM, !fSingleStepping ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
3551 && !VMCPU_FF_IS_PENDING(pVCpu, !fSingleStepping ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
3552 continue;
3553
3554 /** @todo Try handle pending flags, not just return to EM loops. Take care
3555 * not to set important RCs here unless we've handled a message. */
3556 LogFlow(("NEM/%u: breaking: pending FF (%#x / %#x)\n",
3557 pVCpu->idCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
3558 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPost);
3559 }
3560 else
3561 {
3562 LogFlow(("NEM/%u: breaking: canceled %d (pre exec)\n", pVCpu->idCpu, VMCPU_GET_STATE(pVCpu) ));
3563 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnCancel);
3564 }
3565 }
3566 else
3567 {
3568 LogFlow(("NEM/%u: breaking: pending FF (pre exec)\n", pVCpu->idCpu));
3569 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnFFPre);
3570 }
3571 break;
3572 } /* the run loop */
3573
3574
3575 /*
3576 * If the CPU is running, make sure to stop it before we try sync back the
3577 * state and return to EM.
3578 */
3579# ifdef NEM_WIN_USE_OUR_OWN_RUN_API
3580 if (pVCpu->nem.s.fHandleAndGetFlags == VID_MSHAGN_F_GET_NEXT_MESSAGE)
3581 {
3582 pVCpu->nem.s.fHandleAndGetFlags = 0;
3583 rcStrict = nemHCWinStopCpu(pVM, pVCpu, rcStrict, pMappingHeader, pGVM, pGVCpu);
3584 }
3585# endif
3586
3587 if (!VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM))
3588 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED);
3589
3590 if (pCtx->fExtrn & (CPUMCTX_EXTRN_ALL | (CPUMCTX_EXTRN_NEM_WIN_MASK & ~CPUMCTX_EXTRN_NEM_WIN_EVENT_INJECT)))
3591 {
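        /* Pull the remaining externalized state back from Hyper-V so the rest
           of VBox sees a complete CPUMCTX. */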
3592# ifdef IN_RING0
3593 int rc2 = nemR0WinImportState(pGVM, pGVCpu, pCtx, CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
3594 if (RT_SUCCESS(rc2))
3595 pCtx->fExtrn = 0;
3596 else if (rc2 == VERR_NEM_CHANGE_PGM_MODE || rc2 == VERR_NEM_FLUSH_TLB || rc2 == VERR_NEM_UPDATE_APIC_BASE)
3597 {
3598 pCtx->fExtrn = 0;
3599 if (rcStrict == VINF_SUCCESS || rcStrict == -rc2)
3600 rcStrict = -rc2;
3601 else
3602 {
3603 pVCpu->nem.s.rcPending = -rc2;
3604 LogFlow(("NEM/%u: rcPending=%Rrc (rcStrict=%Rrc)\n", pVCpu->idCpu, rc2, VBOXSTRICTRC_VAL(rcStrict) ));
3605 }
3606 }
3607# else
3608 int rc2 = nemHCWinCopyStateFromHyperV(pVM, pVCpu, pCtx, CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_NEM_WIN_MASK);
3609 if (RT_SUCCESS(rc2))
3610 pCtx->fExtrn = 0;
3611# endif
3612 else if (RT_SUCCESS(rcStrict))
3613 rcStrict = rc2;
3614 }
3615 else
3616 pCtx->fExtrn = 0;
3617
3618 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 => %Rrc\n",
3619 pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, pCtx->rflags, VBOXSTRICTRC_VAL(rcStrict) ));
3620 return rcStrict;
3621}
3622
3623#endif /* defined(NEM_WIN_USE_OUR_OWN_RUN_API) || defined(IN_RING3) */
3624
3625/**
3626 * @callback_method_impl{FNPGMPHYSNEMCHECKPAGE}
3627 */
3628NEM_TMPL_STATIC DECLCALLBACK(int) nemHCWinUnsetForA20CheckerCallback(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys,
3629 PPGMPHYSNEMPAGEINFO pInfo, void *pvUser)
3630{
3631 /* We'll just unmap the memory. */
3632 if (pInfo->u2NemState > NEM_WIN_PAGE_STATE_UNMAPPED)
3633 {
3634#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3635 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhys);
3636 AssertRC(rc);
3637 if (RT_SUCCESS(rc))
3638#else
3639 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhys, X86_PAGE_SIZE);
3640 if (SUCCEEDED(hrc))
3641#endif
3642 {
3643 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3644 Log5(("NEM GPA unmapped/A20: %RGp (was %s, cMappedPages=%u)\n", GCPhys, g_apszPageStates[pInfo->u2NemState], cMappedPages));
3645 pInfo->u2NemState = NEM_WIN_PAGE_STATE_UNMAPPED;
3646 }
3647 else
3648 {
3649#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3650 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp rc=%Rrc\n", GCPhys, rc));
3651 return rc;
3652#else
3653 LogRel(("nemHCWinUnsetForA20CheckerCallback/unmap: GCPhys=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3654 GCPhys, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3655 return VERR_NEM_IPE_2;
3656#endif
3657 }
3658 }
3659 RT_NOREF(pVCpu, pvUser);
3660 return VINF_SUCCESS;
3661}
3662
3663
3664/**
3665 * Unmaps a page from Hyper-V for the purpose of emulating A20 gate behavior.
3666 *
3667 * @returns The PGMPhysNemPageInfoChecker result.
3668 * @param pVM The cross context VM structure.
3669 * @param pVCpu The cross context virtual CPU structure.
3670 * @param GCPhys The page to unmap.
3671 */
3672NEM_TMPL_STATIC int nemHCWinUnmapPageForA20Gate(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
3673{
3674 PGMPHYSNEMPAGEINFO Info;
3675 return PGMPhysNemPageInfoChecker(pVM, pVCpu, GCPhys, false /*fMakeWritable*/, &Info,
3676 nemHCWinUnsetForA20CheckerCallback, NULL);
3677}
3678
3679
3680void nemHCNativeNotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb)
3681{
3682 Log5(("nemHCNativeNotifyHandlerPhysicalRegister: %RGp LB %RGp enmKind=%d\n", GCPhys, cb, enmKind));
3683 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb);
3684}
3685
3686
3687void nemHCNativeNotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3688 int fRestoreAsRAM, bool fRestoreAsRAM2)
3689{
3690 Log5(("nemHCNativeNotifyHandlerPhysicalDeregister: %RGp LB %RGp enmKind=%d fRestoreAsRAM=%d fRestoreAsRAM2=%d\n",
3691 GCPhys, cb, enmKind, fRestoreAsRAM, fRestoreAsRAM2));
3692 NOREF(pVM); NOREF(enmKind); NOREF(GCPhys); NOREF(cb); NOREF(fRestoreAsRAM); NOREF(fRestoreAsRAM2);
3693}
3694
3695
3696void nemHCNativeNotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld,
3697 RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fRestoreAsRAM)
3698{
3699 Log5(("nemHCNativeNotifyHandlerPhysicalModify: %RGp LB %RGp -> %RGp enmKind=%d fRestoreAsRAM=%d\n",
3700 GCPhysOld, cb, GCPhysNew, enmKind, fRestoreAsRAM));
3701 NOREF(pVM); NOREF(enmKind); NOREF(GCPhysOld); NOREF(GCPhysNew); NOREF(cb); NOREF(fRestoreAsRAM);
3702}
3703
3704
3705/**
3706 * Worker that maps pages into Hyper-V.
3707 *
3708 * This is used by the PGM physical page notifications as well as the memory
3709 * access VMEXIT handlers.
3710 *
3711 * @returns VBox status code.
3712 * @param pVM The cross context VM structure.
3713 * @param pVCpu The cross context virtual CPU structure of the
3714 * calling EMT.
3715 * @param GCPhysSrc The source page address.
3716 * @param GCPhysDst The hyper-V destination page. This may differ from
3717 * GCPhysSrc when A20 is disabled.
3718 * @param fPageProt NEM_PAGE_PROT_XXX.
3719 * @param pu2State Our page state (input/output).
3720 * @param fBackingChanged Set if the page backing is being changed.
3721 * @thread EMT(pVCpu)
3722 */
3723NEM_TMPL_STATIC int nemHCNativeSetPhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysSrc, RTGCPHYS GCPhysDst,
3724 uint32_t fPageProt, uint8_t *pu2State, bool fBackingChanged)
3725{
3726#ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3727 /*
3728 * When using the hypercalls instead of the ring-3 APIs, we don't need to
3729 * unmap memory before modifying it. We still want to track the state though,
3730 * since unmap will fail when called on an unmapped page and we don't want to redo
3731 * upgrades/downgrades.
3732 */
3733 uint8_t const u2OldState = *pu2State;
3734 int rc;
3735 if (fPageProt == NEM_PAGE_PROT_NONE)
3736 {
3737 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3738 {
3739 rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
3740 if (RT_SUCCESS(rc))
3741 {
3742 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3743 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3744 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3745 }
3746 else
3747 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3748 }
3749 else
3750 rc = VINF_SUCCESS;
3751 }
3752 else if (fPageProt & NEM_PAGE_PROT_WRITE)
3753 {
3754 if (u2OldState != NEM_WIN_PAGE_STATE_WRITABLE || fBackingChanged)
3755 {
3756 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3757 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
3758 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3759 if (RT_SUCCESS(rc))
3760 {
3761 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3762 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
3763 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
3764 Log5(("NEM GPA writable/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3765 NOREF(cMappedPages);
3766 }
3767 else
3768 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3769 }
3770 else
3771 rc = VINF_SUCCESS;
3772 }
3773 else
3774 {
3775 if (u2OldState != NEM_WIN_PAGE_STATE_READABLE || fBackingChanged)
3776 {
3777 rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3778 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3779 if (RT_SUCCESS(rc))
3780 {
3781 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3782 uint32_t cMappedPages = u2OldState <= NEM_WIN_PAGE_STATE_UNMAPPED
3783 ? ASMAtomicIncU32(&pVM->nem.s.cMappedPages) : pVM->nem.s.cMappedPages;
3784 Log5(("NEM GPA read+exec/set: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3785 NOREF(cMappedPages);
3786 }
3787 else
3788 AssertLogRelMsgFailed(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3789 }
3790 else
3791 rc = VINF_SUCCESS;
3792 }
3793
3794 return rc;
3795
3796#else
3797 /*
3798 * Looks like we need to unmap a page before we can change the backing
3799 * or even modify the protection. This is going to be *REALLY* efficient.
3800 * PGM lends us two bits to keep track of the state here.
3801 */
3802 uint8_t const u2OldState = *pu2State;
3803 uint8_t const u2NewState = fPageProt & NEM_PAGE_PROT_WRITE ? NEM_WIN_PAGE_STATE_WRITABLE
3804 : fPageProt & NEM_PAGE_PROT_READ ? NEM_WIN_PAGE_STATE_READABLE : NEM_WIN_PAGE_STATE_UNMAPPED;
3805 if ( fBackingChanged
3806 || u2NewState != u2OldState)
3807 {
3808 if (u2OldState > NEM_WIN_PAGE_STATE_UNMAPPED)
3809 {
3810# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3811 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
3812 AssertRC(rc);
3813 if (RT_SUCCESS(rc))
3814 {
3815 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3816 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3817 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
3818 {
3819 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
3820 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3821 return VINF_SUCCESS;
3822 }
3823 }
3824 else
3825 {
3826 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3827 return rc;
3828 }
3829# else
3830 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst, X86_PAGE_SIZE);
3831 if (SUCCEEDED(hrc))
3832 {
3833 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3834 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3835 if (u2NewState == NEM_WIN_PAGE_STATE_UNMAPPED)
3836 {
3837 Log5(("NEM GPA unmapped/set: %RGp (was %s, cMappedPages=%u)\n",
3838 GCPhysDst, g_apszPageStates[u2OldState], cMappedPages));
3839 return VINF_SUCCESS;
3840 }
3841 }
3842 else
3843 {
3844 LogRel(("nemHCNativeSetPhysPage/unmap: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3845 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3846 return VERR_NEM_INIT_FAILED;
3847 }
3848# endif
3849 }
3850 }
3851
3852 /*
3853 * Writeable mapping?
3854 */
3855 if (fPageProt & NEM_PAGE_PROT_WRITE)
3856 {
3857# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3858 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3859 HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE
3860 | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3861 AssertRC(rc);
3862 if (RT_SUCCESS(rc))
3863 {
3864 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3865 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3866 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3867 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3868 return VINF_SUCCESS;
3869 }
3870 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3871 return rc;
3872# else
3873 void *pvPage;
3874 int rc = nemR3NativeGCPhys2R3PtrWriteable(pVM, GCPhysSrc, &pvPage);
3875 if (RT_SUCCESS(rc))
3876 {
3877 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, pvPage, GCPhysDst, X86_PAGE_SIZE,
3878 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute | WHvMapGpaRangeFlagWrite);
3879 if (SUCCEEDED(hrc))
3880 {
3881 *pu2State = NEM_WIN_PAGE_STATE_WRITABLE;
3882 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3883 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3884 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3885 return VINF_SUCCESS;
3886 }
3887 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3888 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3889 return VERR_NEM_INIT_FAILED;
3890 }
3891 LogRel(("nemHCNativeSetPhysPage/writable: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
3892 return rc;
3893# endif
3894 }
3895
3896 if (fPageProt & NEM_PAGE_PROT_READ)
3897 {
3898# ifdef NEM_WIN_USE_HYPERCALLS_FOR_PAGES
3899 int rc = nemHCWinHypercallMapPage(pVM, pVCpu, GCPhysSrc, GCPhysDst,
3900 HV_MAP_GPA_READABLE | HV_MAP_GPA_EXECUTABLE | HV_MAP_GPA_EXECUTABLE_AGAIN);
3901 AssertRC(rc);
3902 if (RT_SUCCESS(rc))
3903 {
3904 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3905 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3906 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3907 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3908 return VINF_SUCCESS;
3909 }
3910 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3911 return rc;
3912# else
3913 const void *pvPage;
3914 int rc = nemR3NativeGCPhys2R3PtrReadOnly(pVM, GCPhysSrc, &pvPage);
3915 if (RT_SUCCESS(rc))
3916 {
3917 HRESULT hrc = WHvMapGpaRange(pVM->nem.s.hPartition, (void *)pvPage, GCPhysDst, X86_PAGE_SIZE,
3918 WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagExecute);
3919 if (SUCCEEDED(hrc))
3920 {
3921 *pu2State = NEM_WIN_PAGE_STATE_READABLE;
3922 uint32_t cMappedPages = ASMAtomicIncU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3923 Log5(("NEM GPA mapped/set: %RGp %s (was %s, cMappedPages=%u)\n",
3924 GCPhysDst, g_apszPageStates[u2NewState], g_apszPageStates[u2OldState], cMappedPages));
3925 return VINF_SUCCESS;
3926 }
3927 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysDst=%RGp hrc=%Rhrc (%#x) Last=%#x/%u\n",
3928 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3929 return VERR_NEM_INIT_FAILED;
3930 }
3931 LogRel(("nemHCNativeSetPhysPage/readonly: GCPhysSrc=%RGp rc=%Rrc\n", GCPhysSrc, rc));
3932 return rc;
3933# endif
3934 }
3935
3936 /* We already unmapped it above. */
3937 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3938 return VINF_SUCCESS;
3939#endif /* !NEM_WIN_USE_HYPERCALLS_FOR_PAGES */
3940}
3941
3942
3943NEM_TMPL_STATIC int nemHCJustUnmapPageFromHyperV(PVM pVM, RTGCPHYS GCPhysDst, uint8_t *pu2State)
3944{
3945 if (*pu2State <= NEM_WIN_PAGE_STATE_UNMAPPED)
3946 {
3947 Log5(("nemHCJustUnmapPageFromHyperV: %RGp == unmapped\n", GCPhysDst));
3948 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3949 return VINF_SUCCESS;
3950 }
3951
3952#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
3953 PVMCPU pVCpu = VMMGetCpu(pVM);
3954 int rc = nemHCWinHypercallUnmapPage(pVM, pVCpu, GCPhysDst);
3955 AssertRC(rc);
3956 if (RT_SUCCESS(rc))
3957 {
3958 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3959 Log5(("NEM GPA unmapped/just: %RGp (was %s, cMappedPages=%u)\n", GCPhysDst, g_apszPageStates[*pu2State], cMappedPages));
3960 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3961 return VINF_SUCCESS;
3962 }
3963 LogRel(("nemHCJustUnmapPageFromHyperV/unmap: GCPhysDst=%RGp rc=%Rrc\n", GCPhysDst, rc));
3964 return rc;
3965#else
3966 HRESULT hrc = WHvUnmapGpaRange(pVM->nem.s.hPartition, GCPhysDst & ~(RTGCPHYS)X86_PAGE_OFFSET_MASK, X86_PAGE_SIZE);
3967 if (SUCCEEDED(hrc))
3968 {
3969 uint32_t cMappedPages = ASMAtomicDecU32(&pVM->nem.s.cMappedPages); NOREF(cMappedPages);
3970 *pu2State = NEM_WIN_PAGE_STATE_UNMAPPED;
3971 Log5(("nemHCJustUnmapPageFromHyperV: %RGp => unmapped (total %u)\n", GCPhysDst, cMappedPages));
3972 return VINF_SUCCESS;
3973 }
3974 LogRel(("nemHCJustUnmapPageFromHyperV(%RGp): failed! hrc=%Rhrc (%#x) Last=%#x/%u\n",
3975 GCPhysDst, hrc, hrc, RTNtLastStatusValue(), RTNtLastErrorValue()));
3976 return VERR_NEM_IPE_6;
3977#endif
3978}
3979
3980
3981int nemHCNativeNotifyPhysPageAllocated(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
3982 PGMPAGETYPE enmType, uint8_t *pu2State)
3983{
3984 Log5(("nemHCNativeNotifyPhysPageAllocated: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
3985 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
3986 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
3987
3988 int rc;
3989#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
3990 PVMCPU pVCpu = VMMGetCpu(pVM);
3991 if ( pVM->nem.s.fA20Enabled
3992 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
3993 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
3994 else
3995 {
3996 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
3997 rc = nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
3998 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys) && RT_SUCCESS(rc))
3999 rc = nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4000
4001 }
4002#else
4003 RT_NOREF_PV(fPageProt);
4004 if ( pVM->nem.s.fA20Enabled
4005 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4006 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4007 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4008 rc = nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4009 else
4010 rc = VINF_SUCCESS; /* ignore since we've got the alias page at this address. */
4011#endif
4012 return rc;
4013}
4014
4015
4016void nemHCNativeNotifyPhysPageProtChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint32_t fPageProt,
4017 PGMPAGETYPE enmType, uint8_t *pu2State)
4018{
4019 Log5(("nemHCNativeNotifyPhysPageProtChanged: %RGp HCPhys=%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4020 GCPhys, HCPhys, fPageProt, enmType, *pu2State));
4021 RT_NOREF_PV(HCPhys); RT_NOREF_PV(enmType);
4022
4023#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4024 PVMCPU pVCpu = VMMGetCpu(pVM);
4025 if ( pVM->nem.s.fA20Enabled
4026 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4027 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4028 else
4029 {
4030 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4031 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4032 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4033 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, false /*fBackingChanged*/);
4034 }
4035#else
4036 RT_NOREF_PV(fPageProt);
4037 if ( pVM->nem.s.fA20Enabled
4038 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4039 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4040 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4041 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4042 /* else: ignore since we've got the alias page at this address. */
4043#endif
4044}
4045
4046
4047void nemHCNativeNotifyPhysPageChanged(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS HCPhysPrev, RTHCPHYS HCPhysNew,
4048 uint32_t fPageProt, PGMPAGETYPE enmType, uint8_t *pu2State)
4049{
4050 Log5(("nemHCNativeNotifyPhysPageChanged: %RGp HCPhys=%RHp->%RHp fPageProt=%#x enmType=%d *pu2State=%d\n",
4051 GCPhys, HCPhysPrev, HCPhysNew, fPageProt, enmType, *pu2State));
4052 RT_NOREF_PV(HCPhysPrev); RT_NOREF_PV(HCPhysNew); RT_NOREF_PV(enmType);
4053
4054#if defined(NEM_WIN_USE_HYPERCALLS_FOR_PAGES) || defined(IN_RING0)
4055 PVMCPU pVCpu = VMMGetCpu(pVM);
4056 if ( pVM->nem.s.fA20Enabled
4057 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4058 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4059 else
4060 {
4061 /* To keep effort at a minimum, we unmap the HMA page alias and resync it lazily when needed. */
4062 nemHCWinUnmapPageForA20Gate(pVM, pVCpu, GCPhys | RT_BIT_32(20));
4063 if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4064 nemHCNativeSetPhysPage(pVM, pVCpu, GCPhys, GCPhys, fPageProt, pu2State, true /*fBackingChanged*/);
4065 }
4066#else
4067 RT_NOREF_PV(fPageProt);
4068 if ( pVM->nem.s.fA20Enabled
4069 || !NEM_WIN_IS_RELEVANT_TO_A20(GCPhys))
4070 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4071 else if (!NEM_WIN_IS_SUBJECT_TO_A20(GCPhys))
4072 nemR3JustUnmapPageFromHyperV(pVM, GCPhys, pu2State);
4073 /* else: ignore since we've got the alias page at this address. */
4074#endif
4075}
4076